return self.n_splits
class RepeatedKFold:
"""
Repeated :class:`KFold` cross validator.
Repeats :class:`KFold` ``n_repeats`` times with different randomization in
each repetition.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
n_repeats(int): The number of repetitions.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. Default is ``None``.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
self.n_repeats = n_repeats
self.random_state = random_state
self.n_splits = n_splits
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
rng = get_rng(self.random_state)
for _ in range(self.n_repeats):
cv = KFold(n_splits=self.n_splits, random_state=rng, shuffle=True)
yield from cv.split(data)
def get_n_folds(self):
return self.n_repeats * self.n_splits
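# Usage sketch (illustrative only, not part of this module). Assumes the
# built-in ml-100k dataset and the SVD algorithm from Surprise:
#
#     from surprise import Dataset, SVD
#     from surprise.model_selection import RepeatedKFold
#
#     data = Dataset.load_builtin('ml-100k')
#     rkf = RepeatedKFold(n_splits=3, n_repeats=2, random_state=0)
#     for trainset, testset in rkf.split(data):  # yields 3 * 2 = 6 folds
#         algo = SVD()
#         algo.fit(trainset)
#         predictions = algo.test(testset)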
class ShuffleSplit:
"""A basic cross-validation iterator with random trainsets and testsets.
Contrary to other cross-validation strategies, random splits do not
guarantee that all folds will be different, although this is still very
likely for sizeable datasets.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
test_size(float or int or ``None``): If float, it represents the
proportion of ratings to include in the testset. If int,
represents the absolute number of ratings in the testset. If
``None``, the value is set to the complement of the trainset size.
Default is ``.2``.
train_size(float or int or ``None``): If float, it represents the
proportion of ratings to include in the trainset. If int,
represents the absolute number of ratings in the trainset. If
``None``, the value is set to the complement of the testset size.
Default is ``None``.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data`` parameter
of the ``split()`` method. Shuffling is not done in-place. Setting
this to `False` defeats the purpose of this iterator, but it's
useful for the implementation of :func:`train_test_split`. Default
is ``True``.
"""
def __init__(
self,
n_splits=5,
test_size=0.2,
train_size=None,
random_state=None,
shuffle=True,
):
if n_splits <= 0:
raise ValueError(
"n_splits = {} should be strictly greater than " "0.".format(n_splits)
)
if test_size is not None and test_size <= 0:
raise ValueError(
"test_size={} should be strictly greater than " "0".format(test_size)
)
if train_size is not None and train_size <= 0:
raise ValueError(
"train_size={} should be strictly greater than " "0".format(train_size)
)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.shuffle = shuffle
def validate_train_test_sizes(self, test_size, train_size, n_ratings):
if test_size is not None and test_size >= n_ratings:
raise ValueError(
"test_size={} should be less than the number of "
"ratings {}".format(test_size, n_ratings)
)
if train_size is not None and train_size >= n_ratings:
raise ValueError(
"train_size={} should be less than the number of"
" ratings {}".format(train_size, n_ratings)
)
if np.asarray(test_size).dtype.kind == "f":
test_size = ceil(test_size * n_ratings)
if train_size is None:
train_size = n_ratings - test_size
elif np.asarray(train_size).dtype.kind == "f":
train_size = floor(train_size * n_ratings)
if test_size is None:
test_size = n_ratings - train_size
if train_size + test_size > n_ratings:
raise ValueError(
"The sum of train_size and test_size ({}) "
"should be smaller than the number of "
"ratings {}.".format(train_size + test_size, n_ratings)
)
return int(train_size), int(test_size)
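# Worked example of the size computation above (illustrative): with
# n_ratings=1000, test_size=0.2 and train_size=None, test_size becomes
# ceil(0.2 * 1000) = 200 and train_size becomes 1000 - 200 = 800, so
# (800, 200) is returned. With test_size=100 and train_size=0.5,
# train_size becomes floor(0.5 * 1000) = 500, and since 500 + 100 <= 1000
# the method returns (500, 100).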
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
train_size, test_size = self.validate_train_test_sizes(
self.test_size, self.train_size, len(data.raw_ratings)
)
rng = get_rng(self.random_state)
for _ in range(self.n_splits):
if self.shuffle:
permutation = rng.permutation(len(data.raw_ratings))
else:
permutation = np.arange(len(data.raw_ratings))
raw_trainset = [data.raw_ratings[i] for i in permutation[:train_size]]
raw_testset = [
data.raw_ratings[i]
for i in permutation[train_size : (train_size + test_size)]
]
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
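# Usage sketch (illustrative only): ShuffleSplit draws n_splits independent
# random train/test partitions of a Surprise Dataset:
#
#     from surprise.model_selection import ShuffleSplit
#
#     ss = ShuffleSplit(n_splits=3, test_size=0.25, random_state=0)
#     for trainset, testset in ss.split(data):
#         pass  # fit and evaluate an algorithm on each split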
def train_test_split(
data, test_size=0.2, train_size=None, random_state=None, shuffle=True
):
"""Split a dataset into trainset and testset.
See an example in the :ref:`User Guide <train_test_split_example>`.
Note: this function cannot be used as a cross-validation iterator.
Args:
data(:obj:`Dataset <surprise.dataset.Dataset>`): The dataset to split
into trainset and testset.
test_size(float or int or ``None``): If float, it represents the
proportion of ratings to include in the testset. If int,
represents the absolute number of ratings in the testset. If
``None``, the value is set to the complement of the trainset size.
Default is ``.2``.
train_size(float or int or ``None``): If float, it represents the
proportion of ratings to include in the trainset. If int,
represents the absolute number of ratings in the trainset. If
``None``, the value is set to the complement of the testset size.
Default is ``None``.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
shuffle(bool): Whether to shuffle the ratings in the ``data``
parameter. Shuffling is not done in-place. Default is ``True``.
"""
ss = ShuffleSplit(
n_splits=1,
test_size=test_size,
train_size=train_size,
random_state=random_state,
shuffle=shuffle,
)
return next(ss.split(data))
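# Usage sketch (illustrative only): a single random train/test split,
# evaluated with RMSE:
#
#     from surprise import Dataset, SVD, accuracy
#     from surprise.model_selection import train_test_split
#
#     data = Dataset.load_builtin('ml-100k')
#     trainset, testset = train_test_split(data, test_size=0.25, random_state=0)
#     algo = SVD().fit(trainset)
#     accuracy.rmse(algo.test(testset))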
class LeaveOneOut:
"""Cross-validation iterator where each user has exactly one rating in the
testset.
Contrary to other cross-validation strategies, ``LeaveOneOut`` does not
guarantee that all folds will be different, although this is still very
likely for sizeable datasets.
See an example in the :ref:`User Guide <use_cross_validation_iterators>`.
Args:
n_splits(int): The number of folds.
random_state(int, RandomState instance from numpy, or ``None``):
Determines the RNG that will be used for determining the folds. If
int, ``random_state`` will be used as a seed for a new RNG. This is
useful to get the same splits over multiple calls to ``split()``.
If RandomState instance, this same instance is used as RNG. If
``None``, the current RNG from numpy is used. ``random_state`` is
only used if ``shuffle`` is ``True``. Default is ``None``.
min_n_ratings(int): Minimum number of ratings for each user in the
trainset. E.g. if ``min_n_ratings`` is ``2``, we are sure each user
has at least ``2`` ratings in the trainset (and ``1`` in the
testset). Other users are discarded. Default is ``0``, so some
users (having only one rating) may be in the testset and not in the
trainset.
"""
def __init__(self, n_splits=5, random_state=None, min_n_ratings=0):
self.n_splits = n_splits
self.random_state = random_state
self.min_n_ratings = min_n_ratings
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
# map ratings to the users ids
user_ratings = defaultdict(list)
for uid, iid, r_ui, _ in data.raw_ratings:
user_ratings[uid].append((uid, iid, r_ui, None))
rng = get_rng(self.random_state)
for _ in range(self.n_splits):
# for each user, randomly choose a rating and put it in the
# testset.
raw_trainset, raw_testset = [], []
for uid, ratings in user_ratings.items():
if len(ratings) > self.min_n_ratings:
i = rng.randint(0, len(ratings))
raw_testset.append(ratings[i])
raw_trainset += [
rating for (j, rating) in enumerate(ratings) if j != i
]
if not raw_trainset:
raise ValueError(
"Could not build any trainset. Maybe " "min_n_ratings is too high?"
)
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
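# Usage sketch (illustrative only): each testset contains exactly one rating
# per user; with min_n_ratings=2, only users with at least 3 ratings are
# split (at least 2 stay in the trainset, 1 goes to the testset):
#
#     from surprise.model_selection import LeaveOneOut
#
#     loo = LeaveOneOut(n_splits=2, random_state=0, min_n_ratings=2)
#     for trainset, testset in loo.split(data):
#         pass  # every kept user appears exactly once in the testset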
class PredefinedKFold:
"""A cross-validation iterator to when a dataset has been loaded with the
:meth:`load_from_folds <surprise.dataset.Dataset.load_from_folds>`
method.
See an example in the :ref:`User Guide <load_from_folds_example>`.
"""
def split(self, data):
"""Generator function to iterate over trainsets and testsets.
Args:
data(:obj:`Dataset<surprise.dataset.Dataset>`): The data containing
ratings that will be divided into trainsets and testsets.
Yields:
tuple of (trainset, testset)
"""
self.n_splits = len(data.folds_files)
for train_file, test_file in data.folds_files:
raw_trainset = data.read_ratings(train_file)
raw_testset = data.read_ratings(test_file)
trainset = data.construct_trainset(raw_trainset)
testset = data.construct_testset(raw_testset)
yield trainset, testset
def get_n_folds(self):
return self.n_splits
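# Usage sketch (illustrative only; the fold file paths are placeholders):
#
#     from surprise import Dataset, Reader
#     from surprise.model_selection import PredefinedKFold
#
#     folds_files = [('u1.base', 'u1.test'), ('u2.base', 'u2.test')]
#     data = Dataset.load_from_folds(folds_files, Reader('ml-100k'))
#     for trainset, testset in PredefinedKFold().split(data):
#         pass  # one fold per (train_file, test_file) pair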
<s> """
The validation module contains the cross_validate function, inspired from
the mighty scikit learn.
"""
import time
import numpy as np
from joblib import delayed, Parallel
from .. import accuracy
from .split import get_cv
def cross_validate(
algo,
data,
measures=["rmse", "mae"],
cv=None,
return_train_measures=False,
n_jobs=1,
pre_dispatch="2*n_jobs",
verbose=False,
):
"""
Run a cross validation procedure for a given algorithm, reporting accuracy
measures and computation times.
See an example in the :ref:`User Guide <cross_validate_example>`.
Args:
algo(:obj:`AlgoBase \\
<surprise.prediction_algorithms.algo_base.AlgoBase>`):
The algorithm to evaluate.
data(:obj:`Dataset <surprise.dataset.Dataset>`): The dataset on which
to evaluate the algorithm.
measures(list of string): The performance measures to compute. Allowed
names are function names as defined in the :mod:`accuracy
<surprise.accuracy>` module. Default is ``['rmse', 'mae']``.
cv(cross-validation iterator, int or ``None``): Determines how the
``data`` parameter will be split (i.e. how trainsets and testsets
will be defined). If an int is passed, :class:`KFold
<surprise.model_selection.split.KFold>` is used with the
appropriate ``n_splits`` parameter. If ``None``, :class:`KFold
<surprise.model_selection.split.KFold>` is used with
``n_splits=5``.
return_train_measures(bool): Whether to compute performance measures on
the trainsets. Default is ``False``.
n_jobs(int): The maximum number of folds evaluated in parallel.
- If ``-1``, all CPUs are used.
- If ``1`` is given, no parallel computing code is used at all,\\
which is useful for debugging.
- For ``n_jobs`` below ``-1``, ``(n_cpus + n_jobs + 1)`` are\\
used. For example, with ``n_jobs = -2`` all CPUs but one are\\
used.
Default is ``1``.
pre_dispatch(int or string): Controls the number of jobs that get
dispatched during parallel execution. Reducing this number can be
useful to avoid an explosion of memory consumption when more jobs
get dispatched than CPUs can process. This parameter can be:
- ``None``, in which case all the jobs are immediately created\\
and spawned. Use this for lightweight and fast-running\\
jobs, to avoid delays due to on-demand spawning of the\\
jobs.
- An int, giving the exact number of total jobs that are\\
spawned.
- A string, giving an expression as a function of ``n_jobs``,\\
as in ``'2*n_jobs'``.
Default is ``'2*n_jobs'``.
verbose(bool): If ``True``, accuracy measures for each split are printed,
as well as train and test times. Averages and standard deviations
over all splits are also reported. Default is ``False``: nothing is
printed.
Returns:
dict: A dict with the following keys:
- ``'test_*'`` where ``*`` corresponds to a lower-case accuracy
measure, e.g. ``'test_rmse'``: numpy array with accuracy values
for each testset.
- ``'train_*'`` where ``*`` corresponds to a lower-case accuracy
measure, e.g. ``'train_rmse'``: numpy array with accuracy values
for each trainset. Only available if ``return_train_measures`` is
``True``.
- ``'fit_time'``: numpy array with the training time in seconds for
each split.
- ``'test_time'``: numpy array with the testing time in seconds for
each split.
"""
measures = [m.lower() for m in measures]
cv = get_cv(cv)
delayed_list = (
delayed(fit_and_score)(algo, trainset, testset, measures, return_train_measures)
for (trainset, testset) in cv.split(data)
)
out = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch)(delayed_list)
(test_measures_dicts, train_measures_dicts, fit_times, test_times) = zip(*out)
test_measures = dict()
train_measures = dict()
ret = dict()
for m in measures:
# transform list of dicts into dict of lists
# Same as in GridSearchCV.fit()
test_measures[m] = np.asarray([d[m] for d in test_measures_dicts])
ret["test_" + m] = test_measures[m]
if return_train_measures:
train_measures[m] = np.asarray([d[m] for d in train_measures_dicts])
ret["train_" + m] = train_measures[m]
ret["fit_time"] = fit_times
ret["test_time"] = test_times
if verbose:
print_summary(
algo,
measures,
test_measures,
train_measures,
fit_times,
test_times,
cv.n_splits,
)
return ret
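# Usage sketch (illustrative only): cross-validate an algorithm and inspect
# the returned dict:
#
#     from surprise import Dataset, SVD
#     from surprise.model_selection import cross_validate
#
#     data = Dataset.load_builtin('ml-100k')
#     results = cross_validate(SVD(), data, measures=['rmse', 'mae'], cv=5,
#                              verbose=True)
#     print(results['test_rmse'].mean())  # average RMSE over the 5 folds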
def fit_and_score(algo, trainset, testset, measures, return_train_measures=False):
"""Helper method that trains an algorithm and compute accuracy measures on
a testset. Also report train and test times.
Args:
algo(:obj:`AlgoBase \\
<surprise.prediction_algorithms.algo_base.AlgoBase>`):
The algorithm to use.
trainset(:obj:`Trainset <surprise.trainset.Trainset>`): The trainset.
testset(:obj:`testset`): The testset.
measures(list of string): The performance measures to compute. Allowed
names are function names as defined in the :mod:`accuracy
<surprise.accuracy>` module.
return_train_measures(bool): Whether to compute performance measures on
the trainset. Default is ``False``.
Returns:
tuple: A tuple containing:
- A dictionary mapping each accuracy metric to its value on the
testset (keys are lower case).
- A dictionary mapping each accuracy metric to its value on the
trainset (keys are lower case). This dict is empty if
return_train_measures is False.
- The fit time in seconds.
- The testing time in seconds.
"""
start_fit = time.time()
algo.fit(trainset)
fit_time = time.time() - start_fit
start_test = time.time()
predictions = algo.test(testset)
test_time = time.time() - start_test
if return_train_measures:
train_predictions = algo.test(trainset.build_testset())
test_measures = dict()
train_measures = dict()
for m in measures:
f = getattr(accuracy, m.lower())
test_measures[m] = f(predictions, verbose=0)
if return_train_measures:
train_measures[m] = f(train_predictions, verbose=0)
return test_measures, train_measures, fit_time, test_time
def print_summary(
algo, measures, test_measures, train_measures, fit_times, test_times, n_splits
):
"""Helper for printing the result of cross_validate."""
print(
"Evaluating {} of algorithm {} on {} split(s).".format(
", ".join(m.upper() for m in measures), algo.__class__.__name__, n_splits
)
)
print()
row_format = "{:<18}" + "{:<8}" * (n_splits + 2)
s = row_format.format(
"", *[f"Fold {i + 1}" for i in range(n_splits)] + ["Mean"] + ["Std"]
)
s += "\\n"
s += "\\n".join(
row_format.format(
key.upper() + " (testset)",
*[f"{v:1.4f}" for v in vals]
+ [f"{np.mean(vals):1.4f}"]
+ [f"{np.std(vals):1.4f}"],
)
for (key, vals) in test_measures.items()
)
if train_measures:
s += "\\n"
s += "\\n".join(
row_format.format(
key.upper() + " (trainset)",
*[f"{v:1.4f}" for v in vals]
+ [f"{np.mean(vals):1.4f}"]
+ [f"{np.std(vals):1.4f}"],
)
for (key, vals) in train_measures.items()
)
s += "\\n"
s += row_format.format(
"Fit time",
*[f"{t:.2f}" for t in fit_times]
+ [f"{np.mean(fit_times):.2f}"]
+ [f"{np.std(fit_times):.2f}"],
)
s += "\\n"
s += row_format.format(
"Test time",
*[f"{t:.2f}" for t in test_times]
+ [f"{np.mean(test_times):.2f}"]
+ [f"{np.std(test_times):.2f}"],
)
print(s)
from abc import ABC, abstractmethod
from itertools import product
import numpy as np
from joblib import delayed, Parallel
from ..dataset import DatasetUserFolds
from ..utils import get_rng
from .split import get_cv
from .validation import fit_and_score
class BaseSearchCV(ABC):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(
self,
algo_class,
measures=["rmse", "mae"],
cv=None,
refit=False,
return_train_measures=False,
n_jobs=1,
pre_dispatch="2*n_jobs",
joblib_verbose=0,
):
self.algo_class = algo_class
self.measures = [measure.lower() for measure in measures]
self.cv = cv
if isinstance(refit, str):
if refit.lower() not in self.measures:
raise ValueError(
"It looks like the measure you want to use "
"with refit ({}) is not in the measures "
"parameter".format(refit)
)
self.refit = refit.lower()
elif refit is True:
self.refit = self.measures[0]
else:
self.refit = False
self.return_train_measures = return_train_measures
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self.joblib_verbose = joblib_verbose
def _parse_options(self, params):
# As sim_options and bsl_options are dictionaries, they require a
# special treatment.
if "sim_options" in params:
sim_options = params["sim_options"]
sim_options_list = [
dict(zip(sim_options, v)) for v in product(*sim_options.values())
]
params["sim_options"] = sim_options_list
if "bsl_options" in params:
bsl_options = params["bsl_options"]
bsl_options_list = [
dict(zip(bsl_options, v)) for v in product(*bsl_options.values())
]
params["bsl_options"] = bsl_options_list
return params
def fit(self, data):
"""Runs the ``fit()`` method of the algorithm for all parameter
combinations, over different splits given by the ``cv`` parameter.
Args:
data (:obj:`Dataset <surprise.dataset.Dataset>`): The dataset on
which to evaluate the algorithm. Evaluations are run in parallel.
"""
if self.refit and isinstance(data, DatasetUserFolds):
raise ValueError(
"refit cannot be used when data has been "
"loaded with load_from_folds()."
)
cv = get_cv(self.cv)
delayed_list = (
delayed(fit_and_score)(
self.algo_class(**params),
trainset,
testset,
self.measures,
self.return_train_measures,
)
for params, (trainset, testset) in product(
self.param_combinations, cv.split(data)
)
)
out = Parallel(
n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch,
verbose=self.joblib_verbose,
)(delayed_list)
(test_measures_dicts, train_measures_dicts, fit_times, test_times) = zip(*out)
# test_measures_dicts is a list of dict like this:
# [{'mae': 1, 'rmse': 2}, {'mae': 2, 'rmse': 3} ...]
# E.g. for 5 splits, the first 5 dicts are for the first param
# combination, the next 5 dicts are for the second param combination,
# etc...
# We convert it into a dict of list:
# {'mae': [1, 2, ...], 'rmse': [2, 3, ...]}
# Each list is still of size n_parameters_combinations * n_splits.
# Then, reshape each list to have 2-D arrays of shape
# (n_parameters_combinations, n_splits). This way we can easily compute
# the mean and std dev over all splits or over all param comb.
test_measures = dict()
train_measures = dict()
new_shape = (len(self.param_combinations), cv.get_n_folds())
for m in self.measures:
test_measures[m] = np.asarray([d[m] for d in test_measures_dicts])
test_measures[m] = test_measures[m].reshape(new_shape)
if self.return_train_measures:
train_measures[m] = np.asarray([d[m] for d in train_measures_dicts])
train_measures[m] = train_measures[m].reshape(new_shape)
cv_results = dict()
best_index = dict()
best_params = dict()
best_score = dict()
best_estimator = dict()
for m in self.measures:
# cv_results: set measures for each split and each param comb
for split in range(cv.get_n_folds()):
cv_results[f"split{split}_test_{m}"] = test_measures[m][:, split]
if self.return_train_measures:
cv_results[f"split{split}_train_{m}"] = train_measures[m][:, split]
# cv_results: set mean and std over all splits (testset and
# trainset) for each param comb
mean_test_measures = test_measures[m].mean(axis=1)
cv_results[f"mean_test_{m}"] = mean_test_measures
cv_results[f"std_test_{m}"] = test_measures[m].std(axis=1)
if self.return_train_measures:
mean_train_measures = train_measures[m].mean(axis=1)
cv_results[f"mean_train_{m}"] = mean_train_measures
cv_results[f"std_train_{m}"] = train_measures[m].std(axis=1)
# cv_results: set rank of each param comb
# also set best_index, and best_xxxx attributes
indices = cv_results[f"mean_test_{m}"].argsort()
cv_results[f"rank_test_{m}"] = np.empty_like(indices)
if m in ("mae", "rmse", "mse"):
cv_results[f"rank_test_{m}"][indices] = (
np.arange(len(indices)) + 1
) # sklearn starts at 1 as well
best_index[m] = mean_test_measures.argmin()
elif m in ("fcp",):
cv_results[f"rank_test_{m}"][indices] = np.arange(len(indices), 0, -1)
best_index[m] = mean_test_measures.argmax()
best_params[m] = self.param_combinations[best_index[m]]
best_score[m] = mean_test_measures[best_index[m]]
best_estimator[m] = self.algo_class(**best_params[m])
# Cv results: set fit and train times (mean, std)
fit_times = np.array(fit_times).reshape(new_shape)
test_times = np.array(test_times).reshape(new_shape)
for s, times in zip(("fit", "test"), (fit_times, test_times)):
cv_results[f"mean_{s}_time"] = times.mean(axis=1)
cv_results[f"std_{s}_time"] = times.std(axis=1)
# cv_results: set params key and each param_* values
cv_results["params"] = self.param_combinations
for param in self.param_combinations[0]:
cv_results["param_" + param] = [
comb[param] for comb in self.param_combinations
]
if self.refit:
best_estimator[self.refit].fit(data.build_full_trainset())
self.best_index = best_index
self.best_params = best_params
self.best_score = best_score
self.best_estimator = best_estimator
self.cv_results = cv_results
def test(self, testset, verbose=False):
"""Call ``test()`` on the estimator with the best found parameters
(according to the ``refit`` parameter). See :meth:`AlgoBase.test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>`.
Only available if ``refit`` is not ``False``.
"""
if not self.refit:
raise ValueError("refit is False, cannot use test()")
return self.best_estimator[self.refit].test(testset, verbose)
def predict(self, *args):
"""Call ``predict()`` on the estimator with the best found parameters
(according to the ``refit`` parameter). See :meth:`AlgoBase.predict()
<surprise.prediction_algorithms.algo_base.AlgoBase.predict>`.
Only available if ``refit`` is not ``False``.
"""
if not self.refit:
raise ValueError("refit is False, cannot use predict()")
return self.best_estimator[self.refit].predict(*args)
class GridSearchCV(BaseSearchCV):
"""The :class:`GridSearchCV` class computes accuracy metrics for an
algorithm on various combinations of parameters, over a cross-validation
procedure. This is useful for finding the best set of parameters for a
prediction algorithm. It is analogous to `GridSearchCV
<https://scikit-learn.org/stable/modules/generated/sklearn.
model_selection.GridSearchCV.html>`_ from scikit-learn.
See an example in the :ref:`User Guide <tuning_algorithm_parameters>`.
Args:
algo_class(:obj:`AlgoBase \\
<surprise.prediction_algorithms.algo_base.AlgoBase>`): The class
of the algorithm to evaluate.
param_grid(dict): Dictionary with algorithm parameters as keys and
lists of values as values. All combinations will be evaluated with the
desired algorithm. Dict parameters such as ``sim_options`` require
special treatment, see :ref:`this note<grid_search_note>`.
measures(list of string): The performance measures to compute. Allowed
names are function names as defined in the :mod:`accuracy
<surprise.accuracy>` module. Default is ``['rmse', 'mae']``.
cv(cross-validation iterator, int or ``None``): Determines how the
``data`` parameter will be split (i.e. how trainsets and testsets
will be defined). If an int is passed, :class:`KFold
<surprise.model_selection.split.KFold>` is used with the
appropriate ``n_splits`` parameter. If ``None``, :class:`KFold
<surprise.model_selection.split.KFold>` is used with
``n_splits=5``.
refit(bool or str): If ``True``, refit the algorithm on the whole
dataset using the set of parameters that gave the best average
performance for the first measure of ``measures``. Other measures
can be used by passing a string (corresponding to the measure
name). Then, you can use the ``test()`` and ``predict()`` methods.
``refit`` can only be used if the ``data`` parameter given to
``fit()`` hasn't been loaded with :meth:`load_from_folds()
<surprise.dataset.Dataset.load_from_folds>`. Default is ``False``.
return_train_measures(bool): Whether to compute performance measures on
the trainsets. If ``True``, the ``cv_results`` attribute will
also contain measures for trainsets. Default is ``False``.
n_jobs(int): The maximum number of parallel training procedures.
- If ``-1``, all CPUs are used.
- If ``1`` is given, no parallel computing code is used at all,\\
which is useful for debugging.
- For ``n_jobs`` below ``-1``, ``(n_cpus + n_jobs + 1)`` are\\
used. For example, with ``n_jobs = -2`` all CPUs but one are\\
used.
Default is ``1``.
pre_dispatch(int or string): Controls the number of jobs that get
dispatched during parallel execution. Reducing this number can be
useful to avoid an explosion of memory consumption when more jobs
get dispatched than CPUs can process. This parameter can be:
- ``None``, in which case all the jobs are immediately created\\
and spawned. Use this for lightweight and fast-running\\
jobs, to avoid delays due to on-demand spawning of the\\
jobs.
- An int, giving the exact number of total jobs that are\\
spawned.
- A string, giving an expression as a function of ``n_jobs``,\\
as in ``'2*n_jobs'``.
Default is ``'2*n_jobs'``.
joblib_verbose(int): Controls the verbosity of joblib: the higher, the
more messages.
Attributes:
best_estimator (dict of AlgoBase):
Using an accuracy measure as key, get the algorithm that gave the
best accuracy results for the chosen measure, averaged over all
splits.
best_score (dict of floats):
Using an accuracy measure as key, get the best average score
achieved for that measure.
best_params (dict of dicts):
Using an accuracy measure as key, get the parameters combination
that gave the best accuracy results for the chosen measure (on
average).
best_index (dict of ints):
Using an accuracy measure as key, get the index (of ``cv_results``)
of the parameter combination that achieved the best accuracy for
that measure (on average).
cv_results (dict of arrays):
A dict that contains accuracy measures over all splits, as well as
train and test time for each parameter combination. Can be imported
into a pandas `DataFrame` (see :ref:`example
<cv_results_example>`).
"""
def __init__(
self,
algo_class,
param_grid,
measures=["rmse", "mae"],
cv=None,
refit=False,
return_train_measures=False,
n_jobs=1,
pre_dispatch="2*n_jobs",
joblib_verbose=0,
):
super().__init__(
algo_class=algo_class,
measures=measures,
cv=cv,
refit=refit,
return_train_measures=return_train_measures,
n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
joblib_verbose=joblib_verbose,
)
self.param_grid = self._parse_options(param_grid.copy())
self.param_combinations = [
dict(zip(self.param_grid, v)) for v in product(*self.param_grid.values())
]
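# Usage sketch (illustrative only): exhaustive search over a small SVD grid;
# best_score and best_params are keyed by measure name:
#
#     from surprise import Dataset, SVD
#     from surprise.model_selection import GridSearchCV
#
#     param_grid = {'n_epochs': [5, 10], 'lr_all': [0.002, 0.005]}
#     gs = GridSearchCV(SVD, param_grid, measures=['rmse', 'mae'], cv=3)
#     gs.fit(Dataset.load_builtin('ml-100k'))
#     print(gs.best_score['rmse'], gs.best_params['rmse'])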
class RandomizedSearchCV(BaseSearchCV):
"""The :class:`RandomizedSearchCV` class computes accuracy metrics for an
algorithm on various combinations of parameters, over a cross-validation
procedure. As opposed to GridSearchCV, which uses an exhaustive
combinatorial approach, RandomizedSearchCV samples randomly from the
parameter space. This is useful for finding the best set of parameters
for a prediction algorithm, especially using a coarse to fine approach.
It is analogous to `RandomizedSearchCV <https://scikit-learn.org/stable/
modules/generated/sklearn.model_selection.RandomizedSearchCV.html>`_ from
scikit-learn.
See an example in the :ref:`User Guide <tuning_algorithm_parameters>`.
Args:
algo_class(:obj:`AlgoBase \\
<surprise.prediction_algorithms.algo_base.AlgoBase>`): The class
of the algorithm to evaluate.
param_distributions(dict): Dictionary with algorithm parameters as
keys and distributions or lists of parameters to try. Distributions
must provide a rvs method for sampling (such as those from
scipy.stats.distributions). If a list is given, it is sampled
uniformly. Parameters will be sampled n_iter times.
n_iter(int): Number of times parameter settings are sampled. Default is
``10``.
measures(list of string): The performance measures to compute. Allowed
names are function names as defined in the :mod:`accuracy
<surprise.accuracy>` module. Default is ``['rmse', 'mae']``.
cv(cross-validation iterator, int or ``None``): Determines how the
``data`` parameter will be split (i.e. how trainsets and testsets
will be defined). If an int is passed, :class:`KFold
<surprise.model_selection.split.KFold>` is used with the
appropriate ``n_splits`` parameter. If ``None``, :class:`KFold
<surprise.model_selection.split.KFold>` is used with
``n_splits=5``.
refit(bool or str): If ``True``, refit the algorithm on the whole
dataset using the set of parameters that gave the best average
performance for the first measure of ``measures``. Other measures
can be used by passing a string (corresponding to the measure
name). Then, you can use the ``test()`` and ``predict()`` methods.
``refit`` can only be used if the ``data`` parameter given to
``fit()`` hasn't been loaded with :meth:`load_from_folds()
<surprise.dataset.Dataset.load_from_folds>`. Default is ``False``.
return_train_measures(bool): Whether to compute performance measures on
the trainsets. If ``True``, the ``cv_results`` attribute will
also contain measures for trainsets. Default is ``False``.
n_jobs(int): The maximum number of parallel training procedures.
- If ``-1``, all CPUs are used.
- If ``1`` is given, no parallel computing code is used at all,\\
which is useful for debugging.
- For ``n_jobs`` below ``-1``, ``(n_cpus + n_jobs + 1)`` are\\
used. For example, with ``n_jobs = -2`` all CPUs but one are\\
used.
Default is ``1``.
pre_dispatch(int or string): Controls the number of jobs that get
dispatched during parallel execution. Reducing this number can be
useful to avoid an explosion of memory consumption when more jobs
get dispatched than CPUs can process. This parameter can be:
- ``None``, in which case all the jobs are immediately created\\
and spawned. Use this for lightweight and fast-running\\
jobs, to avoid delays due to on-demand spawning of the\\
jobs.
- An int, giving the exact number of total jobs that are\\
spawned.
- A string, giving an expression as a function of ``n_jobs``,\\
as in ``'2*n_jobs'``.
Default is ``'2*n_jobs'``.
random_state(int, RandomState or None): Pseudo random number
generator seed used for random uniform sampling from lists of
possible values instead of scipy.stats distributions. If int,
``random_state`` is the seed used by the random number generator.
If ``RandomState`` instance, ``random_state`` is the random number
generator. If ``None``, the random number generator is the
RandomState instance used by ``np.random``. Default is ``None``.
joblib_verbose(int): Controls the verbosity of joblib: the higher, the
more messages.
Attributes:
best_estimator (dict of AlgoBase):
Using an accuracy measure as key, get the algorithm that gave the
best accuracy results for the chosen measure, averaged over all
splits.
best_score (dict of floats):
Using an accuracy measure as key, get the best average score
achieved for that measure.
best_params (dict of dicts):
Using an accuracy measure as key, get the parameters combination
that gave the best accuracy results for the chosen measure (on
average).
best_index (dict of ints):
Using an accuracy measure as key, get the index (of ``cv_results``)
of the parameter combination that achieved the best accuracy for
that measure (on average).
cv_results (dict of arrays):
A dict that contains accuracy measures over all splits, as well as
train and test time for each parameter combination. Can be imported
into a pandas `DataFrame` (see :ref:`example
<cv_results_example>`).
"""
def __init__(
self,
algo_class,
param_distributions,
n_iter=10,
measures=["rmse", "mae"],
cv=None,
refit=False,
return_train_measures=False,
n_jobs=1,
pre_dispatch="2*n_jobs",
random_state=None,
joblib_verbose=0,
):
super().__init__(
algo_class=algo_class,
measures=measures,
cv=cv,
refit=refit,
return_train_measures=return_train_measures,
n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
joblib_verbose=joblib_verbose,
)
self.n_iter = n_iter
self.random_state = random_state
self.param_distributions = self._parse_options(param_distributions.copy())
self.param_combinations = self._sample_parameters(
self.param_distributions, self.n_iter, self.random_state
)
@staticmethod
def _sample_parameters(param_distributions, n_iter, random_state=None):
"""Samples ``n_iter`` parameter combinations from
``param_distributions`` using ``random_state`` as a seed.
Non-deterministic iterable over random candidate combinations for
hyper-parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used
to define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Args:
param_distributions(dict): Dictionary where the keys are
parameters and values are distributions from which a parameter
is to be sampled. Distributions either have to provide a
``rvs`` function to sample from them, or can be given as a list
of values, where a uniform distribution is assumed.
n_iter(int): Number of parameter settings produced.
Default is ``10``.
random_state(int, RandomState instance or None):
Pseudo random number generator seed used for random uniform
sampling from lists of possible values instead of scipy.stats
distributions. If ``None``, the random number generator is the
random state instance used by np.random. Default is ``None``.
Returns:
combos(list): List of parameter dictionaries with sampled values.
"""
# check if all distributions are given as lists
# if so, sample without replacement
all_lists = np.all(
[not hasattr(v, "rvs") for v in param_distributions.values()]
)
rnd = get_rng(random_state)
# sort for reproducibility
items = sorted(param_distributions.items())
if all_lists:
# create exhaustive combinations
param_grid = [
dict(zip(param_distributions, v))
for v in product(*param_distributions.values())
]
combos = rnd.choice(param_grid, n_iter, replace=False)
else:
combos = []
for _ in range(n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
combos.append(params)
return combos
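# Usage sketch (illustrative only): random search mixing a scipy distribution
# (sampled via rvs) with a plain list (sampled uniformly):
#
#     from scipy.stats import uniform
#     from surprise import Dataset, SVD
#     from surprise.model_selection import RandomizedSearchCV
#
#     param_distributions = {'lr_all': uniform(0.001, 0.01),
#                            'n_epochs': [5, 10, 20]}
#     rs = RandomizedSearchCV(SVD, param_distributions, n_iter=8, cv=3,
#                             random_state=0)
#     rs.fit(Dataset.load_builtin('ml-100k'))
#     print(rs.best_params['rmse'])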
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import kfp
import kfp.dsl as dsl
import json
from pathlib import Path
class aionpipeline():
containerRegistry = str()
containerLabel = str()
containerSecret = str()
pipelineName = 'AION MLOps Pipeline {0}'
exeCmd = 'python'
codeFile = 'aionCode.py'
mntPoint = '/aion'
inputArg = '-i'
msIP = '0.0.0.0'
port = '8094'
cachingStrategy = 'P0D'
defaultVolume = '1Gi'
volName = 'aion-pvc'
volMode = 'ReadWriteMany'
fileExt = '.tar.gz'
fileName = 'aion_mlops_pipeline_{0}'
containerMM = 'modelmonitoring'
containerDI = 'dataingestion'
containerDT = 'datatransformation'
containerFE = 'featureengineering'
containerMR = 'modelregistry'
containerMS = 'modelserving'
containerImage = '{0}/{1}:{2}'
models = {}
nameSeparator = '-'
modelsLiteral = 'models'
modelNameLiteral = 'modelname'
msTemplate = '{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "{{workflow.name}}-{0}"}, "spec": {"containers": [{"name": "{0}", "image": "{1}", "command": ["python"], "args": ["aionCode.py", "-ip", "{2}", "-pn", "{3}"],"volumeMounts": [{"name": "aion-pvc", "mountPath": "{4}"}], "ports": [{"name": "http", "containerPort": {3}, "protocol": "TCP"}]}], "imagePullSecrets": [{"name": "{5}"}], "volumes": [{"name": "aion-pvc", "persistentVolumeClaim": {"claimName": "{{workflow.name}}-{6}"}}]}}'
def __init__(self, models, containerRegistry, containerLabel, containerSecret=str()):
self.models = models
self.containerRegistry = containerRegistry
self.containerLabel = containerLabel
self.containerSecret = containerSecret
@dsl.pipeline(
name=pipelineName.format(containerLabel),
description=pipelineName.format(containerLabel),
)
def aion_mlops(self, inputUri=str(), volSize=defaultVolume):
vop = dsl.VolumeOp(
name=self.volName + self.nameSeparator + self.containerLabel,
resource_name=self.volName,
modes=[self.volMode],
size=volSize
)
mm = dsl.ContainerOp(
name=self.containerMM,
image=self.containerImage.format(self.containerRegistry,self.containerMM,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
self.inputArg,
inputUri,
],
pvolumes={self.mntPoint: vop.volume}
)
mm.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
di = dsl.ContainerOp(
name=self.containerDI,
image=self.containerImage.format(self.containerRegistry,self.containerDI,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
],
pvolumes={self.mntPoint: mm.pvolume}
)
di.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
dt = dsl.ContainerOp(
name=self.containerDT,
image=self.containerImage.format(self.containerRegistry,self.containerDT,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
],
pvolumes={self.mntPoint: di.pvolume}
)
dt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
fe = dsl.ContainerOp(
name=self.containerFE,
image=self.containerImage.format(self.containerRegistry,self.containerFE,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
],
pvolumes={self.mntPoint: dt.pvolume}
)
fe.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
dictMT = {}
listMTOps = []
for model in self.models[self.modelsLiteral]:
modelName = model[self.modelNameLiteral]
mt=dsl.ContainerOp(
name=modelName,
image=self.containerImage.format(self.containerRegistry,modelName,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
],
pvolumes={self.mntPoint: fe.pvolume})
mt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
listMTOps.append(mt)
dictMT[self.mntPoint]=mt.pvolume
mr = dsl.ContainerOp(
name=self.containerMR,
image=self.containerImage.format(self.containerRegistry,self.containerMR,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
],
pvolumes=dictMT
).after(*tuple(listMTOps))
mr.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
msJson = (self.msTemplate
.replace('{0}', self.containerMS)
.replace('{1}', self.containerImage.format(self.containerRegistry, self.containerMS, self.containerLabel))
.replace('{2}', self.msIP)
.replace('{3}', self.port)
.replace('{4}', self.mntPoint)
.replace('{5}', self.containerSecret)
.replace('{6}', self.volName))
ms = dsl.ResourceOp(
name=self.containerMS + self.nameSeparator + self.containerLabel,
k8s_resource=json.loads(msJson),
)
ms.after(mr)
def compilepl(self, targetPath=str()):
filePath = self.fileName.format(self.containerLabel.lower()) + self.fileExt
if targetPath != str():
filePath = Path(targetPath, filePath)
kfp.compiler.Compiler().compile(self.aion_mlops, str(filePath))
def executepl(self, kfhost=str()):
client = kfp.Client(kfhost)
client.create_run_from_pipeline_func(self.aion_mlops,arguments={})
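# Usage sketch (illustrative only; the registry, label, secret and model
# names below are placeholders): compile the pipeline to a .tar.gz and
# optionally submit it to a Kubeflow Pipelines host.
#
#     models = {'models': [{'modelname': 'modeltraining-xgboost'}]}
#     pl = aionpipeline(models, containerRegistry='myregistry.io/aion',
#                       containerLabel='v1', containerSecret='regcred')
#     pl.compilepl(targetPath='/tmp')
#     # pl.executepl(kfhost='http://kfp.example.com')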
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from pathlib import Path
import sqlite3
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem
db_file = str(location / self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
def table_exists(self, name):
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
return len(listOfTables) > 0
def read_data(self, table_name, condition = None):
if condition:
query = f"SELECT * FROM {table_name} WHERE "+condition
else:
query = f"SELECT * FROM {table_name}"
row = self.cursor.execute(query).fetchall()
return list(row)
def column_names(self, table_name):
query = f"SELECT * FROM {table_name}"
row = self.cursor.execute(query).fetchall()
column_names = list(map(lambda x:x[0],self.cursor.description))
return column_names
# return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
def create_table(self, name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def delete_record(self, table_name, col_name, col_value):
try:
query = f"DELETE FROM {table_name} WHERE {col_name}='{col_value}'"
self.conn.execute(query)
self.conn.commit()
return 'success'
except Exception as e:
print(str(e))
print("Deletion Failed")
return 'error'
def drop_table(self,table_name):
query = f"DROP TABLE {table_name}"
self.cursor.execute(query)
print("Table dropped... ")
# Commit your changes in the database
self.conn.commit()
def get_data(self, table_name, col_name, col_value):
query = f"SELECT * FROM {table_name} WHERE {col_name}='{col_value}'"
row = self.cursor.execute(query).fetchone()
if (row == None):
return []
return list(row)
def execute_query(self,query):
self.cursor.execute(query)
self.conn.commit()
def write_data(self, data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def update_dict_data(self,data:dict,condition,table_name):
if not data:
return
if not table_name:
raise ValueError('Database table name is not provided')
updates = ''
#TODO validation of keys
for i,kv in enumerate(data.items()):
if i:
updates += ','
updates += f'"{kv[0]}"="{kv[1]}"'
if condition == '':
update_query = f'UPDATE {table_name} SET {updates}'
else:
update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}'
self.cursor.execute(update_query)
self.conn.commit()
def update_data(self,updates,condition,table_name):
if condition == '':
update_query = f'UPDATE {table_name} SET {updates}'
else:
update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}'
self.cursor.execute(update_query)
self.conn.commit()
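# Usage sketch (illustrative only; the table and column names below are
# placeholders):
#
#     import pandas as pd
#     db = sqlite_db('/tmp/artifacts', database_file='runs.db')
#     db.write_data(pd.DataFrame({'run': ['r1'], 'status': ['ok']}), 'runs')
#     print(db.read_data('runs'))
#     db.update_dict_data({'status': 'done'}, "run='r1'", 'runs')
#     db.close()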
def close(self):
self.conn.close()
# -*- coding: utf-8 -*-
import os
import glob
import json
import time
import logging
import warnings
import importlib.util
import concurrent.futures
import numpy as np
import pandas as pd
import tiktoken
import openai
from openai.embeddings_utils import cosine_similarity, get_embedding
from openai.error import RateLimitError, Timeout
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
from sklearn import metrics
from tqdm import tqdm
try:
import chromadb
from chromadb.api.types import Documents, Embeddings
except ImportError:
# chromadb is not installed; proceed with csv-based embeddings
pass
from langchain.schema import Document
from langchain.vectorstores import Chroma
warnings.simplefilter(action='ignore', category=FutureWarning)
"""Code clone detection parent class. Based on user input data, the class
detects similar code snippets in the given python files."""
class CodeCloneDetection:
#Constructor for base inputs
def __init__(self,rootdir,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId):
self.rootdir=rootdir
self.embedd_storage_path=embedd_storage_path
self.openai_baseurl=openai_baseurl
self.openai_key=openai_key
self.openai_api_type=openai_api_type
self.openai_api_version=openai_api_version
self.ccdreportpath = os.path.join(self.embedd_storage_path, "codeCloneReport")
self.generativeai_chat_model=generativeai_chat_model
self.generativeai_embedding_engine = generativeai_embedding_engine
self.generativeai_embedding_model = generativeai_embedding_model
self.generativeai_deploymentId = generativeai_deploymentId
try:
os.makedirs(self.ccdreportpath, exist_ok = True)
except OSError as error:
print("Directory 'codeclonedetection' can not be created",self.ccdreportpath)
try:
self.logpath = os.path.join(self.ccdreportpath,'codeclonelog.log')
logging.basicConfig(level=logging.INFO,filename=self.logpath,filemode='w',format='%(message)s')
self.log = logging.getLogger()
except Exception as e:
print("code clone log object creation error.",e)
def get_function_name(self,code):
"""
Extract function name from a line beginning with "def "
"""
assert code.startswith("def ")
return code[len("def "): code.index("(")]
def get_until_no_space(self,all_lines, i) -> str:
"""
Get all lines until a line outside the function definition is found.
"""
ret = [all_lines[i]]
for j in range(i + 1, i + 10000):
if j < len(all_lines):
if len(all_lines[j]) == 0 or all_lines[j][0] in [" ", "\\t", ")"]:
ret.append(all_lines[j])
else:
break
return "\\n".join(ret)
def chunk_functions(self,function_code, chunk_size):
""" To chunk input for gpt models because max token per model is 4090 """
try:
# chunk_size = 1900
chunks = [function_code[i:i + chunk_size] for i in range(0, len(function_code), chunk_size)]
except Exception as e:
self.log.info('Error in chunking input prompt data. Error message: ' + str(e))
chunks = []
return chunks
def get_functions(self,filepath):
"""
Get all functions in a Python file.
"""
try:
whole_code = open(filepath).read().replace("\\r", "\\n")
all_lines = whole_code.split("\\n")
for i, l in enumerate(all_lines):
if l.startswith("def "):
code = self.get_until_no_space(all_lines, i)
function_name = self.get_function_name(code)
yield {"code": code, "function_name": function_name, "filepath": filepath}
except Exception as e:
self.log.info("Error in getting function from file. Error message: \\n"+str(e))
def get_clone_function_details(self):
""" To get available functions from python files """
try:
code_root=self.rootdir
from glob import glob
code_files = [y for x in os.walk(code_root) for y in glob(os.path.join(x[0], '*.py'))]
if code_files:
all_funcs = []
total_locs = 0
for code_file in code_files:
with open(code_file) as f:
total_locs += len(f.readlines())
funcs = list(self.get_functions(code_file))
for func in funcs:
all_funcs.append(func)
return all_funcs,code_root,code_files,total_locs
else:
self.log.info("no python files available in the dir:"+str(code_root))
return {"pythondiles_error":"No python files are found."}
except Exception as e:
print("Error in reading the functions from the given directory. Error message: \\n",e)
self.log.info("Error in reading the functions from the given directory. Error message: \\n"+str(e))
def getOpenAICredentials(self):
""" To set openai credential using user input """
#Currently only support openai
try:
package_name = 'openai'
lib_name = importlib.util.find_spec(package_name)
if lib_name is None:
return "openai_pkg_check_failed"
else:
embedding_model_lib ='openai'
#
if isinstance(self.openai_baseurl,str) and isinstance(self.openai_key,str) and isinstance(self.openai_api_type,str):
os.environ['OPENAI_API_TYPE'] = self.openai_api_type
os.environ['OPENAI_API_BASE'] = self.openai_baseurl
# os.environ['OPENAI_API_VERSION'] = '2023-05-15'
# os.environ['OPENAI_API_VERSION'] = "2022-12-01"
os.environ['OPENAI_API_VERSION'] = self.openai_api_version
os.environ['OPENAI_API_KEY'] = self.openai_key
if (embedding_model_lib.lower()=='openai'):
try:
openai.api_type=os.getenv('OPENAI_API_TYPE')
openai.api_base = os.getenv('OPENAI_API_BASE')
openai.api_key = os.getenv('OPENAI_API_KEY')
openai.api_version = os.getenv('OPENAI_API_VERSION')
except Exception as e:
self.log.info("Unable to get openai credentials,please provide proper credentials."+str(e))
return {"error_msg":"openai_environment_error"}
except Exception as e:
print("Openai credential set and get function error. Error message: \\n",e)
return openai.api_type,openai.api_base,openai.api_key,openai.api_version
def get_embedding_local(self, model: str, text: str) -> list[float]:
""" To get embedding data for single user given prompt text"""
try:
response = openai.Embedding.create(
input=text,
engine=self.generativeai_embedding_engine)
return response['data'][0]['embedding']
except Exception as e:
self.log.info("openai embedding creation error." + str(e))
raise
def get_embeddings_pyfiles(self,all_funcs):
""" To get embedding for python functions """
try:
import tiktoken
openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials()
encoding = tiktoken.encoding_for_model("text-embedding-ada-002")
df = pd.DataFrame(all_funcs)
df["tokens"] = df["code"].apply(lambda c: len(encoding.encode(c)))
embedding_cost = df["tokens"].sum() * (0.0004/1000)
EMBEDDING_FILEPATH = os.path.join(self.ccdreportpath, 'code_embeddings.csv')
self.log.info("embedding storage location: "+str(EMBEDDING_FILEPATH))
vdb_status = self.get_vdb_status('chromadb')
##Currently chromadb not integrated
vdb_status = False
if not vdb_status:
df['code_embedding'] = df['code'].apply(lambda x: get_embedding(x, engine=self.generativeai_embedding_engine))
df['filepath'] = df['filepath'].apply(lambda x: x.replace(self.rootdir, ""))
df.to_csv(EMBEDDING_FILEPATH, index=False)
else:
df = self.chromadb_embedding(df)
""" Please uncomment below, currently assumption is each run we create embedd based on python files dir """
import numpy as np
df = pd.read_csv(EMBEDDING_FILEPATH)
df["code_embedding"] = df["code_embedding"].apply(eval).apply(np.array)
except Exception as e:
self.log.info("Error in get_embeddings_pyfiles for embedding conversion process. Error Message: "+str(e))
raise Exception("Error in get_embeddings_pyfiles for embedding conversion process.")
return df,embedding_cost
def search_functions_vectordb(self, df, db, code_query, n=3, pprint=True, n_lines=7):
""" Search function for user query (prompt content), used for vector database embedding query option. """
try:
docs = db.similarity_search_with_score(code_query )[:n]
docs = [{"similarities":score, "code": d.page_content, **d.metadata} for d,score in docs]
res = pd.DataFrame(docs).drop("_additional", axis=1)
##Uncomment for debug
# if pprint:
# for r in res.iterrows():
# print(r[1].filepath+" : "+r[1].function_name + " score=" + str(round(r[1].similarities, 3)))
# print("\\n".join(r[1].code.split("\\n")[:n_lines]))
# print('-'*70)
except Exception as e:
self.log.info("Error in search_functions_vectordb to get similarity information based on user query. Error Message: "+str(e))
raise Exception("Error in search_functions_csv to get similarity information based on user query.")
return res
def search_functions_csv(self,df, code_query, n=3, pprint=True, n_lines=7):
""" Search function for user query (prompt content), used for csv embedding query option. """
try:
embedding = get_embedding(code_query, engine=self.generativeai_embedding_engine)
df['similarities'] = df.code_embedding.apply(lambda x: cosine_similarity(x, embedding))
res = df.sort_values('similarities', ascending=False)
## uncomment for debug purpose
# if pprint:
# for r in res.iterrows():
# print(r[1].filepath+" : "+r[1].function_name + " score=" + str(round(r[1].similarities, 3)))
# print("\\n".join(r[1].code.split("\\n")[:n_lines]))
# print('-'*70)
except Exception as e:
self.log.info("Error in search_functions_functions_csv to get similarity information based on user query. Error Message: "+str(e))
raise Exception("Error in search_functions_csv to get similarity information based on user query.")
return res
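# Usage sketch (illustrative only): query the csv embeddings for functions
# similar to a prompt; assumes get_embeddings_pyfiles() already produced df.
#
#     res = self.search_functions_csv(df, 'def add(a, b): return a + b', n=3)
#     print(res[['filepath', 'function_name', 'similarities']].head(3))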
def get_prediction(self,prompt_data):
""" To get prediction for given user data """
try:
all_funcs,code_root,code_files,total_locs=self.get_clone_function_details()
if not isinstance(all_funcs,type(None)):
df,embedding_cost=self.get_embeddings_pyfiles(all_funcs)
res = self.search_functions_csv(df, prompt_data, n=3)
return res
else:
return dict({"error":"Empty_root_directory"})
except Exception as e:
self.log.info("Error in get prediction for user prompt information. Error Message: "+str(e))
raise Exception("Error in get prediction for user prompt information. .")
def get_vdb_status(self,vdb_name):
""" To check chromadb python package installed or not"""
try:
vdb_name = 'chromadb'
vdb_status=False
lib_name = importlib.util.find_spec(vdb_name)
if lib_name is None:
vdb_status=False
else:
vdb_status=True
## Processing the files and create a embedding and save it using csv.
except Exception as e:
self.log.info("Error in checking chromadb installed or not. Error Message: "+str(e))
raise Exception("Error in checking chromadb installed or not. .")
## Currently vector db (chromadb) not implemented, so vdb_status is set as False
vdb_status = False
return vdb_status
def create_chroma_db(self,documents, name):
""" Craete chromadb instance (persistant) """
#get openai status
openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials()
# openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials()
try:
from langchain.embeddings.openai import OpenAIEmbeddings
embed_function = OpenAIEmbeddings(deployment=self.generativeai_embedding_engine, chunk_size=1)
except:
from chromadb.utils import embedding_functions
embed_function = embedding_functions.OpenAIEmbeddingFunction(
api_key=openai.api_key,
api_base=openai.api_base,
api_type = openai.api_type,
model_name=self.generativeai_embedding_model
)
try:
from chromadb.config import Settings
persist_directory = self.embedd_storage_path
chroma_client = chromadb.Client(
Settings(
persist_directory=persist_directory,
chroma_db_impl="duckdb+parquet",
)
)
# Start from scratch
chroma_client.reset()
chroma_client.persist()
try:
embed_function = OpenAIEmbeddings(deployment=self.generativeai_embedding_engine, chunk_size=1)
except:
embed_function = OpenAIEmbeddings()
db = Chroma.from_documents(documents, embed_function, persist_directory=persist_directory)
db.persist()
except Exception as e:
self.log.info("Error in chromadb based embeding creation. Error Message: "+str(e))
raise Exception("Error in chromadb based embeding creation.")
return db,chroma_client
def chromadb_embedding(self,df):
""" Base chromadb embedding creation and storage function, it |
calls above create_chroma_db() to create db.
"""
try:
documents = df.apply(lambda x: Document(page_content= x["code"], metadata= {"function_name": x["function_name"], "filepath": x["filepath"]}), axis=1)
#setup the chromadb
collection_name = 'code_clone_collection'  ## assumed collection name; not defined elsewhere in this module
db,chroma_client = self.create_chroma_db(documents,collection_name)
try:
chromadb_df=pd.DataFrame(db)
except:
db_json = db.get(include=['embeddings', 'documents', 'metadatas'])
chromadb_df = pd.DataFrame(db_json)  ## db.get() returns a dict of lists, so build the frame directly
self.log.info("chromadb_df records (top ~5 records): "+str(chromadb_df.head(5)))
except Exception as e:
self.log.info("chromadb embedding error. Error message: "+str(e))
return chromadb_df
def num_tokens_from_string(self, string: str) -> int:
""" Get number of tokens of text using tiktokens lib."""
encoding = tiktoken.encoding_for_model("text-embedding-ada-002")
num_tokens = len(encoding.encode(string))
return num_tokens
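## Illustrative usage of the tiktoken counting above (assumes the tiktoken
## package is installed; the exact count depends on the tokenizer):
# import tiktoken
# enc = tiktoken.encoding_for_model("text-embedding-ada-002")
# n = len(enc.encode("def add(a, b): return a + b"))  # token count for the snippet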
def validate_code_clone_with_explanation(self,code1, code2, verbose=False):
""" Validate clone detection code snippet and get explanation from openai chat model (gpt-3.5-turbo) """
## OpenAI treats roughly 4 characters as 1 token; the same heuristic is followed here, so we don't need to call the tiktoken lib (saves cost).
if (len(code1)/4 >1900):
chunk = self.chunk_functions(code1, 1900)
code1 = chunk[0]
print("In side , len of code1\\n",len(code1))
if (len(code2)/4 >1900):
chunk = self.chunk_functions(code2, 1900)
code2 = chunk[0]
print("In side , len of code2\\n",len(code2))
try:
SYS_ROLE = "You are a Senior Code Reviewer, who helps in Code review and integration using code clone detection approach."
openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials()
# openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials()
prompt = f"""Given two code snippets, find if they are clones or not with suitable explaination.
Four types of clone:
1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces.
2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone.
3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones.
4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone.
Use JSON object format with following keys:
IsClone: (True, False) whether two code snippets are clone or not.
CloneType: (Exact clone, Parameterized clone, Near-miss clone, Semantic clone) Choose appropriate clone type or "None".
Explanation: A short explanation for the above answer.
### Code Snippets:
## Code 1:
{code1}
## Code 2:
{code2}
### Answer(Valid JSON object):
"""
response = openai.ChatCompletion.create(deployment_id=self.generativeai_deploymentId,
messages=[{"role": "system", "content": SYS_ROLE},
{"role": "user", "content": prompt},],
temperature = 0,max_tokens = 3900,request_timeout=90)
text = response['choices'][0]['message']['content']
if verbose:
self.log.info("validate_code_clone_with_explanation, text: "+str(text))
except Exception as e:
print(" validate_code_clone_with_explanation: \\n",e)
response = "OpenAI Model Connection"
if e.code == "invalid_request" and "token limit" in e.message.lower():
# Implement your logic to reduce the length of messages or split them into smaller parts
# Modify messages or take appropriate action
self.log.info("Given function is too large and exceeds openai chat model token limit,please review the source file function length. "+str(e))
return response
def validate_code_clone_with_explanation_davinci(self,code1, code2, verbose=False):
""" Validate clone detection code snippet and get explanation from openai chat model (davinci) """
if (len(code1)/4 >1900):
chunk = self.chunk_functions(code1, 1900)
code1 = chunk[0]
if (len(code2)/4 >1900):
chunk = self.chunk_functions(code2, 1900)
code2 = chunk[0]
try:
SYS_ROLE = "You are a Senior Code Reviewer, who helps in Code review and integration. Detecting code clone in the repository."
openai_api_type,openai_api_base,openai_api_key,openai_api_version = self.getOpenAICredentials()
# openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials()
prompt = f"""Given two code snippets, find if they are clones or not with suitable explaination.
Four types of clone:
1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces.
2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone.
3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones.
4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone.
Use JSON object format with following keys:
IsClone: (True, False) whether two code snippets are clone or not.
CloneType: (Exact clone, Parameterized clone, Near-miss clone, Semantic clone) Choose appropriate clone type or "None".
Explanation: A short explanation for the above answer.
### Code Snippets:
## Code 1:
{code1}
## Code 2:
{code2}
### Answer(Valid JSON object):
"""
# response = openai.Completion.create(engine='Text-Datvinci-03', prompt=prompt, temperature=0, max_tokens=1166)
response = openai.Completion.create(engine=self.generativeai_chat_model, prompt=prompt, temperature=0, max_tokens=3900)
text = response.choices[0]["text"]
if verbose:
self.log.info("validate_code_clone_with_explanation, text (chatmodel response) "+str(text))
except Exception as e:
response = "OpenAI Model Connection Error"
if e.code == "invalid_request" and "token limit" in e.message.lower():
# Implement your logic to reduce the length of messages or split them into smaller parts
# Modify messages or take appropriate action
self.log.info("Given function is too large and exceeds openai chat model token limit,please review the source file function length. Error msg: "+str(e))
return response
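## Hedged sketch, separate from the class above: both validators ask the chat
## model for a JSON object (IsClone / CloneType / Explanation). A defensive
## parser for such responses could look like this (the function name and the
## fallback values are assumptions for illustration):
def parse_clone_answer_example(answer_text):
    """ Try strict JSON first; fall back to raw text if the model strayed from the format. """
    import json
    try:
        obj = json.loads(answer_text)
        return obj.get("IsClone"), obj.get("CloneType"), obj.get("Explanation")
    except (json.JSONDecodeError, TypeError):
        return None, None, answer_text  ## keep the raw text so nothing is lost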
## For dbscan based clone detection from python files, we use the CodeCloneDetection parent class (using inheritance).
class CodeCloneDetectionFiles(CodeCloneDetection):
"""For dbscan based clone detection from python files, we use the CodeCloneDetection
parent class (using inheritance).
"""
def __init__(self,root_dir,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId):
super().__init__(root_dir,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId)
def get_embedd_fns(self):
""" To get embedd vector, using parent class methods"""
try:
## Processing the files and create a embedding and save it using csv.
vdb_status = super().get_vdb_status('chromadb')
self.log.info("<------- AION Code Clone Detection started ... ------>\\n ")
if not vdb_status:
openai_api_type,openai_api_base,openai_api_key,openai_api_version = super().getOpenAICredentials()
# openai_api_type,openai_api_base,openai_api_key = self.getOpenAICredentials()
all_funcs,code_root,code_files,total_locs = super().get_clone_function_details()
if (openai.api_key or openai_api_key):
if not isinstance(all_funcs,type(None)):
embedded_df,embedding_cost=super().get_embeddings_pyfiles(all_funcs)
else:
return dict({"error": "Empty_root_directory"})  ## assumed error payload, mirroring get_prediction()
except Exception as e:
# print("Error in getting embedding vector using openai. Error message: ",e)
self.log.info("Error in getting embedding vector using openai. Error message: "+str(e))
raise Exception("Error in getting embedding vector using openai.")
return embedded_df,embedding_cost
def dbscan_clone_detection(self,df):
""" DBScan based code clone similarity detection (for functions in given dir """
try:
vdb_status = super().get_vdb_status('chromadb')
if not vdb_status:
X = np.array(list(df.code_embedding.values))
else:
X = np.array(list(df.embeddings.values))
#X = StandardScaler().fit_transform(X)
db = DBSCAN(eps=0.2, min_samples=2).fit(X)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
df["cluster"] = labels
cluster_result = []
for i in range(n_clusters_):
cluster_df = df.loc[df['cluster'] == i]
# with open("{}/cluster_{}.txt".format(self.ccdreportpath,i), "w") as f:
for index, row in cluster_df.iterrows():
cluster_result.append({"cluster_id": i,"filepath": row.filepath,"function_name": row.function_name,"code": row.code })
# f.write(f"Source File: {row.filepath}, Function Name: {row.function_name}")
#f.write(f"\\n{row.code}\\n\\n{'-'*80}\\n\\n")
cluster_result_df = pd.DataFrame(cluster_result)
codeclonereport_df = os.path.join(self.ccdreportpath,'cluster_result.csv')
cluster_result_df.to_csv(codeclonereport_df, index=False)
return cluster_result_df
except Exception as e:
self.log.info("Error in dbscan based similar code clone clustering. Error Message: "+str(e))
raise Exception("Error in dbscan based similar code clone clustering.")
def make_pairs(self,data_list:list):
try:
if len(data_list) <=1:
return []
return [(data_list[0], d) for d in data_list[1:]] + self.make_pairs(data_list[1:])
except Exception as e:
self.log.info("Error in make pairs function, error message: "+str(e))
raise Exception("Error in clone code mapping.")
def code_clone_check_with_retry(self,code1,code2, retry_interval=1):
""" Call chat models for code clone detection with retry mechanism. """
try:
# res = super().validate_code_clone_with_explanation(code1,code2)
if (self.generativeai_embedding_model.lower() =='text-embedding-ada-002' and self.generativeai_chat_model.lower() == 'text-datvinci-03'):
res = super().validate_code_clone_with_explanation_davinci(code1,code2)
return res
elif (self.generativeai_embedding_model.lower() =='text-embedding-ada-002' and self.generativeai_chat_model.lower() == 'gpt-3.5-turbo'):
res = super().validate_code_clone_with_explanation(code1,code2)
return res
except (RateLimitError, Timeout) as e:
self.log.info("Calling chat model issue in code clone check function, error message: "+str(e))
time.sleep(retry_interval)
return self.code_clone_check_with_retry(code1, code2)
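## Note: the retry above waits a fixed interval and recurses without a cap. A
## bounded exponential-backoff variant would look like this (sketch only; names
## are illustrative and it assumes the same RateLimitError/Timeout imports):
# import time
# def call_with_backoff(fn, *args, retries=5, base_delay=1.0):
#     for attempt in range(retries):
#         try:
#             return fn(*args)
#         except (RateLimitError, Timeout):
#             time.sleep(base_delay * (2 ** attempt))  # 1s, 2s, 4s, ...
#     raise RuntimeError(f"LLM call failed after {retries} retries")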
def res_formater(self,inp):
""" Function to format gpt-3.5 or text-davinci-003 response body. """
try:
line = inp.replace('{','')
line = line.replace('}','')
line = line.replace('"','')
end=line.split(',')
d1={}
l2=[]
for l in end:
l=l.split(',')
for i in l:
l1=i.split(":")
l2.append(l1)
df=pd.DataFrame(l2)
df=df.T
df.columns = df.iloc[0]
df = df[1:]
df.columns = df.columns.str.replace('[#,@,&,\\']', '', regex=True)
# df.to_csv('test1.csv', index=False)
response=df.iloc[0]
fl=response.to_list()
clone_status=fl[0]
clone_type=fl[1]
result=fl[2]
except Exception as e:
self.log.info("chat model response formatter error. Error message: "+str(e))
clone_status, clone_type, result = None, None, None  ## safe defaults so the return below cannot raise NameError
return clone_status,clone_type,result
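## Illustrative alternative: since the prompt explicitly requests a valid JSON
## object, json.loads() is usually more robust than the string splitting above
## (sketch only; assumes the model honoured the requested format):
# import json
# parsed = json.loads(inp)
# clone_status, clone_type, result = parsed["IsClone"], parsed["CloneType"], parsed["Explanation"]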
def getcloneresult_modelspecific(self,code_clone_check_tasks,embedding_cost):
""" get the clone type and associated information from chat model response data. """
try:
max_workers = min(len(code_clone_check_tasks), 100)
all_funcs,code_root,code_files,total_locs = super().get_clone_function_details()
if (self.generativeai_chat_model.lower() == 'text-datvinci-03'):
self.log.info("<--- Text-Dat |
vinci-03 chat model based code clone detection. --->")
code_clone_result = []
with concurrent.futures.ThreadPoolExecutor(max_workers= max_workers) as executor:
llm_requests = {
executor.submit(self.code_clone_check_with_retry, task[0]["code"], task[1]["code"]): task for task in code_clone_check_tasks
}
with tqdm(total= len(llm_requests)) as progress:
for future in concurrent.futures.as_completed(llm_requests):
task = llm_requests[future]
try:
res = future.result()
try:
my_openai_obj1 = res["choices"][0]["text"]
clone_status,clone_type,result = self.res_formater(my_openai_obj1)
model_value=res['model']
total_tokens_value=res['usage']['total_tokens']
code_clone_result.append({"task": task,
"result":result,
"IsClone": clone_status,
"CloneType": clone_type,
"model":model_value,
"total_tokens":total_tokens_value})
except Exception as e:
self.log.info("getCloneReport, code_clone_result.append error: "+str(e))
except Exception as exc:
self.log.info("getCloneReport error (text davinci chat model): "+str(exc))
progress.update()
## Please uncomment below part if you need to check chat model response body.
#codeclonecheckresult_json = os.path.join(self.ccdreportpath,'code_clone_chatmodel_responsebody.json')
#with open(codeclonecheckresult_json, "w+") as fp:
#json.dump(code_clone_result, fp, indent=2)
code_clone_result_json=json.dumps(code_clone_result)
clone_report=pd.read_json(code_clone_result_json)
cr_totaltokens = clone_report['total_tokens']
total_amt = (cr_totaltokens).sum() * (0.002/1000)
clone_report["function1"] = clone_report["task"].apply(lambda x: x[0]["filepath"] + " -> " + x[0]["function_name"])
clone_report["function2"] = clone_report["task"].apply(lambda x: x[1]["filepath"] + " -> " + x[1]["function_name"])
# clone_report["clone_type"] = clone_report["result"].apply(lambda x: x["CloneType"])
clone_report["clone_type"] = clone_report["CloneType"]
code_dir = code_root
total_files = len(code_files)
total_locs = total_locs
total_functions = len(all_funcs)
total_tokens = clone_report['total_tokens'].sum()
total_cost= embedding_cost + clone_report['total_tokens'].sum() * (0.002/1000)
total_clones = len(clone_report[clone_report.clone_type != "None"])
code_clone_count_by_df = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count')).to_markdown(tablefmt='psql')
clone_functions = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1").to_markdown(tablefmt='psql', index=False)
code_clone_count_dict = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count'))
clone_function_dict = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1")
##Final report on code clone detection
report_str = f"""Code_directory: {code_dir}
Files: {total_files}
LOCs: {total_locs}
Functions: {total_functions}
Total_code_clones_detected: {total_clones}
Tokens used: {total_tokens}
Total cost(embedding + clone check): {total_cost}
Four_types_of_clone:
1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces.
2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone.
3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones.
4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone.
Code_clones_count_by_clone_type:
{code_clone_count_by_df}
Clone_functions:
{clone_functions}
"""
codeclonereport_txt = os.path.join(self.ccdreportpath,'code_clone_report.txt')
with open(codeclonereport_txt, "w") as f:
f.write(report_str)
report_dict=dict({"Code_directory":code_dir,"total_files":total_files,
"total_locs":total_locs,"total_functions":total_functions,"total_clones":total_clones,
"total_tokens":total_tokens,"total_cost":total_cost,
"Code_clones_count_by_clone_type":code_clone_count_dict,"clone_functions":clone_function_dict})
## report for chat model is gpt 3.5 turbo
elif (self.generativeai_chat_model.lower() == 'gpt-3.5-turbo'):
try:
self.log.info("<--- gpt-3.5-turbo chat model based code clone detection. --->")
code_clone_result = []
with concurrent.futures.ThreadPoolExecutor(max_workers= max_workers) as executor:
llm_requests = {
executor.submit(self.code_clone_check_with_retry, task[0]["code"], task[1]["code"]): task for task in code_clone_check_tasks
}
with tqdm(total= len(llm_requests)) as progress:
for future in concurrent.futures.as_completed(llm_requests):
task = llm_requests[future]
try:
res = future.result()
my_openai_obj1 = res["choices"][0]["message"]['content']
clone_status,clone_type,result = self.res_formater(my_openai_obj1)
# result = json.loads(res['choices'][0]['message']['content'])
total_tokens = res["usage"]["total_tokens"]
code_clone_result.append({"task": task,
"result":result ,
"CloneType": clone_type,
"total_tokens": total_tokens})
except Exception as exc:
self.log.info("gpt 3.5 chat model error: "+str(exc))
progress.update()
except Exception as e:
print("In gpt3.5,getcloneresult_modelspecific fn exception : \\n",e)
import traceback
print("traceback, In gpt3.5,getcloneresult_modelspecific fn exception \\n",traceback.print_exc())
## Please uncomment below part if you need to check chat model response body.
#codeclonecheckresult_json = os.path.join(self.ccdreportpath,'code_clone_chatmodel_responsebody.json')
#with open(codeclonecheckresult_json, "w+") as fp:
#json.dump(code_clone_result, fp, indent=2)
try:
code_clone_result_json=json.dumps(code_clone_result)
clone_report = pd.read_json(code_clone_result_json)
codeclone_total_amt = clone_report["total_tokens"].sum() * (0.002/1000)
clone_report["function1"] = clone_report["task"].apply(lambda x: x[0]["filepath"] + " -> " + x[0]["function_name"])
clone_report["function2"] = clone_report["task"].apply(lambda x: x[1]["filepath"] + " -> " + x[1]["function_name"])
# clone_report["clone_type"] = clone_report["result"].apply(lambda x: x["CloneType"])
clone_report["clone_type"] = clone_report["CloneType"]
code_dir = code_root
total_files = len(code_files)
total_locs = total_locs
total_functions = len(all_funcs)
total_tokens = clone_report["total_tokens"].sum()
except Exception as e:
self.log.info("Error in getting clone report: "+str(e))
total_cost= embedding_cost + clone_report["total_tokens"].sum() * (0.002/1000)
total_clones = len(clone_report[clone_report.clone_type != "None"])
code_clone_count_by_df = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count')).to_markdown(tablefmt='psql')
clone_functions = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1").to_markdown(tablefmt='psql', index=False)
code_clone_count_dict = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count'))
clone_function_dict = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1")
report_str = f"""Code_directory: {code_dir}
Files: {total_files}
LOCs: {total_locs}
Functions: {total_functions}
Total code clones detected: {total_clones}
Tokens used: {total_tokens}
Total cost(embedding + clone check): {total_cost}
Four types of clone:
1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces.
2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone.
3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones.
4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone.
5. None: Not a clone, discard this one.
Code_clones_count_by_clone_type:
{code_clone_count_by_df}
Clone_functions:
{clone_functions}
"""
codeclonereport_txt = os.path.join(self.ccdreportpath,'code_clone_report.txt')
with open(codeclonereport_txt, "w") as f:
f.write(report_str)
report_dict=dict({"Code_directory":code_dir,"total_files":total_files,
"total_locs":total_locs,"total_functions":total_functions,"total_clones":total_clones,
"total_tokens":total_tokens,"total_cost":total_cost,
"Code_clones_count_by_clone_type":code_clone_count_dict,"clone_functions":clone_function_dict})
except Exception as e:
self.log.info("Error in clone type and information retrival process .Error message: "+str(e))
return code_clone_result,report_str,report_dict
def getCloneReport(self):
""" To get the clone report from the given python directory """
try:
self.log.info("To get clone report, we are calling embedding and chat model.")
import time
vdb_status = super().get_vdb_status('chromadb')
start_time = time.time()
# self.log.info("code clone detection start time."+str(start_time))
if not vdb_status:
embedded_df,embedding_cost = self.get_embedd_fns()
cluster_df = self.dbscan_clone_detection(embedded_df)
cluster_df_group = cluster_df.groupby("cluster_id")
len_cluster_df_group = len(cluster_df_group)
code_clone_check_tasks = []
for name, group in cluster_df_group:
res = self.make_pairs(group.to_dict(orient="records"))
code_clone_check_tasks += res
#For text-embedding-ada-002 and gpt 3.5 chat model
code_clone_result,report_str,report_dict = self.getcloneresult_modelspecific(code_clone_check_tasks,embedding_cost)
end_time = time.time()
total_time_taken = end_time - start_time
self.log.info("Total time taken for code clone detction: "+str(total_time_taken))
self.log.info("<------------- Final code clone report: -------------------> \\n"+str(report_str))
report_df = pd.DataFrame.from_dict(report_dict, orient="index").reset_index()
report_df.columns = ['ccd_properties', 'Values']
report_df=report_df.T
codecloneresult_df = os.path.join(self.ccdreportpath,'code_clone_report_df.csv')
report_df.to_csv(codecloneresult_df)
return report_str,report_dict,report_df,json.dumps(report_str)
else:
#Below code intended for vector db.
all_funcs,code_root,code_files,total_locs = super().get_clone_function_details()
df = pd.DataFrame(all_funcs)
df['filepath'] = df['filepath'].apply(lambda x: x.replace(code_root, ""))
chromadb_df=super().chromadb_embedding(df)
cluster_df = self.dbscan_clone_detection(chromadb_df)
cluster_df_group = cluster_df.groupby("cluster_id")
len_cluster_df_group = len(cluster_df_group)
code_clone_check_tasks = []
for name, group in cluster_df_group:
res = self.make_pairs(group.to_dict(orient="records"))
code_clone_check_tasks += res
code_clone_result = []
max_workers = min(len(code_clone_check_tasks), 100)
with concurrent.futures.ThreadPoolExecutor(max_workers= max_workers) as executor:
llm_requests = {
executor.submit(self.code_clone_check_with_retry, task[0]["code"], task[1]["code"]): task for task in code_clone_check_tasks
}
with tqdm(total= len(llm_requests)) as progress:
for future in concurrent.futures.as_completed(llm_requests):
task = llm_requests[future]
try:
res = future.result()
code_clone_result.append({"task": task,
"result": json.loads(res['choices'][0]['message']['content']),
"total_tokens": res["usage"]["total_tokens"]})
except Exception as exc:
print('%r generated an exception: %s' % (task, exc))
progress.update()
with open("code_clone_check_result.json", "w+") as fp:
json.dump(code_clone_result, fp, indent=2)
code_clone_result_json=json.dumps(code_clone_result)
clone_report=pd.read_json(code_clone_result_json)
total_amt = clone_report["total_tokens"].sum() * (0.002/1000)
clone_report["function1"] = clone_report["task"].apply(lambda x: x[0]["filepath"] + " -> " + x[0]["function_name"])
clone_report["function2"] = clone_report["task"].apply(lambda x: x[1]["filepath"] + " -> " + x[1]["function_name"])
clone_report["clone_type"] = clone_report["result"].apply(lambda x: x["CloneType"])
all_funcs,code_root,code_files,total_locs = super().get_clone_function_details()
code_dir = code_root
total_files = len(code_files)
total_locs = total_locs
total_functions = len(all_funcs)
total_tokens = clone_report["total_tokens"].sum()
# total_cost= embedding_cost + clone_report["total_tokens"].sum() * (0.002/1000)
total_clones = len(clone_report[clone_report.clone_type != "None"])
code_clone_count_by_df = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count')).to_markdown(tablefmt='psql')
clone_functions = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1").to_markdown(tablefmt='psql', index=False)
code_clone_count_dict = clone_report[clone_report.clone_type != "None"].groupby("clone_type").agg(Count=('clone_type', 'count'))
clone_function_dict = clone_report[["function1", "function2", "clone_type"]][clone_report.clone_type != "None"].sort_values("function1")
##Final report on code clone detection
report_str = f"""Code_directory: {code_dir}
Files: {total_files}
LOCs: {total_locs}
Functions: {total_functions}
Total code clones detected: {total_clones}
Tokens used: {total_tokens}
Four types of clone:
1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces.
2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments, result in this clone.
3. Near-miss clone: Near-miss clone occurs by adding, deleting statements in code fragments of type 2 clones.
4. Semantic clone: The code snippets have different syntax but with alike functionality results in this clone.
Code_clones_count_by_clone_type:
{code_clone_count_by_df}
Clone_functions:
{clone_functions}
"""
with open("code_clone_report.txt", "w") as f:
f.write(report_str)
# print(report_str)
self.log.info("<------------- Final code clone report: -------------------> \\n"+str(report_str))
self.log.info("<------------- clone_functions code clone report: -------------------> \\n"+str(clone_functions))
report_dict=dict({"Code_directory":code_dir,"total_files":total_files,
"total_locs":total_locs,"total_functions":total_functions,"total_clones":total_clones,
"total_tokens":total_tokens,
"Code_clones_count_by_clone_type":code_clone_count_dict,"clone_functions": clone_function_dict})
report_df= pd.DataFrame([report_dict.keys(), report_dict.values()]).T
report_df.columns = ["Code_directory", "total_files","total_locs","total_functions","total_clones","total_tokens","Code_clones_count_by_clone_type","clone_functions"]
report_df.to_csv("code_clone_report_df.csv")
return report_str,report_dict,report_df,json.dumps(report_str)
except Exception as e:
self.log.info("Error in clone detection function call. Error Message: \\n"+str(e))
raise Exception("Error in clone detection function.")
#For testing and code instance privacy
if __name__=='__main__':
## For testing purposes. Uncomment and use.
root_directory = r"C:\\AION_Works\\Anomaly_Detection\\anomalydetectionpackage\\code_clone_testing_pyfiles\\code_clone_testing_pyfiles_large"
embedd_storage_path = r"C:\\AION_Works\\ccddir"
generativeai_credentials={'openai_baseurl':"",
'openai_key':"",
'openai_api_type':"",
'openai_api_version':"",
'generativeai_embedding_engine':"",
'generativeai_embedding_model':"",
'generativeai_chat_model':"",
'generativeai_deploymentId':""}
openai_baseurl = generativeai_credentials['openai_baseurl']
openai_key = generativeai_credentials['openai_key']
openai_api_type = generativeai_credentials['openai_api_type']
openai_api_version = generativeai_credentials['openai_api_version']
generativeai_embedding_engine = generativeai_credentials['generativeai_embedding_engine']
generativeai_embedding_model = generativeai_credentials['generativeai_embedding_model']
generativeai_chat_model = generativeai_credentials['generativeai_chat_model']
generativeai_deploymentId = generativeai_credentials['generativeai_deploymentId']
codeclonedetection_obj = CodeCloneDetectionFiles(root_directory,openai_baseurl, openai_key,openai_api_type,openai_api_version,embedd_storage_path,generativeai_embedding_engine,generativeai_embedding_model,generativeai_chat_model,generativeai_deploymentId)
report_str,report_dict,report_df,report_json = codeclonedetection_obj.getCloneReport()
print("End of code clone detection....\\n")
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import sqlite3
from pathlib import Path
import json
import os
import rsa
import boto3
import pandas as pd
import time
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
def table_exists(self, name):
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
return len(listOfTables) > 0
def read_data(self, table_name):
query = f"SELECT * FROM {table_name}"
row = self.cursor.execute(query).fetchall()
return list(row)
#return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def delete_record(self,table_name,col_name, col_value):
try:
query = f"DELETE FROM {table_name} WHERE {col_name}='{col_value}'"
self.conn.execute(query)
self.conn.commit()
return 'success'
except Exception as e :
print(str(e))
print("Deletion Failed")
return 'error'
def get_data(self,table_name,col_name,col_value):
query = f"SELECT * FROM {table_name} WHERE {col_name}='{col_value}'"
row = self.cursor.execute(query).fetchone()
if(row == None):
return []
return list(row)
def write_data(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def close(self):
self.conn.close()
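## Hedged usage sketch for the sqlite_db helper above (paths, table and column
## names are illustrative only; this function is not called anywhere in this module):
def _sqlite_db_usage_example():
    import os
    import pandas as pd
    location = os.path.join('/tmp', 'aion_sqlite_demo')
    os.makedirs(location, exist_ok=True)
    db = sqlite_db(location, 'demo.db')
    df = pd.DataFrame({'Name': ['b1'], 'GCSbucketname': ['my-bucket']})
    db.write_data(df, 'demo_table')                 # creates the table on first write
    rows = db.get_data('demo_table', 'Name', 'b1')  # -> ['b1', 'my-bucket']
    db.close()
    return rows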
def add_new_GCSBucket(request):
try:
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
print(request.POST["aionreferencename"])
print(request.POST["serviceaccountkey"])
print(request.POST["bucketname"])
if request.POST["aionreferencename"] =='' or request.POST["serviceaccountkey"] == '' or request.POST["bucketname"] == '' :
return 'error'
newdata = {}
newdata['Name'] = [request.POST["aionreferencename"]]
newdata['GCSServiceAccountKey'] = [request.POST["serviceaccountkey"]]
newdata['GCSbucketname'] = [request.POST["bucketname"]]
name = request.POST["aionreferencename"]
if sqlite_obj.table_exists("gcsbucket"):
if(len(sqlite_obj.get_data("gcsbucket",'Name',name))>0):
return 'error1'
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'gcsbucket')
except:
return 'error'
def get_gcs_bucket():
try:
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
temp_data = sqlite_obj.read_data('gcsbucket')
data = []
for x in temp_data:
data_dict = {}
data_dict['Name'] = x[0]
data_dict['GCSServiceAccountKey'] = x[1]
data_dict['GCSbucketname'] = x[2]
data.append(data_dict)
except Exception as e:
print(e)
data = []
return data
def read_gcs_bucket(name,filename,DATA_FILE_PATH):
try:
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
data = sqlite_obj.get_data("gcsbucket",'Name',name)
except:
data = []
found = False
if len(data)!=0:
GCSServiceAccountKey = data[1]
GCSbucketname = data[2]
found = True
#print(found)
#print(name)
try:
if found:
import io
from google.cloud import storage
#print(GCSServiceAccountKey)
#print(GCSbucketname)
try:
storage_client = storage.Client.from_service_account_json(GCSServiceAccountKey)
bucket = storage_client.get_bucket(GCSbucketname)
blob = bucket.blob(filename)
data = blob.download_as_string()
df = pd.read_csv(io.BytesIO(data), encoding = 'utf-8', sep = ',',encoding_errors= 'replace')
except Exception as e:
return "Error",str(e), pd.DataFrame()
return 'Success',"",df
except Exception as e:
print(e)
return 'Error',"Please check bucket configuration",pd.DataFrame()
def remove_gcs_bucket(name):
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
return sqlite_obj.delete_record('gcsbucket','Name',name)
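## Illustrative call pattern for the GCS helpers above (commented out because it
## needs a real service-account key and a registered bucket; names are placeholders):
# status, msg, df = read_gcs_bucket('my_ref_name', 'data.csv', '/tmp')
# if status == 'Success':
#     print(df.head())
# else:
#     print('GCS read failed:', msg)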
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
## NOTE: the original file is truncated here; its imports and class header are lost.
## The class name below is a hypothetical reconstruction (the log file it writes is
## named 'codeclonelog_sklearn'), and the __init__ signature is inferred from the
## attribute assignments that follow.
class CodeCloneDetectionSklearn:
def __init__(self, files_dir, deploy_dir, chunk_size):
self.files_dir = files_dir
self.deploy_dir = deploy_dir
self.chunk_size = chunk_size
try:
self.ccdreportpath = os.path.join(self.deploy_dir, "codeCloneReport")
os.makedirs(self.ccdreportpath, exist_ok = True)
except OSError as error:
print("Directory 'codeCloneReport' cann't be created.Error msg: ",error)
try:
current_datetime = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
str_current_datetime = str(current_datetime)
log_file_name = 'codeclonelog_sklearn'+f"_{str_current_datetime}"+".log"
logpath = os.path.join(self.ccdreportpath,log_file_name)
logging.basicConfig(level=logging.INFO,filename=logpath,filemode='w',format='%(message)s')
self.log = logging.getLogger()
except Exception as e:
print("code clone log object creation error.",e)
pass
def get_function_names(self,filename):
""" Get the function names from python files """
function_names = []  ## initialised up front so the return below is safe even if parsing fails
try:
with open(filename, 'r') as file:
content = file.read()
tree = ast.parse(content)
function_names = []
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
function_names.append(node.name)
except Exception as e:
self.log.info("function name read error: "+str(e))
return function_names
def get_function_code(self,filename, function_name):
""" To get the function codes """
function_code = ""  ## initialised up front so the return below is safe even if parsing fails
try:
with open(filename, 'r') as file:
content = file.read()
tree = ast.parse(content)
function_code = ""
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef) and node.name == function_name:
function_code = ast.unparse(node)
except Exception as e:
self.log.info("function name read error: "+str(e))
return function_code
def get_python_files(self,root_dir):
""" Walk thru the directory user given, get all py files. """
try:
code_files = [y for x in os.walk(root_dir) for y in glob(os.path.join(x[0], '*.py'))]
except Exception as e:
self.log.info("Python file read error: "+str(e))
return code_files
def chunk_functions(self,function_code, chunk_size):
""" Check the function size based on chunk size. """
try:
if (len(function_code) > 20):
chunks = [function_code[i:i + chunk_size] for i in range(0, len(function_code), chunk_size)]
else:
chunks = list((function_code,))
except Exception as e:
self.log.info("function chunk based on chunk_size error: "+str(e))
total_tokens = round(len(function_code)/4)
return chunks,total_tokens
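## Illustrative arithmetic for the chunking above: with chunk_size=1900, a
## 4500-character function body yields chunks of 1900, 1900 and 700 characters,
## and the token estimate is round(4500 / 4) = 1125 (the ~4-chars-per-token
## heuristic used elsewhere in this module):
# chunks, total_tokens = self.chunk_functions('x' * 4500, 1900)
# len(chunks), total_tokens  # -> (3, 1125)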
def get_clone(self):
""" Main code clone detection function using sklearn tfidf_vectorizer and cosine_similarity.
return values:report_dict which contains total_clones, """
try:
start_time = time.time()
chunk_size = int(self.chunk_size)
ccdreportpath = os.path.join(self.deploy_dir, "codeCloneReport")
python_files = self.get_python_files(self.files_dir)
total_files = len(python_files)
# print('python_files: \\n',python_files)
function_codes = []
function_n = []
file_name=[]
# files_info=[]
total_tokens_used = []
for file in python_files:
function_names = self.get_function_names(file)
for i,function_name in enumerate(function_names):
file_name.append(file)
function_n.append(function_name)
function_code = self.get_function_code(file, function_name)
chunks,total_tokens = self.chunk_functions(function_code, chunk_size)
total_tokens_used.append(total_tokens)
function_codes.extend(chunks)
total_functions = len(function_n)
files_info=list(zip(file_name, function_n,function_codes))
tfidf_vectorizer = TfidfVectorizer()
## we can use other vectorizer models also.
# tfidf_vectorizer = HashingVectorizer()
tfidf_matrix = tfidf_vectorizer.fit_transform(function_codes)
similarity_matrix = cosine_similarity(tfidf_matrix)
#Uncomment if you want to send two different code clone blocks at a time for similarity comparison
# similarity_matrix = cosine_similarity(tfidf_matrix, tfidf_matrix)
clone_d = dict()
total_clones = 0
final_report=list()
#compare each function with every following function
for i in range(len(similarity_matrix)):
for j in range(i + 1, len(similarity_matrix)):
if(similarity_matrix[i, j] >= 0.90 and similarity_matrix[i, j] <= 0.95):
clone_d.update({f'codeclone_{total_clones+1}':{f'function{i}':{'clone_fun_name':function_n[i],'clone1_path':files_info[i][0]},f'function{j}':{'clone_fun_name':function_n[j],'clone1_path':files_info[j][0]},'cloneType':'parametricClone'}})
report_json = json.dumps(clone_d, indent = 4)
final_report.append(clone_d)  ## record parametric clones in the final report, matching the other branches
total_clones=total_clones+1
elif(similarity_matrix[i, j] > 0.95):
clone_d.update({f'codeclone_{total_clones+1}':{f'function{i}':{'clone_fun_name':function_n[i],'clone_path':files_info[i][0]},f'function{j}':{'clone_fun_name':function_n[j],'clone_path':files_info[j][0]
},'cloneType':'exactClone'}})
report_json = json.dumps(clone_d, indent = 4)
final_report.append(clone_d)
total_clones=total_clones+1
elif(similarity_matrix[i, j] > 0.80 and similarity_matrix[i, j] < 0.90):
clone_d.update({f'codeclone_{total_clones+1}':{f'function{i}':{'clone_fun_name':function_n[i],'clone_path':files_info[i][0]},f'function{j}':{'clone_fun_name':function_n[j],'clone_path':files_info[j][0]
},'cloneType':'NearMissClones'}})
report_json = json.dumps(clone_d, indent = 4)
final_report.append(clone_d)
total_clones=total_clones+1
else:
##add other conditions in future
pass
## To get clone type
clone_type = [list(item.values())[2] for item in list(clone_d.values())]
report_str = json.dumps(final_report)
json_l=json.loads(report_str)
json_keys = list(json_l[0].keys()) if json_l else []
json_values = list(json_l[0].values()) if json_l else []
end_time = time.time()
total_time_taken = end_time - start_time
# self.log.info("ccd_report: \\n"+str(ccd_report))
f_df=pd.DataFrame(list(zip(json_keys, json_values,clone_type)),
columns =['Clone', 'CloneDetails','CloneType'])
codeclonereport_file = os.path.join(self.ccdreportpath,'clone_detection_report_sklearn.csv')
f_df.to_csv(codeclonereport_file, index=False)
ccd_report = f_df.to_markdown(tablefmt='psql')
self.log.info("total_clones: \\n"+str(total_clones))
exact_clone_count = f_df['CloneType'].str.count("exactClone").sum()
parametric_clone_count = f_df['CloneType'].str.count("parametricClone").sum()
nearmiss_clone_count = f_df['CloneType'].str.count("NearMissClones").sum()
total_tokens = sum(total_tokens_used)
# nearmiss_clone_count =0
self.log.info("exact_clone_count: \\n"+str(exact_clone_count))
self.log.info("parametric_clone_count: \\n"+str(parametric_clone_count))
self.log.info("nearmiss_clone_count: \\n"+str(nearmiss_clone_count))
self.log.info("Total tokens used: \\n"+str(total_tokens))
self.log.info("Total time taken to excute code clone detction: \\t"+str(total_time_taken))
clone_info="1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces,\\
2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments and less similarity threshold (0.90-0.95), result in this clone,\\
3. Near-miss clone: Near-miss clones are detected with a lower similarity threshold."
clone_count = {"Exact Clone":exact_clone_count,"Parametric Clone":parametric_clone_count,"Nearmiss Clone":nearmiss_clone_count}
report_str = f"""Code_directory: {self.files_dir}
Files: {total_files}
Functions: {total_functions}
Total_code_clones_detected: {total_clones}
Tokens used: {total_tokens}
Three_types_of_clone:
1. Exact clone: Two code fragments similar to each other with little transformation in comments, layout, or whitespaces.
2. Parameterized clone: Changes made in names of variables, keywords, identifiers, or bypassing parameter during function call in code fragments and less similarity threshold (0.90-0.95), result in this clone.
3. Near-miss clone: Near-miss clones are detected with a lower similarity threshold.
Code_clones_count_by_clone_type:
{clone_count}
Clone_functions:
{ccd_report}
total_time_taken: {total_time_taken}
"""
codeclonereport_txt = os.path.join(self.ccdreportpath,'code_clone_report.txt')
with open(codeclonereport_txt, "w") as f:
f.write(report_str)
report_dict = {"clone_info":clone_info,"total_clones":total_clones,'total_files':total_files,"exact_clone_count":exact_clone_count,'total_functions':total_functions,"total_tokens":total_tokens, "parametric_clone_count":parametric_clone_count,"nearmiss_clone_count":nearmiss_clone_count,"result_df":f_df }
self.log.info("ccd_report: \\n"+str(ccd_report))
# print("report_dict:\\n\\n",report_dict)
# end_time = time.time()
# total_time = (end_time - start_time)
return report_dict
except Exception as e:
self.log.info("Clone detection function error. error msg: "+str(e))
# import traceback
# print("traceback error: \\n",traceback.print_exc())
if __name__ == "__main__":
print("code clone detection started....")
##Use this for standalone function debugging.
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
import time
import os
import subprocess
import base64
import sys
import re
from appbe.dataIngestion import getcommonfields
from appbe.dataIngestion import getusercasestatus
def startSummarization(request,DEFAULT_FILE_PATH,CONFIG_PATH,DATA_FILE_PATH):
try:
if request.FILES:
Datapath = request.FILES['summarypath']
ext = str(Datapath).split('.')[-1]
filetimestamp = str(int(time.time()))
if ext.lower() in ['txt','pdf','doc','docs']:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext)
else:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp)
with open(dataFile, 'wb+') as destination:
for chunk in Datapath.chunks():
destination.write(chunk)
destination.close()
configFile = os.path.join(DEFAULT_FILE_PATH,'aion_textSummerization.json')
filetimestamp = str(int(time.time()))
config_json_filename = os.path.join(CONFIG_PATH, 'AION_' + filetimestamp + '.json')
f = open(configFile)
data = json.load(f)
f.close()
data['basic']['dataLocation'] = dataFile
type = request.POST.get('type')
model = request.POST.get('model')
slength = request.POST.get('length')
types = data['basic']['analysisAproach']['textSummarization']
for x in list(types.keys()):
data['basic']['analysisAproach']['textSummarization'][x] = 'False'
data['basic']['analysisAproach']['textSummarization'][type] = 'True'
format = request.POST.get('format')
algorithm = data['basic']['algorithms']['textSummarization']
for x in list(algorithm.keys()):
data['basic']['algorithms']['textSummarization'][x] = 'False'
data['basic']['algorithms']['textSummarization'][model]='True'
length = data['advance']['textSummarization']['summaryLength']
for x in list(length.keys()):
data['advance']['textSummarization']['summaryLength'][x] = 'False'
data['advance']['textSummarization']['summaryLength'][slength] = 'True'
with open(config_json_filename, "w") as outfile:
json.dump(data, outfile)
outfile.close()
from bin.aion_text_summarizer import aion_textsummary
outputStr = aion_textsummary(config_json_filename)
#scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','bin','aion_text_summarizer.py'))
#outputStr = subprocess.check_output([sys.executable, scriptPath, config_json_filename])
#outputStr = outputStr.decode('utf-8')
#outputStr = re.search(r'Summary:(.*)', str(outputStr), re.IGNORECASE).group(1)
predict_dict = json.loads(str(outputStr))
summary = predict_dict['summary']
except Exception as e:
print(e)
summary = str(e)
context = getcommonfields()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
context.update({'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion})
context.update({'summary':summary})
return context
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import json
import os,sys
from appbe import help_Text as ht
def save(request):
from appbe.dataPath import DEFAULT_FILE_PATH
if request.method == 'POST':
submittype = request.POST.get('AdvanceSubmit')
if submittype != 'AdvanceDefault':
configFile = request.session['config_json']
f = open(configFile, "r+")
configSettingsData = f.read()
configSettings = json.loads(configSettingsData)
try:
if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'false':
numericselectedmethod = request.POST.get('numericfillmethod')
for x in list(configSettings['advance']['profiler']['numericalFillMethod'].keys()):
configSettings['advance']['profiler']['numericalFillMethod'][x] = 'False'
configSettings['advance']['profiler']['numericalFillMethod'][numericselectedmethod] = 'True'
categoricalselectedmethod = request.POST.get('categorialfillmethod')
for x in list(configSettings['advance']['profiler']['categoricalFillMethod'].keys()):
configSettings['advance']['profiler']['categoricalFillMethod'][x] = 'False'
configSettings['advance']['profiler']['categoricalFillMethod'][categoricalselectedmethod] = 'True'
categoryEncodingMethod = request.POST.get('categoryencoding')
for x in list(configSettings['advance']['profiler']['categoryEncoding'].keys()):
configSettings['advance']['profiler']['categoryEncoding'][x] = 'False'
configSettings['advance']['profiler']['categoryEncoding'][categoryEncodingMethod] = 'True'
outlierDetection = request.POST.get('outlierDetection')
for x in list(configSettings['advance']['profiler']['outlierDetection'].keys()):
configSettings['advance']['profiler']['outlierDetection'][x] = 'False'
if outlierDetection != 'Disable':
configSettings['advance']['profiler']['outlierDetection'][outlierDetection] = 'True'
#configSettings['advance']['profiler']['outlierDetectionStatus'] = request.POST.get('AnamolyDetectionStatus')
#configSettings['advance']['profiler']['outlierDetectionMethod'] = request.POST.get('AnaTreatmentMethod')
configSettings['advance']['profiler']['misValueRatio'] = request.POST.get('MisValueRatio')
#configSettings['advance']['profiler']['categoricalToNumeric'] = request.POST.get('CategoricalToNumeric')
configSettings['advance']['profiler']['numericFeatureRatio'] = request.POST.get('NumFeatureRatio')
configSettings['advance']['profiler']['categoryMaxLabel'] = request.POST.get('CatMaxLabels')
configSettings['advance']['selector']['categoryMaxLabel'] = request.POST.get('CatMaxLabels')
normalizationtypes = configSettings['advance']['profiler']['normalization']
for k in normalizationtypes.keys():
configSettings['advance']['profiler']['normalization'][k] = 'False'
if request.POST.get('NormalizationMethod').lower() != 'none':
configSettings['advance']['profiler']['normalization'][request.POST.get('NormalizationMethod')] = 'True'
#configSettings['advance']['profiler']['normalizationMethod'] = request.POST.get('NormalizationMethod')
configSettings['advance']['profiler']['removeDuplicate'] = request.POST.get('removeDuplicate')
# ---------------------------------------------- Debiasing Changes ----------------------------------------------
configSettings['advance']['profiler']['deBiasing']['FeatureName'] = request.POST.get('InputFeature')
configSettings['advance']['profiler']['deBiasing']['ClassName'] = request.POST.get('InputClass')
configSettings['advance']['profiler']['deBiasing']['Algorithm'] = request.POST.get('InputAlgorithm')
configSettings['advance']['profiler']['deBiasing']['TargetFeature'] = configSettings['basic']['targetFeature']
# ---------------------------------------------- ----------------------------------------------
problemtypes = configSettings['basic']['analysisType']
problem_type = ""
for k in problemtypes.keys():
if configSettings['basic']['analysisType'][k] == 'True':
problem_type = k
break
if configSettings['basic']['analysisType']['llmFineTuning'].lower() == 'false' and configSettings['basic']['onlineLearning'].lower() == 'false' and configSettings['basic']['distributedLearning'].lower() == 'false':
configSettings['advance']['profiler']['textCleaning']['removeNoise'] = request.POST.get('noiseStatus')
# -------------------------------- 12301:Remove Noise Config related Changes S T A R T --------------------------------
if request.POST.get('noiseStatus') == 'True':
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['decodeHTML'] = request.POST.get('DecodeHTML')
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHyperLinks'] = request.POST.get('removeHyperlinks')
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeMentions'] = request.POST.get('RemoveMentions')
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHashtags'] = request.POST.get('removeHashtags')
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeEmoji'] = request.POST.get('removeEmoji')
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['unicodeToAscii'] = request.POST.get('unicodeToAscii')
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeNonAscii'] = request.POST.get('removeNonAscii')
else:
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['decodeHTML'] = "False"
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHyperLinks'] = "False"
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeMentions'] = "False"
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeHashtags'] = "False"
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeEmoji'] = "False"
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['unicodeToAscii'] = "False"
configSettings['advance']['profiler']['textCleaning']['removeNoiseConfig']['removeNonAscii'] = "False"
# ---------------------------------------------------------------- E N D ----------------------------------------------------------------
configSettings['advance']['profiler']['textCleaning']['expandContractions'] = request.POST.get(
'expandContractions')
configSettings['advance']['profiler']['textCleaning']['normalize'] = request.POST.get('normalize')
if (request.POST.get('normalizeMethod') == 'Lemmatization'):
configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['lemmatization'] = "True"
configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['stemming'] = "False"
else:
configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['stemming'] = "True"
configSettings['advance']['profiler']['textCleaning']['normalizeMethod']['lemmatization'] = "False"
configSettings['advance']['profiler']['textCleaning']['replaceAcronym'] = request.POST.get('replaceAcronym')
if request.POST.get('acronymDict') != '' and request.POST.get('acronymDict') != 'None':
configSettings['advance']['profiler']['textCleaning']['acronymConfig']['acronymDict'] = eval(request.POST.get(
'acronymDict'))  ## note: ast.literal_eval would be a safer parser for this user-supplied dict
configSettings['advance']['profiler']['textCleaning']['correctSpelling'] = request.POST.get(
'correctSpelling')
configSettings['advance']['profiler']['textCleaning']['removeStopwords'] = request.POST.get(
'removeStopwords')
if (request.POST.get('ExtendOrReplace') == 'NA'):
configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['extend'] = "False"
configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['replace'] = "False"
elif (request.POST.get('ExtendOrReplace') == 'Extend'):
configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['extend'] = "True"
configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['replace'] = "False"
else:
configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['extend'] = "False"
configSettings['advance']['profiler']['textCleaning']['stopWordsConfig']['replace'] = "True"
configSettings['advance']['profiler']['textCleaning']['stopWordsConfig'][
'stopwordsList'] = request.POST.get('stopwordsList')
configSettings['advance']['profiler']['textCleaning']['removePunctuation'] = request.POST.get(
'removePunctuation')
configSettings['advance']['profiler']['textCleaning']['removePunctuationConfig'][
'removePuncWithinTokens'] = request.POST.get('removePuncWithinTokens')
configSettings['advance']['profiler']['textCleaning']['removeNumericTokens'] = request.POST.get(
'removeNumericTokens')
configSettings['advance']['profiler']['textCleaning']['removeNumericConfig'][
'removeNumeric_IncludeSpecialCharacters'] = request.POST.get('removeNumeric_IncludeSpecialCharacters')
if (request.POST.get('tokenizationLib') == 'nltk'):
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = "True"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][
'textblob'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][
'gensim'] = "False"
elif (request.POST.get('tokenizationLib') == 'textblob'):
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][
'textblob'] = "True"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][
'gensim'] = "False"
elif (request.POST.get('tokenizationLib') == 'spacy'):
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][
'textblob'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = "True"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][
'gensim'] = "False"
elif (request.POST.get('tokenizationLib') == 'keras'):
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][
'textblob'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = "True"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][
'gensim'] = "False"
else:
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['nltk'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib'][
'textblob'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['spacy'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['keras'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']['gensim'] = "True"
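# The tokenizationLib chain above (and the lemmatizationLib/stopwordsRemovalLib
# chains below) one-hot encode a single POST value by hand. A minimal loop
# sketch, assuming the posted value always names one of the keys (the
# textConversionMethod loop further below already uses this shape):
#   libs = configSettings['advance']['profiler']['textCleaning']['libConfig']['tokenizationLib']
#   for lib in libs:
#       libs[lib] = "True" if lib == request.POST.get('tokenizationLib') else "False"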
if (request.POST.get('lemmatizationLib') == 'nltk'):
configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['nltk'] = "True"
configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][
'textblob'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][
'spacy'] = "False"
elif (request.POST.get('lemmatizationLib') == 'textblob'):
configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['nltk'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][
'textblob'] = "True"
configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][
'spacy'] = "False"
else:
configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['nltk'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib'][
'textblob'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['lemmatizationLib']['spacy'] = "True"
if (request.POST.get('stopwordsRemovalLib') == 'nltk'):
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][
'nltk'] = "True"
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][
'gensim'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][
'spacy'] = "False"
elif (request.POST.get('stopwordsRemovalLib') == 'gensim'):
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][
'nltk'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][
'gensim'] = "True"
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][
'spacy'] = "False"
else:
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][
'nltk'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][
'gensim'] = "False"
configSettings['advance']['profiler']['textCleaning']['libConfig']['stopwordsRemovalLib'][
'spacy'] = "True"
configSettings['advance']['profiler']['textFeatureExtraction']['n_grams'] = request.POST.get('n_grams')
configSettings['advance']['profiler']['textFeatureExtraction']['n_grams_config'][
'min_n'] = int(request.POST.get('range_min_n'))
configSettings['advance']['profiler']['textFeatureExtraction']['n_grams_config'][
'max_n'] = int(request.POST.get('range_max_n'))
configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags'] = request.POST.get('pos_tags')
if (request.POST.get('pos_tags_lib') == 'nltk'):
configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['nltk'] = "True"
configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['textblob'] = "False"
configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['spacy'] = "False"
elif (request.POST.get('pos_tags_lib') == 'textblob'):
configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['nltk'] = "False"
configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['textblob'] = "True"
configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['spacy'] = "False"
else:
configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['nltk'] = "False"
configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['textblob'] = "False"
configSettings['advance']['profiler']['textFeatureExtraction']['pos_tags_lib']['spacy'] = "True"
textconvertionmethods = configSettings['advance']['profiler']['textConversionMethod']
for k in textconvertionmethods.keys():
configSettings['advance']['profiler']['textConversionMethod'][k] = 'False'
if problem_type.lower() not in ['similarityidentification','contextualsearch']:
configSettings['advance']['profiler']['textConversionMethod'][request.POST.get('textConvertionMethod')] = 'True'
if 'embeddingSize' in configSettings['advance']['profiler']:
glove = configSettings['advance']['profiler']['embeddingSize']['Glove']
for k in glove.keys():
configSettings['advance']['profiler']['embeddingSize']['Glove'][k] = 'False'
configSettings['advance']['profiler']['embeddingSize']['Glove'][request.POST.get('txtglovedimensions')] = 'True'
fastText = configSettings['advance']['profiler']['embeddingSize']['FastText']
for k in fastText.keys():
configSettings['advance']['profiler']['embeddingSize']['FastText'][k] = 'False'
configSettings['advance']['profiler']['embeddingSize']['FastText'][request.POST.get('txtFastTextdimensions')] = 'True'
if 'LatentSemanticAnalysis' in configSettings['advance']['profiler']['embeddingSize']:
LatentSemanticAnalysis = configSettings['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis']
for k in LatentSemanticAnalysis.keys():
configSettings['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'][k] = 'False'
configSettings['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'][request.POST.get('txttfidfdimensions')] = 'True'
if 'TF_IDF' in configSettings['advance']['profiler']['embeddingSize']:
configSettings['advance']['profiler']['embeddingSize']['TF_IDF']['maxFeatures'] = request.POST.get('tfidfmaxfeatures')
if 'CountVectors' in configSettings['advance']['profiler']['embeddingSize']:
configSettings['advance']['profiler']['embeddingSize']['CountVectors']['maxFeatures'] = request.POST.get('cvmaxfeatures')
if problem_type.lower() == 'imageclassification':
configSettings['advance']['image_config']['img_width'] = int(request.POST.get('img_width'))
configSettings['advance']['image_config']['img_height'] = int(request.POST.get('img_height'))
configSettings['advance']['image_config']['img_channel'] = int(request.POST.get('img_channel'))
configSettings['advance']['image_config']['lr'] = float(request.POST.get('lr'))
configSettings['advance']['image_config']['epochs'] = int(request.POST.get('epochs'))
configSettings['advance']['image_config']['test_split_ratio'] = float(request.POST.get('test_split_ratio'))
if problem_type.lower() == "llmfinetuning":
configSettings = llmadvancesettings(configSettings,request)
if problem_type.lower() == 'objectdetection' or problem_type.lower() == 'imageclassification':
configSettings['advance']['ImageAugmentation']['Enable'] = request.POST.get('advance_ImageAugmentation_Enable')
configSettings['advance']['ImageAugmentation']['KeepAugmentedImages'] = request.POST.get('advance_ImageAugmentation_keepAugmentedImages')
configSettings['advance']['ImageAugmentation']['Noise']['Blur'] = request.POST.get('advance_ImageAugmentation_Noise_Blur')
configSettings['advance']['ImageAugmentation']['Noise']['Brightness'] = request.POST.get('advance_ImageAugmentation_Noise_Brightness')
configSettings['advance']['ImageAugmentation']['Noise']['Contrast'] = request.POST.get('advance_ImageAugmentation_Noise_Contrast')
configSettings['advance']['ImageAugmentation']['Transformation']['Flip'] = request.POST.get('advance_ImageAugmentation_Transformation_Flip')
configSettings['advance']['ImageAugmentation']['Transformation']['Rotate'] = request.POST.get('advance_ImageAugmentation_Transformation_Rotate')
configSettings['advance']['ImageAugmentation']['Transformation']['Shift'] = request.POST.get('advance_ImageAugmentation_Transformation_Shift')
configSettings['advance']['ImageAugmentation']['Transformation']['Crop'] = request.POST.get('advance_ImageAugmentation_Transformation_Crop')
configSettings['advance']['ImageAugmentation']['configuration']['Blur']['noOfImages'] = request.POST.get('noofblurimages')
configSettings['advance']['ImageAugmentation']['configuration']['Blur']['limit'] = request.POST.get('limitblurimage')
configSettings['advance']['ImageAugmentation']['configuration']['Brightness']['noOfImages'] = request.POST.get('noofbrightnessimages')
configSettings['advance']['ImageAugmentation']['configuration']['Brightness']['limit'] = request.POST.get('limitbrightnessimage')
configSettings['advance']['ImageAugmentation']['configuration']['Contrast']['noOfImages'] = request.POST.get('noofcontrastimages')
configSettings['advance']['ImageAugmentation']['configuration']['Contrast']['limit'] = request.POST.get('limitcontrastimage')
configSettings['advance']['ImageAugmentation']['configuration']['Flip']['noOfImages'] = request.POST.get('noofflipimages')
configSettings['advance']['ImageAugmentation']['configuration']['Rotate']['noOfImages'] = request.POST.get('noofrotateimages')
configSettings['advance']['ImageAugmentation']['configuration']['Shift']['noOfImages'] = request.POST.get('noofshiftimages')
configSettings['advance']['ImageAugmentation']['configuration']['Crop']['noOfImages'] = request.POST.get('noofcropimages')
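# The configuration block above copies POST fields into config keys one-for-one.
# A table-driven sketch (field names copied from the calls above):
#   aug_fields = {'Blur': 'noofblurimages', 'Brightness': 'noofbrightnessimages',
#                 'Contrast': 'noofcontrastimages', 'Flip': 'noofflipimages',
#                 'Rotate': 'noofrotateimages', 'Shift': 'noofshiftimages',
#                 'Crop': 'noofcropimages'}
#   for key, field in aug_fields.items():
#       configSettings['advance']['ImageAugmentation']['configuration'][key]['noOfImages'] = request.POST.get(field)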
configSettings['advance']['selector']['selectionMethod']['featureSelection'] = 'False'
configSettings['advance']['selector']['selectionMethod']['featureEngineering'] = 'False'
configSettings['advance']['selector']['featureSelection']['allFeatures'] = 'False'
configSettings['advance']['selector']['featureSelection']['statisticalBased'] = 'False'
configSettings['advance']['selector']['featureSelection']['modelBased'] = 'False'
if(request.POST.get('selectionMethod') == 'FeatureSelection'):
configSettings['advance']['selector']['selectionMethod']['featureSelection'] = 'True'
else:
configSettings['advance']['selector']['selectionMethod']['featureEngineering'] = 'True'
if request.POST.get('allFeatures'):
configSettings['advance']['selector']['featureSelection']['allFeatures'] = request.POST.get('allFeatures')
if request.POST.get('statisticalBased'):
configSettings['advance']['selector']['featureSelection']['statisticalBased'] = request.POST.get('statisticalBased')
if request.POST.get('modelBased'):
configSettings['advance']['selector']['featureSelection']['modelBased'] = request.POST.get('modelBased')
dimentionalityreductionmethod = request.POST.get('dimentionalityreductionmethod')
for x in list(configSettings['advance']['selector']['featureEngineering'].keys()):
if x != 'numberofComponents':
configSettings['advance']['selector']['featureEngineering'][x] = 'False'
configSettings['advance']['selector']['featureEngineering'][dimentionalityreductionmethod] = 'True'
configSettings['advance']['selector']['featureEngineering']['numberofComponents'] = request.POST.get('numberofComponents')
#configSettings['advance']['selector']['categoricalFeatureRatio'] = request.POST.get('CatFeatureRatio')
configSettings['advance']['selector']['statisticalConfig']['correlationThresholdFeatures'] = request.POST.get('correlationThresholdFeatures')
configSettings['advance']['selector']['statisticalConfig']['correlationThresholdTarget'] = request.POST.get('correlationThresholdTarget')
configSettings['advance']['selector']['statisticalConfig']['pValueThresholdFeatures'] = request.POST.get('pValueThresholdFeatures')
configSettings['advance']['selector']['statisticalConfig']['pValueThresholdTarget'] = request.POST.get('pValueThresholdTarget')
configSettings['advance']['selector']['statisticalConfig']['varianceThreshold'] = request.POST.get('VarianceThreshold')
if problem_type.lower() == 'recommendersystem':
configSettings['advance']['recommenderparam']['svd_params']= eval(request.POST.get('svd_params'))
configSettings['advance']['associationrule']['modelParams']['apriori'] = eval(request.POST.get('apriori'))
configSettings['advance']['textSimilarityConfig'] = eval(request.POST.get('textsimilarity'))
if configSettings['basic']['distributedLearning'].lower() == 'true':
configSettings['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('classDistributedXGBoost'))
configSettings['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('classDistributedLightGBM'))
configSettings['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['Distributed Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('DistributedXGBoostreg'))
configSettings['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['Distributed Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('DistributedLightGBMreg'))
if configSettings['basic']['onlineLearning'].lower() != 'true' and configSettings['basic']['distributedLearning'].lower() != 'true':
if (problem_type.lower() == 'classification') or (problem_type.lower() == 'regression') or (problem_type.lower() == 'clustering') or (problem_type.lower() == 'topicmodelling'):
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Logistic Regression'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Logistic Regression'] = eval(request.POST.get('classification_LogisticRegression'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Naive Bayes'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Naive Bayes'] = eval(request.POST.get('classification_GaussianNB'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Support Vector Machine'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Support Vector Machine'] = eval(request.POST.get('classification_SVC'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['K Nearest Neighbors'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['K Nearest Neighbors'] = eval(request.POST.get('classification_KNeighborsClassifier'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Decision Tree'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Decision Tree'] = eval(request.POST.get('classification_DecisionTreeClassifier'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Random Forest'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Random Forest'] = eval(request.POST.get('classification_RandomForestClassifier'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Gradient Boosting'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Gradient Boosting'] = eval(request.POST.get('classification_GradientBoostingClassifier'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Extreme Gradient Boosting (XGBoost)'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('classification_ExtremeGradientBoostingClassifier'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Light Gradient Boosting (LightGBM)'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('classification_LightGradientBoostingClassifier'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Categorical Boosting (CatBoost)'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Categorical Boosting (CatBoost)'] = eval(request.POST.get('classification_CategoricalBoostingClassifier'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Linear Regression'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Linear Regression'] = eval(request.POST.get('regression_LinearRegression'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Lasso'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Lasso'] = eval(request.POST.get('regression_Lasso'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Ridge'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Ridge'] = eval(request.POST.get('regression_Ridge'))
if problem_type.lower() == 'topicmodelling' and configSettings['basic']['algorithms']['topicModelling']['LDA'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['topicModellingParams']['LDA']= eval(request.POST.get('topicmodeling_lda'))
if problem_type.lower() == 'clustering' and configSettings['basic']['algorithms']['clustering']['KMeans'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['clusteringModelParams']['KMeans']= eval(request.POST.get('cluster_kmeans'))
if problem_type.lower() == 'clustering' and configSettings['basic']['algorithms']['clustering']['DBSCAN'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['clusteringModelParams']['DBSCAN']= eval(request.POST.get('cluster_DBSCAN'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Decision Tree'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Decision Tree'] = eval(request.POST.get('regression_DecisionTreeRegressor'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Random Forest'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Random Forest'] = eval(request.POST.get('regression_RandomForestRegressor'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Extreme Gradient Boosting (XGBoost)'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Extreme Gradient Boosting (XGBoost)'] = eval(request.POST.get('regression_XGBoostRegressor'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Light Gradient Boosting (LightGBM)'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Light Gradient Boosting (LightGBM)'] = eval(request.POST.get('regression_LightGBMRegressor'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Categorical Boosting (CatBoost)'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Categorical Boosting (CatBoost)'] = eval(request.POST.get('regression_CatBoostRegressor'))
configSettings['advance']['mllearner_config']['modelparamsfile'] = request.POST.get('ModelParamFile')
configSettings['advance']['mllearner_config']['optimizationMethod'] = request.POST.get('OptimizationMethod')
configSettings['advance']['mllearner_config']['optimizationHyperParameter'][
'iterations'] = request.POST.get('iterations')
configSettings['advance']['mllearner_config']['optimizationHyperParameter'][
'trainTestCVSplit'] = request.POST.get('trainTestCVSplit')
configSettings['advance']['mllearner_config']['thresholdTunning'] = request.POST.get('thresholdTunning')
configSettings['advance']['mllearner_config']['Stacking (Ensemble)'] = request.POST.get('EnsembleStacking')
configSettings['advance']['mllearner_config']['Voting (Ensemble)'] = request.POST.get('EnsembleVoting')
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Logistic Regression']['enable'] = request.POST.get('ensemple_bagging_lr_enable')
if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Logistic Regression']['enable'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Logistic Regression']['param'] = eval(request.POST.get('classi_ensemple_bagging_lr_param'))
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Naive Bayes']['enable'] = request.POST.get('ensemple_bagging_naivebayes_enable')
if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Naive Bayes']['enable'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Naive Bayes']['param'] = eval(request.POST.get('classi_ensemple_bagging_naivebayes_param'))
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Support Vector Machine']['enable'] = request.POST.get('ensemple_bagging_svm_enable')
if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Support Vector Machine']['enable'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Support Vector Machine']['param'] = eval(request.POST.get('classi_ensemple_bagging_svm_param'))
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['K Nearest Neighbors']['enable'] = request.POST.get('ensemple_bagging_knn_enable')
if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['K Nearest Neighbors']['enable'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['K Nearest Neighbors']['param'] = eval(request.POST.get('classi_ensemple_bagging_knn_param'))
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] = request.POST.get('ensemple_bagging_dt_enable')
if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Decision Tree']['param'] = eval(request.POST.get('classi_ensemple_bagging_dt_param'))
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Random Forest']['enable'] = request.POST.get('ensemple_bagging_rf_enable')
if configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Random Forest']['enable'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']['Random Forest']['param'] = eval(request.POST.get('classi_ensemple_bagging_rf_param'))
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Linear Regression']['enable'] = request.POST.get('ensemple_bagging_lir_enable')
if configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Linear Regression']['enable'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Linear Regression']['param'] = eval(request.POST.get('reg_ensemple_bagging_lir_param'))
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] = request.POST.get('ensemple_bagging_dit_enable')
if configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Decision Tree']['enable'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Decision Tree']['param'] = eval(request.POST.get('reg_ensemple_bagging_dit_param'))
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Ridge']['enable'] = request.POST.get('ensemple_bagging_ridge_enable')
if configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Ridge']['enable'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']['Ridge']['param'] = eval(request.POST.get('reg_ensemple_bagging_ridge_param'))
if problem_type.lower() == 'classification':
if configSettings['advance']['mllearner_config']['Stacking (Ensemble)'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['classifierModelParams']['Stacking (Ensemble)'] = eval(request.POST.get('ensamblestackingClassifierparams'))
if problem_type.lower() == 'regression':
if configSettings['advance']['mllearner_config']['Stacking (Ensemble)'] == 'True':
configSettings['advance']['mllearner_config']['modelParams']['regressorModelParams']['Stacking (Ensemble)'] = eval(request.POST.get('ensamblestackingRegressorparams'))
configSettings['basic']['filterExpression'] = request.POST.get('filterExpression')
#configSettings['advance']['mllearner_config']['trainPercentage'] = request.POST.get('trainPercentage')
if (problem_type.lower() == 'classification') or (problem_type.lower() == 'regression'):
configSettings['advance']['modelEvaluation']['smcStrategy'] = request.POST.get('smcStrategy')
configSettings['advance']['modelEvaluation']['smcMaxDepth'] = request.POST.get('smcMaxDepth')
configSettings['advance']['modelEvaluation']['smcCondition'] = request.POST.get('smcCondition')
configSettings['advance']['modelEvaluation']['miCondition'] = request.POST.get('miCondition')
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Neural Network'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Network'] = eval(
request.POST.get('dl_classification_SNN'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Recurrent Neural Network'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network'] = eval(
request.POST.get('dl_classification_RNN'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Recurrent Neural Network (GRU)'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (GRU)'] = eval(
request.POST.get('dl_classification_GRURNN'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Recurrent Neural Network (LSTM)'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (LSTM)'] = eval(
request.POST.get('dl_classification_LSTMRNN'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Convolutional Neural Network (1D)'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Convolutional Neural Network (1D)'] = eval(
request.POST.get('dl_classification_CNN'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification'].get('Neural Architecture Search') == 'True':
configSettings['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Architecture Search'] = eval(
request.POST.get('dl_classification_NAS'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Neural Network'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Network'] = eval(
request.POST.get('dl_regression_SNN'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Recurrent Neural Network'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network'] = eval(
request.POST.get('dl_regression_RNN'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Recurrent Neural Network (GRU)'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)'] = eval(
request.POST.get('dl_regression_GRURNN'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Recurrent Neural Network (LSTM)'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)'] = eval(
request.POST.get('dl_regression_LSTMRNN'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Convolutional Neural Network (1D)'] == 'True':
configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Convolutional Neural Network (1D)'] = eval(
request.POST.get('dl_regression_CNN'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression'].get('Neural Architecture Search') == 'True':
configSettings['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Architecture Search'] = eval(
request.POST.get('dl_regression_NAS'))
#configSettings['advance']['dllearner_config']['optimizationMethod'] = request.POST.get('DLOptimizationMethod')
else:
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online Logistic Regression'] == 'True':
configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Logistic Regression'] = eval(request.POST.get('OnlineLogisticRegression'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online Decision Tree Classifier'] == 'True':
configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Decision Tree Classifier'] = eval(request.POST.get('OnlineDecisionTreeClassifier'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online Softmax Regression'] == 'True':
configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Softmax Regression'] = eval(request.POST.get('OnlineSoftmaxRegression'))
if problem_type.lower() == 'classification' and configSettings['basic']['algorithms']['classification']['Online KNN Classifier'] == 'True':
configSettings['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online KNN Classifier'] = eval(request.POST.get('OnlineKNNClassifier'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Online Linear Regression'] == 'True':
configSettings['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Linear Regression'] = eval(request.POST.get('OnlineLinearRegression'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Online Decision Tree Regressor'] == 'True':
configSettings['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Decision Tree Regressor'] = eval(request.POST.get('OnlineDecisionTreeRegressor'))
if problem_type.lower() == 'regression' and configSettings['basic']['algorithms']['regression']['Online KNN Regressor'] == 'True':
configSettings['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online KNN Regressor'] = eval(request.POST.get('OnlineKNNRegressor'))
configSettings['advance']['profiler']['targetEncodingParams'] = eval(request.POST.get('targetEncodingParams'))
configSettings['advance']['profiler']['outlierDetectionParams'] = eval(request.POST.get('outlierDetectionParams'))
if problem_type.lower() == 'objectdetection':
configSettings['advance']['objectDetection']['pretrainedModel']= request.POST.get('objectdetectionpretrainedmodel')
configSettings['advance']['objectDetection']['n_epoch'] = int(request.POST.get('objectDetection_n_epoch'))
configSettings['advance']['objectDetection']['batch_size'] = int(request.POST.get('objectDetection_batch_size'))
if problem_type.lower() == 'timeseriesforecasting': #task 11997 #task 13052
configSettings['advance']['timeSeriesForecasting']['fix_seasonality'] = request.POST.get('seasionality') # task 13052
configSettings['advance']['timeSeriesForecasting']['fix_stationarity'] =request.POST.get('stationarity') # task 13052
configSettings['advance']['timeSeriesForecasting']['modelParams']['ARIMA'] = eval(request.POST.get('ARIMA')) #task 11997
configSettings['advance']['timeSeriesForecasting']['modelParams']['FBPROPHET'] = eval(request.POST.get('FBPROPHET')) #task 11997
configSettings['advance']['timeSeriesForecasting']['modelParams']['LSTM'] = eval(request.POST.get('TSLSTM')) #task 11997
configSettings['advance']['timeSeriesForecasting']['modelParams']['Encoder_Decoder_LSTM_MVI_UVO'] = eval(request.POST.get('TSLSTMencoderdecoder'))
configSettings['advance']['timeSeriesForecasting']['modelParams']['MLP'] = eval(request.POST.get('TSMLP')) #task 11997
if problem_type.lower() == 'timeseriesanomalydetection':
configSettings['advance']['timeSeriesAnomalyDetection']['modelParams']['AutoEncoder'] = eval(request.POST.get('autoEncoderAD')) #task 11997
configSettings['advance']['timeSeriesAnomalyDetection']['modelParams']['DBScan'] = eval(request.POST.get('dbscanAD')) #task 13316
if problem_type.lower() == 'anomalydetection':
configSettings['advance']['anomalyDetection']['modelParams']['IsolationForest'] = eval(request.POST.get('IsolationForest'))
configSettings['advance']['anomalyDetection']['modelParams']['oneclassSVM'] = eval(request.POST.get('oneclassSVM'))
configSettings['advance']['anomalyDetection']['modelParams']['DBScan'] = eval(request.POST.get('DBScanAD'))
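# rewrite the already-open config file in place: rewind, write the new JSON,
# then truncate() so no stale bytes remain if the new content is shorter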
updatedConfigSettingsJson = json.dumps(configSettings)
f.seek(0)
f.write(updatedConfigSettingsJson)
f.truncate()
f.close()
errormsg = 'NA'
request.session['ModelStatus'] = 'Not Trained'
except Exception as e:
import sys
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
errormsg = 'Input value error'
print(e)
if 'NoOfRecords' in request.session:
records = request.session['NoOfRecords']
else:
records = 'NA'
if request.session['datatype'] in ['Video', 'Image','Document']:
folderLocation = str(request.session['datalocation'])
dataFilePath = os.path.join(folderLocation, request.session['csvfullpath'])
else:
dataFilePath = str(request.session['datalocation'])
# dataFilePath = configSettings['basic']['dataLocation']
#df = pd.read_csv(dataFilePath, encoding='latin1')
featuresList = configSettings['basic']['featureList']
config = {}
config['modelName'] = configSettings['basic']['modelName']
config['modelVersion'] = configSettings['basic']['modelVersion']
config['datetimeFeatures'] = configSettings['basic']['dateTimeFeature']
config['sequenceFeatures'] = configSettings['basic']['indexFeature']
config['FeaturesList'] = featuresList
config['unimportantFeatures'] = list(set(featuresList) - set(configSettings['basic']['trainingFeatures']))
config['targetFeature'] = configSettings['basic']['targetFeature']
scoring = configSettings['basic']['scoringCriteria']
scoringCriteria = ""
for k in scoring.keys():
if configSettings['basic']['scoringCriteria'][k] == 'True':
scoringCriteria = k
break
config['scoringCriteria'] = scoringCriteria
temp = {}
temp['ModelName'] = configSettings['basic']['modelName']
temp['Version'] = configSettings['basic']['modelVersion']
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
context = {'tab': 'advconfig', 'config': config, 'temp': temp, 'advconfig': configSettings,
'noOfRecords': records, 'advance_status_msg': 'Configuration Done',
'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'errormsg':errormsg,
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],
'selected': 'modeltraining'}
return context
elif submittype == 'AdvanceDefault':
try:
MachineLearningModels = []
configFile = os.path.join(DEFAULT_FILE_PATH, 'aion_config.json')
f = open(configFile, "r")
configSettings = f.read()
f.close()
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r+")
configSettingsData = f.read()
updateconfigSettingsJson = json.loads(configSettingsData)
configSettingsJson = json.loads(configSettings)
temp = {}
temp['ModelName'] = request.session['UseCaseName']
temp['Version'] = request.session['ModelVersion']
config = {}
config['modelName'] = request.session['UseCaseName']
config['modelVersion'] = request.session['ModelVersion']
config['datetimeFeatures'] = updateconfigSettingsJson['basic']['dateTimeFeature']
config['sequenceFeatures'] = updateconfigSettingsJson['basic']['indexFeature']
config['FeaturesList'] = updateconfigSettingsJson['basic']['trainingFeatures']
config['unimportantFeatures'] = ''
config['targetFeature'] = updateconfigSettingsJson['basic']['targetFeature']
problemtypes = updateconfigSettingsJson['basic']['analysisType']
problem_type = ""
for k in problemtypes.keys():
if updateconfigSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
selectAlgo = ""
if problem_type in ['classification','regression','timeSeriesForecasting',
'timeSeriesAnomalyDetection',
'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition']: #task 11997
for key in updateconfigSettingsJson['basic']['algorithms'][problem_type]:
if updateconfigSettingsJson['basic']['algorithms'][problem_type][key] == 'True':
if selectAlgo != "":
selectAlgo += ','
selectAlgo += key
if problem_type not in ['classification','regression']:
break
for key in updateconfigSettingsJson['basic']['algorithms'][problem_type]:
if updateconfigSettingsJson['basic']['algorithms'][problem_type][key] == 'True':
MachineLearningModels.append(key)
if problem_type == 'objectDetection':
from AION import pretrainedModels
ptmObj = pretrainedModels()
obModels = ptmObj.get_info(selectAlgo)
else:
obModels = {}
problemType = problem_type
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
request.session['currentstate'] = 2
if request.session['finalstate'] <= 2:
request.session['finalstate'] = 2
outlierDetection = 'False'
updateconfigSettingsJson['advance'] = configSettingsJson['advance']
for x in list(updateconfigSettingsJson['advance']['profiler']['outlierDetection'].keys()):
if updateconfigSettingsJson['advance']['profiler']['outlierDetection'][x] == 'True':
outlierDetection = 'True'
if outlierDetection == 'False':
updateconfigSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'True'
else:
updateconfigSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'False'
updateconfigSettingsJson = advanceConfigfields(updateconfigSettingsJson)
#print(configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['ExtremeGradientBoostingClassifier'])
updateconfigSettingsJson['advance']['profiler']['normalizationMethod'] = 'None'
normalizationtypes = updateconfigSettingsJson['advance']['profiler']['normalization']
for k in normalizationtypes.keys():
if updateconfigSettingsJson['advance']['profiler']['normalization'][k] == 'True':
updateconfigSettingsJson['advance']['profiler']['normalizationMethod'] = k
break
#---------------- default hyperparameter changes ----------Usnish--------------
hyperparamFile = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config', 'hyperparam_config.json'))
with open(hyperparamFile) as json_file:
hyperparamConfig = json.load(json_file)
context = {'tab': 'advconfig','temp': temp,'advconfig': updateconfigSettingsJson,
'config': config, 'selected_use_case': selected_use_case,'MachineLearningModels':MachineLearningModels,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,"obModels":obModels,"problemType":problemType,
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],
'selected': 'modeltraining','advance_help':ht.advance_help,'hyperparamConfig':hyperparamConfig}
return context
except Exception as e:
print(e)
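# --- hedged sketch (not wired into the views above): a safer stand-in for the
# many eval(request.POST.get(...)) calls. It assumes each field carries a Python
# literal such as a dict of hyperparameters; anything else returns 'default'
# instead of executing code. The helper name and fallback are illustrative.
import ast

def parse_post_literal(value, default=None):
    # ast.literal_eval only accepts literal syntax (strings, numbers, tuples,
    # lists, dicts, booleans, None), so arbitrary expressions raise here
    if value is None:
        return default
    try:
        return ast.literal_eval(value)
    except (ValueError, SyntaxError):
        return default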
def llmadvancesettings(configSettings,request):
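"""Copy the LLM fine-tuning hyperparameters posted by the UI into
configSettings for whichever base model is enabled in the basic config."""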
algo = ''
for x in list(configSettings['basic']['algorithms']['llmFineTuning'].keys()):
if configSettings['basic']['algorithms']['llmFineTuning'][x] == 'True':
algo = x
if algo == 'LLaMA-2':
configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['fineTuningMethod'] = request.POST.get('llama2fullfinemethod')
configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['epochs'] = request.POST.get('llama2epochs')
configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['learning_rate'] = request.POST.get('llama2learningrate')
if request.POST.get('llama2fullfinemethod') != 'Full Fine-Tuning':
configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['lora_rank'] = request.POST.get('llama2lorarank')
configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2']['lora_alpha'] = request.POST.get('llama2loraalpha')
if algo == 'LLaMA-2-Chat':
configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['fineTuningMethod'] = request.POST.get('llama2chatfullfinemethod')
configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['epochs'] = request.POST.get('llmllama2chatepochs')
configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['learning_rate'] = request.POST.get('llama2chatlearningrate')
if request.POST.get('llama2chatfullfinemethod') != 'Full Fine-Tuning':
configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['lora_rank'] = request.POST.get('llama2chatlorarank')
configSettings['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']['lora_alpha'] = request.POST.get('llama2chatloraalpha')
if algo == 'CodeLLaMA-2':
configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['fineTuningMethod'] = request.POST.get('CodeLLaMA2fullfinemethod')
configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['epochs'] = request.POST.get('CodeLLaMA2epochs')
configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['learning_rate'] = request.POST.get('CodeLLaMA2learningrate')
if request.POST.get('CodeLLaMA2fullfinemethod') != 'Full Fine-Tuning':
configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['lora_rank'] = request.POST.get('CodeLLaMA2lorarank')
configSettings['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']['lora_alpha'] = request.POST.get('CodeLLaMA2loraalpha')
if algo == 'Falcon':
configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['fullFineTuning'] = request.POST.get('falconfullfinetuning')
configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['epochs'] = request.POST.get('falconepochs')
configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['learning_rate'] = request.POST.get('falconlearningrate')
configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['lora_rank'] = request.POST.get('falconlorarank')
configSettings['advance']['llmFineTuning']['modelParams']['Falcon']['lora_alpha'] = request.POST.get('falconloraalpha')
return configSettings
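# --- hedged sketch: every block in llmadvancesettings() has the same shape, but
# the POST field names are irregular (e.g. 'llama2epochs' vs 'llmllama2chatepochs'),
# so a table-driven version needs an explicit field map per model. The field names
# below are copied from the calls above; the constant itself is illustrative.
# LLM_POST_FIELDS = {
#     'LLaMA-2': {'epochs': 'llama2epochs', 'learning_rate': 'llama2learningrate'},
#     'LLaMA-2-Chat': {'epochs': 'llmllama2chatepochs', 'learning_rate': 'llama2chatlearningrate'},
# }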
def advanceConfigfields(configSettingsJson):
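"""Mirror the display-name algorithm keys (e.g. 'Logistic Regression') under
alias identifiers (e.g. 'LogisticRegression') so downstream template code can
address them without spaces or parentheses."""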
try:
configSettingsJson['advance']['mllearner_config']['EnsembleStacking'] = configSettingsJson['advance']['mllearner_config']['Stacking (Ensemble)']
configSettingsJson['advance']['mllearner_config']['EnsembleVoting'] = configSettingsJson['advance']['mllearner_config']['Voting (Ensemble)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['LogisticRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Logistic Regression']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['GaussianNB'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Naive Bayes']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['SVC'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Support Vector Machine']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['KNeighborsClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['K Nearest Neighbors']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['DecisionTreeClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Decision Tree']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['RandomForestClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Random Forest']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['GradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Gradient Boosting']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['ExtremeGradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Extreme Gradient Boosting (XGBoost)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['LightGradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Light Gradient Boosting (LightGBM)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['CategoricalBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Categorical Boosting (CatBoost)']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (GRU)']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (LSTM)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleStacking'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Stacking (Ensemble)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['LogisticRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Logistic Regression']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['NaiveBayes'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Naive Bayes']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['SVM'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Support Vector Machine']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['KNN'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['K Nearest Neighbors']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['DecisionTree'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Decision Tree']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['RandomForest'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Random Forest']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)']
configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['Deep Q Network']
configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DDQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['Dueling Deep Q Network']
configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['Deep Q Network']
configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DDQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['Dueling Deep Q Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['CNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Convolutional Neural Network (1D)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LinearRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Linear Regression']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['DecisionTreeRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Decision Tree']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['RandomForestRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Random Forest']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['XGBoostRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Extreme Gradient Boosting (XGBoost)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LightGBMRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Light Gradient Boosting (LightGBM)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['CatBoostRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Categorical Boosting (CatBoost)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleStacking'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Stacking (Ensemble)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['LinearRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['Linear Regression']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['DecisionTree'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['Decision Tree']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['NAS'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Architecture Search']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['NAS'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Architecture Search']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['CNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Convolutional Neural Network (1D)']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineLogisticRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Logistic Regression']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineDecisionTreeClassifier'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Decision Tree Classifier']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineSoftmaxRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Softmax Regression']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineKNNClassifier'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online KNN Classifier']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineLinearRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Linear Regression']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineDecisionTreeRegressor'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Decision Tree Regressor']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineKNNRegressor'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online KNN Regressor']
configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis'] = configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis']
configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] = configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis']
if 'llmFineTuning' in configSettingsJson['advance']:
configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2'] = \\
configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2']
configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2Chat'] = \\
configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2-Chat']
configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA2'] = \\
configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA-2']
configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2'] = \\
configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2']
configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2Chat'] = \\
configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']
configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA2'] = \\
configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']
configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2'] = \\
configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2']
configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2Chat'] = \\
configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2-Chat']
configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA2'] = \\
configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA-2']
if 'distributedlearner_config' in configSettingsJson['advance']:
configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][
'DistributedXGBoost'] = \\
configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][
'Distributed Extreme Gradient Boosting (XGBoost)']
configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][
'DistributedLightGBM'] = \\
configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams'][
'Distributed Light Gradient Boosting (LightGBM)']
configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][
'DistributedXGBoost'] = \\
configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][
'Distributed Extreme Gradient Boosting (XGBoost)']
configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][
'DistributedLightGBM'] = \\
configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams'][
'Distributed Light Gradient Boosting (LightGBM)']
problem_type = ""
problemtypes = configSettingsJson['basic']['analysisType']
for k in problemtypes.keys():
if configSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
deepLearning = 'False'
machineLearning = 'False'
reinforcementLearning = 'False'
selectAlgo = ""
if problem_type.lower() in ['classification','regression']:
for key in configSettingsJson['basic']['algorithms'][problem_type]:
if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True':
if key in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','Neural Architecture Search']:
deepLearning = 'True'
if key in ['Logistic Regression','Naive Bayes','Decision Tree','Random Forest','Support Vector Machine','K Nearest Neighbors','Gradient Boosting','Extreme Gradient Boosting (XGBoost)','Light Gradient Boosting (LightGBM)','Categorical Boosting (CatBoost)','Linear Regression','Lasso','Ridge','Decision Tree','Random Forest','Bagging (Ensemble)']:
machineLearning = 'True'
if key in ['Deep Q Network','Dueling Deep Q Network']:
reinforcementLearning = 'True'
elif problem_type.lower() in ['clustering','topicmodelling']:#clustering(Bug 12611)
machineLearning = 'True'
configSettingsJson['basic']['deepLearning'] = deepLearning
configSettingsJson['basic']['machineLearning'] = machineLearning
configSettingsJson['basic']['reinforcementLearning'] = reinforcementLearning
except Exception as e:
print(e)
return (configSettingsJson)
def basicconfignex(request):
#pemfilename = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','modelTraining','static','key','AION_GPU.pem'))
try:
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r+")
configSettingsData = f.read()
configSettingsJson = json.loads(configSettingsData)
        #---------------- default Hyperparameter changes-------------Usnish--------------
hyperparamFile = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config', 'hyperparam_config.json'))
with open(hyperparamFile) as json_file:
hyperparamConfig = json.load(json_file)
        #---------------- default Hyperparameter changes end-------------Usnish--------------
# ------------------ Debiasing Changes ------------------
categorical_features = []
class_list = []
MachineLearningModels = []
        check_target = configSettingsJson['basic']['targetFeature']
selectedDebiasingFeature = 'None'
selectedDebiasingClass = 'None'
selectedDebiasingAlgorithm = ''
problemtypes = configSettingsJson['basic']['analysisType']
problem_type = ""
for k in problemtypes.keys():
if configSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
if request.method == 'GET':
for key in configSettingsJson['basic']['algorithms'][problem_type]:
if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True':
MachineLearningModels.append(key)
else:
MachineLearningModels = request.POST.getlist('MachineLearningModels')
if problem_type.lower() in ['classification','regression']:
            if check_target != '':
try:
if 'deBiasing' in configSettingsJson['advance']['profiler']:
deBiasing = configSettingsJson['advance']['profiler']['deBiasing']
selectedDebiasingFeature = deBiasing.get('FeatureName','None')
selectedDebiasingClass = deBiasing.get('ClassName','None')
selectedDebiasingAlgorithm = deBiasing.get('Algorithm','')
if selectedDebiasingFeature != 'None':
df = pd.read_csv(configSettingsJson['basic']['dataLocation'],encoding='utf8',encoding_errors= 'replace')
classeslist = []
classeslist = df[selectedDebiasingFeature].unique().tolist()
for item in classeslist:
class_list.append(item)
else:
class_list.append('None')
except:
pass
feature_dict = configSettingsJson['advance']['profiler']['featureDict']
for feature_config in feature_dict:
            if feature_config.get('type', '') == 'categorical' and feature_config['feature'] != check_target:
categorical_features.append(feature_config['feature'])
# ------------------ ------------------
#print(categorical_features)
temp = {}
temp['ModelName'] = request.session['UseCaseName']
temp['Version'] = request.session['ModelVersion']
config = {}
config['modelName'] = request.session['UseCaseName']
config['modelVersion'] = request.session['ModelVersion']
config['datetimeFeatures'] = configSettingsJson['basic']['dateTimeFeature']
config['sequenceFeatures'] = configSettingsJson['basic']['indexFeature']
config['FeaturesList'] = configSettingsJson['basic']['trainingFeatures']
config['unimportantFeatures'] = ''
config['targetFeature'] = configSettingsJson['basic']['targetFeature']
deepLearning = 'False'
machineLearning = 'False'
reinforcementLearning = 'False'
selectAlgo = ""
print(problem_type)
if problem_type.lower() in ['classification','regression']:
for key in configSettingsJson['basic']['algorithms'][problem_type]:
if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True':
if key in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','Neural Architecture Search']:
deepLearning = 'True'
if key in ['Logistic Regression','Naive Bayes','Decision Tree','Random Forest','Support Vector Machine','K Nearest Neighbors','Gradient Boosting','Extreme Gradient Boosting (XGBoost)','Light Gradient Boosting (LightGBM)','Categorical Boosting (CatBoost)','Linear Regression','Lasso','Ridge','Decision Tree','Random Forest','Bagging (Ensemble)']:
machineLearning = 'True'
if key in ['Deep Q Network','Dueling Deep Q Network']:
reinforcementLearning = 'True'
elif problem_type.lower() in ['clustering','topicmodelling']:#clustering(Bug 12611)
machineLearning = 'True'
configSettingsJson['basic']['deepLearning'] = deepLearning
configSettingsJson['basic']['machineLearning'] = machineLearning
configSettingsJson['basic']['reinforcementLearning'] = reinforcementLearning
if problem_type in ['classification','regression','timeSeriesForecasting',
'timeSeriesAnomalyDetection',
'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition']: #task 11997
for key in configSettingsJson['basic']['algorithms'][problem_type]:
if configSettingsJson['basic']['algorithms'][problem_type][key] == 'True':
if selectAlgo != "":
selectAlgo += ','
selectAlgo += key
if problem_type not in ['classification','regression']:
break
if problem_type == 'objectDetection':
from AION import pretrainedModels
ptmObj = pretrainedModels()
obModels = ptmObj.get_info(selectAlgo)
else:
obModels = {}
problemType = problem_type
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
request.session['currentstate'] = 2
#configSettingsJson['advance']['remoteTraining']['ssh']['keyFilePath'] = pemfilename
if request.session['finalstate'] <= 2:
request.session['finalstate'] = 2
outlierDetection = 'False'
for x in list(configSettingsJson['advance']['profiler']['outlierDetection'].keys()):
if configSettingsJson['advance']['profiler']['outlierDetection'][x] == 'True':
outlierDetection = 'True'
if outlierDetection == 'False':
configSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'True'
else:
configSettingsJson['advance']['profiler']['outlierDetection']['Disable'] = 'False'
if 'distributedLearning' not in configSettingsJson['basic']:
configSettingsJson['basic']['distributedLearning'] = 'False'
configSettingsJson['advance']['mllearner_config']['EnsembleStacking']=configSettingsJson['advance']['mllearner_config']['Stacking (Ensemble)']
configSettingsJson['advance']['mllearner_config']['EnsembleVoting']=configSettingsJson['advance']['mllearner_config']['Voting (Ensemble)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['LogisticRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Logistic Regression']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['GaussianNB'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Naive Bayes']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['SVC'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Support Vector Machine']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['KNeighborsClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['K Nearest Neighbors']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['DecisionTreeClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Decision Tree']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['RandomForestClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Random Forest']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['GradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Gradient Boosting']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['ExtremeGradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Extreme Gradient Boosting (XGBoost)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['LightGradientBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Light Gradient Boosting (LightGBM)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['CategoricalBoostingClassifier'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Categorical Boosting (CatBoost)']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Neural Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (GRU)']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Recurrent Neural Network (LSTM)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']=configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Bagging (Ensemble)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleStacking']=configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['Stacking (Ensemble)']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['LogisticRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Logistic Regression']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['NaiveBayes'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Naive Bayes']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['SVM'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Support Vector Machine']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['KNN'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['K Nearest Neighbors']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['DecisionTree'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Decision Tree']
configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['RandomForest'] = configSettingsJson['advance']['mllearner_config']['modelParams']['classifierModelParams']['EnsembleBagging']['Random Forest']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)']
configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['Deep Q Network']
configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['DDQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['classifierModelParams']['Dueling Deep Q Network']
configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['Deep Q Network']
configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['DDQN'] = configSettingsJson['advance']['rllearner_config']['modelParams']['regressorModelParams']['Dueling Deep Q Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['CNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['Convolutional Neural Network (1D)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LinearRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Linear Regression']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['DecisionTreeRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Decision Tree']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['RandomForestRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Random Forest']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['XGBoostRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Extreme Gradient Boosting (XGBoost)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['LightGBMRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Light Gradient Boosting (LightGBM)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['CatBoostRegressor'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Categorical Boosting (CatBoost)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleStacking']=configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Stacking (Ensemble)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']=configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['Bagging (Ensemble)']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['LinearRegression'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['Linear Regression']
configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['DecisionTree'] = configSettingsJson['advance']['mllearner_config']['modelParams']['regressorModelParams']['EnsembleBagging']['Decision Tree']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['NAS'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams'].get('Neural Architecture Search')
configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams']['NAS'] = configSettingsJson['advance']['dllearner_config']['modelParams']['classifierModelParams'].get('Neural Architecture Search')
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Neural Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['SimpleRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['GRURNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (GRU)']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['LSTMRNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Recurrent Neural Network (LSTM)']
configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['CNN'] = configSettingsJson['advance']['dllearner_config']['modelParams']['regressorModelParams']['Convolutional Neural Network (1D)']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineLogisticRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Logistic Regression']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineDecisionTreeClassifier'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Decision Tree Classifier']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineSoftmaxRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online Softmax Regression']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['OnlineKNNClassifier'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['classifierModelParams']['Online KNN Classifier']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineLinearRegression'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Linear Regression']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineDecisionTreeRegressor'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online Decision Tree Regressor']
configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['OnlineKNNRegressor'] = configSettingsJson['advance']['onlinelearner_config']['modelParams']['regressorModelParams']['Online KNN Regressor']
configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis'] = configSettingsJson['advance']['profiler']['textConversionMethod']['LatentSemanticAnalysis']
configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis'] = configSettingsJson['advance']['profiler']['embeddingSize']['LatentSemanticAnalysis']
if 'llmFineTuning' in configSettingsJson['advance']:
configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2'] = configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2']
configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA2Chat'] = configSettingsJson['basic']['algorithms']['llmFineTuning']['LLaMA-2-Chat']
configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA2'] = configSettingsJson['basic']['algorithms']['llmFineTuning']['CodeLLaMA-2']
configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2'] = configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2']
            configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA2Chat'] = configSettingsJson['advance']['llmFineTuning']['modelParams']['LLaMA-2-Chat']
configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA2'] = configSettingsJson['advance']['llmFineTuning']['modelParams']['CodeLLaMA-2']
configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2'] = \\
configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2']
configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA2Chat'] = \\
configSettingsJson['basic']['modelSize']['llmFineTuning']['LLaMA-2-Chat']
configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA2'] = \\
configSettingsJson['basic']['modelSize']['llmFineTuning']['CodeLLaMA-2']
if 'distributedlearner_config' in configSettingsJson['advance']:
configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['DistributedXGBoost'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Extreme Gradient Boosting (XGBoost)']
configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['DistributedLightGBM'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['classifierModelParams']['Distributed Light Gradient Boosting (LightGBM)']
configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['DistributedXGBoost'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['Distributed Extreme Gradient Boosting (XGBoost)']
configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['DistributedLightGBM'] = configSettingsJson['advance']['distributedlearner_config']['modelParams']['regressorModelParams']['Distributed Light Gradient Boosting (LightGBM)']
configSettingsJson['advance']['profiler']['normalizationMethod'] = 'None'
normalizationtypes = configSettingsJson['advance']['profiler']['normalization']
for k in normalizationtypes.keys():
if configSettingsJson['advance']['profiler']['normalization'][k] == 'True':
configSettingsJson['advance']['profiler']['normalizationMethod'] = k
break
context = {'temp': temp, 'advconfig': configSettingsJson, 'MachineLearningModels':MachineLearningModels,'hyperparamConfig':hyperparamConfig,'config': config, 'selected_use_case': selected_use_case,
'categorical_features': categorical_features, 'selectedDebiasingFeature': selectedDebiasingFeature, 'selectedDebiasingAlgorithm': selectedDebiasingAlgorithm, 'Class_list': class_list, 'selectedDebiasingClass': selectedDebiasingClass, #Debiasing Changes
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,"obModels":obModels,"problemType":problemType,
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],
'selected': 'modeltraning','advance_help':ht.advance_help}
return context
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
context={'erroradvance':'Fail to load advance config Json file'}
return context
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
from os.path import expanduser
import platform
import json
import subprocess
import re
import sys
import pandas as pd
from django.http import HttpResponse
from appbe.dataPath import DATA_DIR
Usecaselocation = os.path.join(DATA_DIR,'Usecases')
def mlstyles(request):
try:
from appbe.aion_config import settings
usecasetab = settings()
selectid = request.GET['usecaseid']
configFile = os.path.join(Usecaselocation, 'usecases.json')
f = open(configFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
#usecase = configSettingsJson['usecaselist']
desciption=""
usecasename=""
found = False
for v_id in configSettingsJson['verticallist']:
for p_id in v_id['usecaselist']:
usecaseid = p_id.get('usecaseid')
if str(usecaseid) == str(selectid) :
usecasename = p_id.get('usecasename')
desciption = p_id.get('desciption')
usecaseid = p_id.get('usecaseid')
iconname = p_id.get('iconname')
prediction_input = p_id.get('prediction_input')
outputtype = p_id.get('outputtype')
smalldescription = p_id.get('smalldescription')
trainingFeatures = p_id.get('trainingFeatures','None')
if trainingFeatures != 'None':
trainingFeatures = trainingFeatures.split(',')
found = True
break
if found == True:
break
#print(usecaseid,selectid)
context ={'usecasename':usecasename,'desciption':desciption,'prediction_input':prediction_input,'usecaseid':usecaseid,'trainingFeatures':trainingFeatures,'iconname':iconname,'smalldescription':smalldescription,'outputtype':outputtype,'usecasetab':usecasetab}
return context
except Exception as inst:
print(inst)
context = { 'error3':'error3','error1': "No UseCases to show"}
return context
def getusecasedetails(selectid):
configFile = os.path.join(Usecaselocation, 'usecases.json')
f = open(configFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
#usecase = configSettingsJson['usecaselist']
desciption=""
usecasename=""
found = False
for v_id in configSettingsJson['verticallist']:
for p_id in v_id['usecaselist']:
usecaseid = p_id.get('usecaseid')
if str(usecaseid) == str(selectid) :
usecasename = p_id.get('usecasename')
desciption = p_id.get('desciption')
usecaseid = p_id.get('usecaseid')
modelConfig = p_id.get('modelConfig')
folder = p_id.get('folder')
prediction = p_id.get('prediction')
prediction_input = p_id.get('prediction_input')
ai_modeldata = p_id.get('modeldata')
outputtype = p_id.get('outputtype')
smalldescription = p_id.get('smalldescription')
prediction_template = p_id.get('prediction_template')
trainingFeatures = p_id.get('trainingFeatures','None')
if trainingFeatures != 'None':
trainingFeatures = trainingFeatures.split(',')
found = True
break
if found == True:
break
#print(usecasename)
return(usecasename,desciption,usecaseid,modelConfig,folder,prediction,prediction_input,ai_modeldata,outputtype,smalldescription,prediction_template,trainingFeatures)
def mlpredict(request):
selectid=request.POST.get('usecaseid')
mlpredict =request.POST.get('mlpredict')
usecasename,desciption,usecaseid,modelConfig,folder,prediction,prediction_input,ai_modeldata,outputtype,smalldescription,prediction_template,trainingFeatures = getusecasedetails(selectid)
from appbe.aion_config import settings
usecasetab = settings()
usecasename = usecasename
desciption = desciption
input=''
for x in prediction_input:
if input != '':
input += ','
        input += request.POST.get(x['name'])
if mlpredict in ['prediction','predictsingle']:
if mlpredict == 'prediction':
dataFile = request.POST.get('predictfilePath')
if(os.path.isfile(dataFile) == False) or dataFile=="":
context = {'usecaseid':selectid ,'dataFile':dataFile,'usecasename':usecasename,'desciption':desciption , 'error1': 'Please enter a valid csv filepath','usecasetab':usecasetab}
return context, mlpredict
else:
inputFieldsDict = {}
for feature in trainingFeatures:
inputFieldsDict[feature] = request.POST.get(feature)
dataFile = json.dumps(inputFieldsDict)
try:
predictionScriptPath= os.path.join(Usecaselocation,folder,'model',prediction)
# predictionScriptPath = os.path.join(predictionscript, 'aion_prediction.py')
outputStr = subprocess.check_output([sys.executable, predictionScriptPath, dataFile,input])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'predictions:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
predict_dict = json.loads(outputStr)
#print(predict_dict)
heading =''
timetaken=''
print(predict_dict)
if (predict_dict['status'] == 'SUCCESS'):
predictionResults = predict_dict['data']
#print(predictionResults)
if 'heading' in predict_dict:
heading = predict_dict['heading']
if 'Time' in predict_dict:
timetaken = round(predict_dict['Time'],2)
if outputtype.lower() in ['similarityidentification','contextualsearch']:
data = predictionResults[0]
predictionResults= []
Results={}
prediction = data['prediction']
i = 1
for x in prediction:
te = ''
for y in x:
info = (str(x[y])[:100] + '...') if len(str(x[y])) > 100 else str(x[y])
te += y+': '+info+'\\n\\n'
Results[i] = te
i = i+1
predictionResults.append(Results)
else:
context = {'usecaseid':selectid ,'dataFile':dataFile,'prediction_input':prediction_input,'usecasename':usecasename,'desciption':desciption , 'error': 'Failed To perform prediction','usecasetab':usecasetab}
return context, mlpredict
print(heading)
context = {'usecasename':usecasename,'desciption':desciption,'prediction_input':prediction_input,'usecaseid':selectid ,'dataFile':dataFile,'predictionResults': predictionResults,'outputtype':outputtype,'heading':heading,'timetaken':timetaken,'usecasetab':usecasetab,'trainingFeatures':trainingFeatures}
return context, mlpredict
except Exception as inst:
print(inst)
context = { 'usecaseid':selectid ,'dataFile':dataFile,'usecasename':usecasename,'desciption':desciption ,'errorp': 'Failed To perform prediction','usecasetab':usecasetab}
return context, mlpredict
if mlpredict == 'download_predict':
# predictionResults = 'C:\\\\DataSets\\\\Classification\\\\bug_severity_class.csv'
try:
csvdata= os.path.join(Usecaselocation,folder,'Data',prediction_template)
if os.path.isfile(csvdata) and os.path.exists(csvdata):
df = pd.read_csv(csvdata,encoding='utf8',encoding_errors= 'replace')
downloadFileName = usecasename.replace(" ", "_") + '_predict.csv'
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename='+downloadFileName
df.to_csv(response, index=False)
return response,mlpredict
else:
context = {'usecaseid':selectid ,'dataFile':dataFile,'usecasename':usecasename,'desciption':desciption, 'error': 'File not found','usecasetab':usecasetab}
return context, mlpredict
except Exception as inst:
context = { 'usecaseid':selectid ,'usecasename':usecasename,'desciption':desciption, 'error3':'error3','error1': 'Failed To Download','usecasetab':usecasetab}
            return context, mlpredict
def process(data):
cleaned_data = {"verticallist":[]}
for vertical in data['verticallist']:
updated_list = []
for usecase in vertical['usecaselist']:
if usecase['prediction'] and usecase['prediction'] != "Not Implemented":
updated_list.append(usecase)
if updated_list:
cleaned_data['verticallist'].append({'id':vertical['id'],'name':vertical['name'],'usecaselist':updated_list})
return cleaned_data
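# --- Hedged sketch: exercising process() on a synthetic structure that mirrors usecases.json
# --- (keys below are assumptions inferred from how process() reads them).
if __name__ == '__main__':
    _demo = {'verticallist': [{'id': 1, 'name': 'Retail', 'usecaselist': [
        {'usecasename': 'Churn', 'prediction': 'aion_prediction.py'},
        {'usecasename': 'Stub', 'prediction': 'Not Implemented'}]}]}
    # Only 'Churn' survives; verticals whose usecaselist empties out are dropped entirely.
    print(process(_demo))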
def Aiusecases(request,selectedoption='Implemented'):
try:
from appbe.aion_config import settings
usecasetab = settings()
configFile = os.path.join(Usecaselocation, 'usecases.json')
f = open(configFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
if selectedoption == 'Implemented':
configSettingsJson = process(configSettingsJson)
usecasedetails = configSettingsJson['verticallist']
context ={'desciption1':usecasedetails,'selected':'AIusecases','usecasetab':usecasetab}
return context
except Exception as e:
print(e)
context ={'error':"No Usecases to Show",'selected':'AIusecases','usecasetab':usecasetab}
return context
def mltrain(request):
from appbe.aion_config import settings
usecasetab = settings()
selectid =request.POST.get('usecaseid1')
mltrain =request.POST.get('mltrain')
usecasename,desciption,usecaseid,modelConfig,folder,prediction,prediction_input,ai_modeldata,outputtype,smalldescription,prediction_template,trainingFeatures = getusecasedetails(selectid)
usecasename = usecasename
    desciption = desciption
if mltrain == 'training':
dataFile = request.POST.get('trainfilePath')
if(os.path.isfile(dataFile) == False) or dataFile=="":
context = {'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption ,'error3':'error3','error1': 'Please enter a valid csv filepath'}
return context, mltrain
try:
scriptPath = os.path.join(Usecaselocation,folder,'config','aion_train.py')
print(scriptPath,dataFile)
outputStr = subprocess.check_output([sys.executable, scriptPath, dataFile])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
train = json.loads(outputStr)
status = train['status']
DeployLocation = train['data']['deployLocation']
ModelType = train['data']['ModelType']
BestModel = train['data']['BestModel']
BestScore = train['data']['BestScore']
ScoreType = train['data']['ScoreType']
FeaturesUsed = train['data']['featuresused']
context={'result':train,'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption,'status':status,'DeployLocation':DeployLocation,'ModelType':ModelType,'BestModel':BestModel,'BestScore':BestScore,'ScoreType':ScoreType,'FeaturesUsed':FeaturesUsed,'result':'result','usecasetab':usecasetab}
return context,mltrain
except Exception as inst:
context = {'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption, 'errort': 'Failed To perform Training','usecasetab':usecasetab}
return context, mltrain
if mltrain == 'download_train':
try:
csvdata= os.path.join(Usecaselocation,folder,'data',ai_modeldata)
#print(csvdata)
if os.path.isfile(csvdata) and os.path.exists(csvdata):
df = pd.read_csv(csvdata,encoding='utf8',encoding_errors= 'replace')
downloadFileName = usecasename.replace(" ", "_") + '_training.csv'
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename='+downloadFileName
df.to_csv(response, index=False)
return response,mltrain
else:
context = {'usecaseid':selectid ,'datatrainFile':dataFile,'usecasename':usecasename,'desciption':desciption, 'error': 'File not found','usecasetab':usecasetab}
return context, mltrain
except Exception as inst:
context = { 'usecaseid':selectid ,'usecasename':usecasename,'desciption':desciption, 'error3':'error3','error1': 'Failed To Download','usecasetab':usecasetab}
return context, mltrain
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import pandas as pd
import requests
import re
import json
import sys
import time
from appbe.aion_config import get_llm_data
from appbe.dataPath import LOG_LOCATION
from appbe.log_ut import logg
import logging
import openai
import tiktoken
openai.api_key = ''
openai.api_base = ''
openai.api_type = ''
openai.api_version = ''
deployment_name="Text-Datvinci-03"
def generateLabelPerRecord(OrgData):
OrgData['LabelFromGPT'] = OrgData['Head_Description'].apply(lambda x: \\
generate_gpt3_response\\
("I am giving you the title and short description \\
in the format [Title:Description], \\
give me the related low level topics in one word in the \\
format[Topic: your primary topic] along with top 5 important keywords in the \\
format[Keywords: keywords]'{}' ".format(x)))
#Cleaning the output as it is from ChatGPT
OrgData['temp1'] = OrgData['LabelFromGPT'].apply(lambda x: (x.split('Topic:')[1]).replace(']',''))
OrgData['LabelFromGPT'] = OrgData['temp1'].apply(lambda x: (x.split('Keywords:')[0]).replace(']','').rstrip())
OrgData['Keywords'] = OrgData['temp1'].apply(lambda x: (x.split('Keywords:')[1]).replace(']',''))
OrgData = OrgData.drop(['temp1','Head_Description'], axis=1)
return OrgData
def generateLabelForChunkedRecords(OrgData):
import io
# OrgData = OrgData.head(120)
Head_Description = {"Head_Description": [] }
Head_Description2 = {"Head_Description": [] }
Head_Description['Head_Description'] = OrgData['Head_Description']
strt_ind = 0
brk_ind = 0
# encoding = tiktoken.get_encoding('p50k_base')
encoding = tiktoken.encoding_for_model("text-davinci-003")
chunks = []
_cur_token_count = 0
_chunk_token_count = 0
for ind in Head_Description['Head_Description'].index:
tokenized_text = encoding.encode(Head_Description['Head_Description'][ind])
_cur_token_count = len(tokenized_text)
if _cur_token_count >= 600:
OrgData['Head_Description'][ind] = OrgData['Head_Description'][ind][:1000]
upto_ind = ind + 1
Head_Description2['Head_Description'] = OrgData['Head_Description'][brk_ind:ind]
_chunk_token_count = encoding.encode(Head_Description2['Head_Description'].to_string())
if len(_chunk_token_count) >= 1200:
brk_ind = ind
# print(brk_ind)
chunks.append(ind-1)
_start_count = 0
if len(chunks) == 0:
output = generate_gpt3_response("I am giving you datatable of text records \\
for each record give me the related low level topics in one word as a data column called Topic\\
and important top five keywords as a data column called Keywords. \\
Provide me record number as Record and these two data columns as datatable for each record in the given datatable and number of records should be equivalent to the number of records in the given datatable of text records. '{}' ".format(Head_Description['Head_Description']))
out = io.StringIO(output[2:])
df = pd.read_csv(out, sep='\\t')
else:
chunks.append(len(Head_Description['Head_Description']))
for ind_val in chunks:
_cur_ind_val = ind_val
_recordsSent = 0
Head_Description = {"Head_Description": [] }
if _start_count == 0:
Head_Description['Head_Description'] = OrgData['Head_Description'][strt_ind:_cur_ind_val].to_string()
_recordsSent = len(OrgData['Head_Description'][strt_ind:_cur_ind_val])
else:
Head_Description['Head_Description'] = OrgData['Head_Description'][_pre_ind_val:_cur_ind_val].to_string()
_recordsSent = len(OrgData['Head_Description'][_pre_ind_val:_cur_ind_val])
_pre_ind_val = ind_val
# if _start_count <= 5:
output = generate_gpt3_response("I am giving you datatable of text records \\
for each record give me the related low level topics in one word as a data column called Topic\\
and important top five keywords as a data column called Keywords. \\
Provide me record number as Record and these two data columns as datatable for each record in the given datatable and number of records should be equivalent to the number of records in the given datatable of text records. '{}' ".format(Head_Description['Head_Description']))
out = io.StringIO(output[2:])
if _start_count == 0:
df = pd.read_csv(out, sep='\\t')
else:
df_tmp = pd.read_csv(out, sep='\\t')
if len(df_tmp) > _recordsSent:
df_tmp = df_tmp.head(_recordsSent)
# df = df.append(df_tmp, ignore_index=True)
df = pd.concat([df, df_tmp], ignore_index=True)
_start_count += 1
OrgData['LabelFromGPT'] = df['Topic']
OrgData['Keywords'] = df['Keywords']
OrgData = OrgData.drop(['Head_Description'], axis=1)
return OrgData
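# --- Hedged sketch: the token accounting that drives the chunk boundaries above, using the
# --- same tiktoken encoding as generateLabelForChunkedRecords (the sample text is invented).
if __name__ == '__main__':
    _enc = tiktoken.encoding_for_model('text-davinci-003')
    _sample = 'Title: Login fails. Description: Users see a 500 error after entering credentials.'
    # Records over ~600 tokens are truncated; batches are cut once a chunk nears 1200 tokens.
    print('token count:', len(_enc.encode(_sample)))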
# Text Data Labelling using LLM related changes
# --------------------------------------------------------
def generateTextLabel(request, DATA_FILE_PATH):
log = logging.getLogger('log_ux')
key,url,api_type,api_version = get_llm_data()
openai.api_key = key
openai.api_base = url
openai.api_type = api_type
openai.api_version = api_version
try:
features = request.POST.getlist('InputFeatures')
datapath = request.session['textdatapath']
OrgData = pd.read_csv(datapath)
# OrgData = OrgData.head(2000)
OrgData.fillna("", inplace = True)
OrgData['Head_Description'] = OrgData[features[0]]
if (len(features) > 1):
for indx in range(len(features)):
if (indx > 0):
OrgData['Head_Description'] = OrgData['Head_Description'] + " "+ OrgData[features[indx]]
# OrgData = generateLabelPerRecord(OrgData)
OrgData = generateLabelForChunkedRecords(OrgData)
df = OrgData
filetimestamp = str(int(time.time()))
datasetName = 'AION_TextLabelled' + filetimestamp+'.csv'
dataFile = os.path.join(DATA_FILE_PATH,datasetName)
df.to_csv(dataFile)
request.session['texttopicdatapath'] = dataFile
df_json = df.to_json(orient="records")
df_json = json.loads(df_json)
from appbe.dataPath import DATA_DIR
from appbe.sqliteUtility import sqlite_db
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
newdata = {}
newdata['datapath'] = [dataFile]
newdata['datasetname'] = [datasetName]
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata), 'dataingest')
################################################
context = {'data_topic':df_json, 'selected':'DataOperations'}
return context
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
errormsg = str(e)
if 'Invalid URL' in errormsg or 'No connection adapters' in errormsg or 'invalid subscription key' in errormsg:
errormsg = 'Access denied due to invalid subscription key or wrong API endpoint. Please go to settings and make sure to provide a valid key for an active subscription and use a correct regional API endpoint for your resource.'
if 'Max retries exceeded with url' in errormsg:
            errormsg = 'Please make sure you have a good internet connection and access to the API endpoint for your resource.'
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
context = {'error': 'Failed to communicate LLM','LLM' : 'openAI', 'selected':'DataOperations', 'errormessage':errormsg}
log.info('generateTextLabel -- Error : Failed to generate Text-Label.. '+str(e))
log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return context
#function to return the queried response
def generate_gpt3_response(user_text, print_output=False):
"""
Query OpenAI GPT-3 for the specific key and get back a response
:type user_text: str the user's text to query for
:type print_output: boolean whether or not to print the raw output JSON
"""
time.sleep(2)
completions = openai.Completion.create(
# engine='Text-Datvinci-03', # Determines the quality, speed, and cost. engine='text-davinci-003',
engine=deployment_name, # Determines the quality, speed, and cost. engine='text-davinci-003',
temperature=0, # Level of creativity in the response
prompt=user_text, # What the user typed in
max_tokens=2000, # Maximum tokens in the prompt AND response
n=1, # The number of completions to generate
stop=None, # An optional setting to control response generation
)
# Displaying the output can be helpful if things go wrong
if print_output:
print(completions)
# Return the first choice's text
# print(completions.choices[0].text)
return completions.choices[0].text
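# --- Hedged usage sketch (assumes the Azure OpenAI credentials above are populated and the
# --- 'Text-Datvinci-03' deployment exists; the prompt text is illustrative only).
if __name__ == '__main__':
    print(generate_gpt3_response('Give one low level topic for: printer jams every morning.'))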
# --------------------------------------------------------<s> import pandas as pd
import numpy as np
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
from statsmodels.tsa.seasonal import seasonal_decompose
import logging
import os
import warnings
warnings.filterwarnings('ignore')
## Main class to find out seasonality and stationarity in timeseries data.
class StationarySeasonalityTest:
def __init__(self,df,featurename,datetimefeature):
self.df=df
self.targetFeature=featurename
self.datetimefeature=datetimefeature
## to get the timeseries data stationary information
def stationary_model(self,df,target_feature,stationary_check_method):
stationary_status=None
if (stationary_check_method.lower()=='adfuller'):
stats_model=adfuller(df[target_feature])
            statistic, p_value, n_lags, num_observations,critical_values,info_criterion_best=stats_model[0],stats_model[1],stats_model[2],stats_model[3],stats_model[4],stats_model[5]
if (p_value>0.05):
stationary_status=str("Non-Stationary")
elif(p_value<0.05):
stationary_status=str("Stationary")
        ##KPSS is opposite to ADF in its null hypothesis: under the KPSS null the series is stationary, as opposed to ADF.
elif (stationary_check_method.lower()=='kpss'):
from statsmodels.tsa.stattools import kpss
stats_model = kpss(df[target_feature])
statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3]
            ##In KPSS, the stationarity condition is the opposite of adfuller.
if (p_value>0.05):
stationary_status=str("Stationary")
else:
stationary_status=str("Non-Stationary")
return stats_model,n_lags,p_value,stationary_status
## Get stationary details
def stationary_check(self,target_feature,time_col,method):
df=self.df
df[time_col]=pd.to_datetime(df[time_col])
df=df.set_index(time_col)
try:
stationary_check_method=method
except:
stationary_check_method='adfuller'
if (len(target_feature) == 1):
try:
if isinstance(target_feature,list):
target_feature=''.join(target_feature)
elif isinstance(target_feature,int):
target_feature=str(target_feature)
elif isinstance(target_feature,str):
pass
except Exception as e:
pass
stationary_result={}
stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,target_feature,stationary_check_method)
# stationary_result[target_feature]=stationary_status
stationary_result[target_feature]=stationary_status
elif(len(target_feature) > 1):
stationary_result={}
for col in df.columns:
stats_model,n_lags,p_value,stationary_status=self.stationary_model(df,col,stationary_check_method)
stationary_result[col]=stationary_status
else:
pass
stationary_val=None
for v in stationary_result.values():
stationary_val=v
stationary_combined_res=dict()
        c_dict=[k for k,v in stationary_result.items() if 'non-stationary' in v.lower()]
if (len(c_dict)>=1):
stationary_combined_res['dataframe_stationarity']='Non-Stationary'
else:
stationary_combined_res['dataframe_stationarity']='Stationary'
return stats_model,n_lags,p_value,stationary_val,stationary_combined_res
#Get seasonality by using seasonal_decompose lib.
def seasonality_model(self,target_feature,df):
seasonality_status=None
try:
try:
stats_model = kpss(df[target_feature])
statistic, p_value, n_lags, critical_values=stats_model[0],stats_model[1],stats_model[2],stats_model[3]
except:
n_lags=1
pass
try:
df_target=self.df[target_feature]
decompose_result_mult = seasonal_decompose(df_target,model='additive', extrapolate_trend='freq', period=n_lags)
except Exception as e:
##If additive model (type of seasonal component) failed, use multiplicative
decompose_result_mult = seasonal_decompose(df_target,model='multiplicative', extrapolate_trend='freq', period=1)
trend = decompose_result_mult.trend
observed=decompose_result_mult.observed
seasonal = decompose_result_mult.seasonal
residual = decompose_result_mult.resid
try:
if isinstance(df_target, pd.Series):
auto_correlation = df_target.autocorr(lag=n_lags)
elif isinstance(df_target, pd.DataFrame):
df_target = df_target.squeeze()
auto_correlation = df_target.autocorr(lag=n_lags)
except:
pass
if (seasonal.sum()==0):
seasonality_status="Non-Seasonal"
else:
seasonality_status="Seasonal"
# #Please use the below plot for GUI show (seasonality components)
# decompose_result_mult.plot().savefig('seasonality_plot.png')
df['observed'] = decompose_result_mult.observed
df['residual'] = decompose_result_mult.resid
df['seasonal'] = decompose_result_mult.seasonal
df['trend'] = decompose_result_mult.trend
except Exception as e:
print("Seasonality function exception: \\t",e)
return df,decompose_result_mult,seasonality_status
    ##Main function to check seasonality in data
def seasonal_check(self,target_feature,time_col,seasonal_model):
df=self.df
try:
df[time_col]=pd.to_datetime(df[time_col])
except Exception as e:
pass
df=df.set_index(time_col)
if (len(target_feature)==1):
try:
if isinstance(target_feature,list):
target_feature=''.join(target_feature)
elif isinstance(target_feature,int):
target_feature=str(target_feature)
elif isinstance(target_feature,str):
pass
except Exception as e:
## Because of EDA, all log messages removed. (self.log.info )
pass
## Seasonal component for individual feature based.
seasonality_result=dict()
df,decompose_result_mult,seasonality_status = self.seasonality_model(target_feature,df)
# seasonality_result[target_feature]=seasonality_status
seasonality_result['Feature: '+str(target_feature)]=seasonality_status
elif(len(target_feature) > 1):
seasonality_result=dict()
for col in df.columns:
df,decompose_result_mult,seasonality_status = self.seasonality_model(col,df)
seasonality_result[col]=seasonality_status
else:
pass
# ## Seasonal component for whole dataset
seasonality_val=None
for v in seasonality_result.values():
seasonality_val=v
seasonality_combined_res=dict()
        c_dict=[k for k,v in seasonality_result.items() if 'non-seasonal' in v.lower()]
if (len(c_dict)>=1):
seasonality_combined_res['dataframe_seasonality']='No Seasonal elements'
else:
seasonality_combined_res['dataframe_seasonality']='contains seasonal elements.'
return df,decompose_result_mult,seasonality_val,seasonality_combined_res
#Main user defined caller for stationary and seasonality (SS)
def analysis(self,seasonality_status,stationarity_status):
seasonal_model="additive"
time_col=self.datetimefeature
stationary_method='adfuller'
if (isinstance(self.targetFeature,list)):
target=self.targetFeature
pass
elif (isinstance(self.targetFeature,str)):
target=list(self.targetFeature.split(','))
if (stationarity_status.lower()=="true"):
stats_model,n_lags,p_value,stationary_result,stationary_combined_res=self.stationary_check(target,time_col,stationary_method)
return stationary_result
if (seasonality_status.lower()=="true"):
df,decompose_result_mult,seasonality_result,seasonality_combined_res=self.seasonal_check(target,time_col,seasonal_model)
return seasonality_result
#Main fn for standalone test purpose
if __name__=='__main__':
print("Inside seasonality-stationary test main function...")
print("Below code used for standalone test purpose.")
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import logging
from appbe.dataIngestion import getcommonfields
from appbe.dataIngestion import getusercasestatus
from appbe import service_url
import json
from appbe.dataIngestion import delimitedsetting
import os,sys
import pandas as pd
from django.http import HttpResponse
import time
from appbe.dataPath import LOG_LOCATION
from appbe.log_ut import logg
def get_instance_id(modelID):
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if sqlite_obj.table_exists("LLMTuning"):
data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID)
print(data)
if len(data) > 0:
return (data[3]+' instance '+data[2])
else:
return 'Instance ID not available'
else:
return 'Instance ID not available'
def get_instance(modelID):
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if sqlite_obj.table_exists("LLMTuning"):
data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID)
if len(data) > 0:
return (data[3],data[2],data[5],data[6])
else:
return '','','',''
else:
return '','','',''
def getprompt(promptfeature,contextFeature,responseFeature,promptFriendlyName,responseFriendlyName,data):
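    """Build an Alpaca-style instruction prompt from a data row, adding an
    '### Input:' context section when contextFeature is set."""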
if contextFeature != '':
promptData = data[promptfeature].replace('\\n','')
inputData = data[contextFeature].replace('\\n','')
prompt = (
f"Below is an {promptFriendlyName} that describes a task, paired with an Input that provides further context. "
f"Write a {responseFriendlyName} that appropriately completes the request.\\n\\n"
f"### {promptFriendlyName}:\\n{promptData}\\n\\n### Input:\\n{inputData}\\n\\n### {responseFriendlyName}:\\n")
else:
promptData = data[promptfeature].replace('\\n','')
prompt=(
f"Below is an {promptFriendlyName} that describes a task. "
f"Write a {responseFriendlyName} that appropriately completes the request.\\n\\n"
f"### {promptFriendlyName}:\\n{promptData}\\n\\n### {responseFriendlyName}:\\n")
return prompt
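# Example (hypothetical column names): getprompt('instruction', '', 'output',
# 'Instruction', 'Response', {'instruction': 'Summarize the report'}) returns a
# prompt ending in "### Response:\\n", left open for the model to complete.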
def getDataInstance(problem_type,mlmodels,configSettingsJson):
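    """Build a one-row dict of example input fields for the selected problem
    type, sampled from the first row of the training data when available."""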
log = logging.getLogger('log_ux')
delimiters,textqualifier = delimitedsetting(configSettingsJson['basic']['fileSettings']['delimiters'],configSettingsJson['basic']['fileSettings']['textqualifier'])
if problem_type == 'timeSeriesForecasting': #task 11997
inputFieldsDict = {'noofforecasts': 10}
elif problem_type == 'recommenderSystem' and mlmodels =='ItemRating':
inputFieldsDict = {"uid": 1, "iid": 31, "rating": 0}
elif problem_type == 'stateTransition':
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
inputFeaturesList = inputFeatures.split(',')
inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'}
else:
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
inputFeaturesList = inputFeatures.split(',')
if targetFeature in inputFeaturesList:
inputFeaturesList.remove(targetFeature)
if problem_type == 'survivalAnalysis':
inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature'])
dataFilePath = str(configSettingsJson['basic']['dataLocation'])
if os.path.isfile(dataFilePath):
df = pd.read_csv(dataFilePath,encoding='utf8',nrows=2,sep=delimiters,quotechar=textqualifier,encoding_errors= 'replace')
try:
singleInstanceData = df.loc[0, inputFeaturesList]
except:
singleInstanceData = pd.Series(0, index =inputFeaturesList)
inputFieldsDict = singleInstanceData.to_dict()
else:
inputFieldsDict = {"File":"EnterFileContent"}
inputFields = []
inputFields.append(inputFieldsDict)
return inputFields
def createInstanceFeatures(configSettingsJson,problem_type,mlmodels,usecaseid,version,ser_url):
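    """Prepare the input-field template and the prediction service URL for a
    trained use case, with special handling for LLM fine-tuning, state
    transition and time series problems."""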
delimiters,textqualifier = delimitedsetting(configSettingsJson['basic']['fileSettings']['delimiters'],configSettingsJson['basic']['fileSettings']['textqualifier'])
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
if inputFeatures != '':
inputFeaturesList = inputFeatures.split(',')
else:
inputFeaturesList = []
if targetFeature in inputFeaturesList:
inputFeaturesList.remove(targetFeature)
if configSettingsJson['basic']['contextFeature'] != '':
inputFeaturesList.append(configSettingsJson['basic']['contextFeature'])
if problem_type == 'llmFineTuning':
inputFeaturesList.append('Temperature')
        inputFeaturesList.append('Max Tokens')
if problem_type in ['survivalAnalysis','anomalyDetection', 'timeSeriesAnomalyDetection']: #task 11997
if configSettingsJson['basic']['dateTimeFeature'] != '' and configSettingsJson['basic']['dateTimeFeature'] != 'na':
inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature'])
dataFilePath = str(configSettingsJson['basic']['dataLocation'])
if problem_type == 'timeSeriesForecasting': #task 11997
inputFieldsDict = {'noofforecasts': 10}
elif problem_type == 'recommenderSystem' and mlmodels=='ItemRating':
inputFieldsDict = {"uid": 1, "numberOfRecommendation":10}
elif problem_type == 'stateTransition':
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
if inputFeatures != '':
inputFeaturesList = inputFeatures.split(',')
else:
inputFeaturesList = []
inputFieldsDict = {inputFeatures:'session',targetFeature:'Activity'}
elif problem_type != 'llmFineTuning':
if os.path.isfile(dataFilePath):
df = pd.read_csv(dataFilePath,encoding='utf8',nrows=2,sep=delimiters,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace')
try:
inputFieldsDict = df.to_dict(orient='index')[0]
except:
inputFieldsDict = pd.Series(0, index =inputFeaturesList).to_dict()
else:
inputFieldsDict = {"File":"EnterFileContent"}
else:
inputFieldsDict = pd.Series('', index =inputFeaturesList).to_dict()
inputFieldsDict['Temperature'] = '0.1'
hypervisor,instanceid,region,image = get_instance(usecaseid+'_'+str(version))
        if hypervisor.lower() == 'aws':  # compare lower-cased value; 'AWS' would never match
inputFieldsDict['Max Tokens'] = '1024'
else:
inputFieldsDict['Max Tokens'] = '4096'
inputFields = []
inputFields.append(inputFieldsDict)
if problem_type == 'llmFineTuning':
ser_url = get_instance_id(usecaseid+'_'+str(version))
elif problem_type == 'stateTransition':
ser_url = ser_url+'pattern_anomaly_predict?usecaseid='+usecaseid+'&version='+str(version)
else:
ser_url = ser_url+'predict?usecaseid='+usecaseid+'&version='+str(version)
return inputFields,ser_url
def singleInstancePredict(request, Existusecases, usecasedetails):
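    """Django view helper: run a single-instance prediction for the use case in
    the session, or generate a downloadable prediction script when the submit
    type is 'script'."""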
log = logging.getLogger('log_ux')
modelType=''
context = getcommonfields()
submittype = request.POST.get('predictsubmit')
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
t1 = time.time()
try:
try:
model = Existusecases.objects.get(ModelName=request.session['ModelName'],
Version=request.session['ModelVersion'])
output_train_json_filename = str(model.TrainOuputLocation)
f = open(output_train_json_filename, "r+")
training_output = f.read()
f.close()
training_output = json.loads(training_output)
featureused = training_output['data']['featuresused']
except:
featureused = []
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Prediction','Yes')
usecasename = request.session['usecaseid'].replace(" ", "_")
context.update({'usecasename':usecasename})
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r", encoding = "utf-8")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
if inputFeatures != '':
inputFeaturesList = inputFeatures.split(',')
else:
inputFeaturesList = []
if targetFeature in inputFeaturesList:
inputFeaturesList.remove(targetFeature)
if configSettingsJson['basic']['contextFeature'] != '':
inputFeaturesList.append(configSettingsJson['basic']['contextFeature'])
problemtypes = configSettingsJson['basic']['analysisType']
problem_type = ''
modelSize = ''
for k in problemtypes.keys():
if configSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
if problem_type == 'llmFineTuning':
inputFeaturesList.append('Temperature')
inputFeaturesList.append('Max Tokens')
mlmodels =''
        algorithms = configSettingsJson['basic']['algorithms'][problem_type]
        for k in algorithms.keys():
if configSettingsJson['basic']['algorithms'][problem_type][k] == 'True':
if mlmodels != '':
mlmodels += ', '
mlmodels += k
if problem_type == 'llmFineTuning':
ser_url = get_instance_id(usecasename+'_'+str(request.session['ModelVersion']))
if 'modelSize' in configSettingsJson['basic']:
selectedModelSize = configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels]
for k in selectedModelSize.keys():
if configSettingsJson['basic']['modelSize']['llmFineTuning'][mlmodels][k] == 'True':
modelSize = k
break
elif problem_type == 'stateTransition':
ser_url = service_url.read_service_url_params(request)
ser_url = ser_url+'pattern_anomaly_predict?usecaseid='+usecasename+'&version='+str(request.session['ModelVersion'])
else:
ser_url = service_url.read_service_url_params(request)
ser_url = ser_url+'predict?usecaseid='+usecasename+'&version='+str(request.session['ModelVersion'])
if submittype.lower() == 'predict':
inputFieldsDict = {}
if problem_type == 'timeSeriesForecasting': #task 11997
inputFieldsDict['noofforecasts'] = int(request.POST.get('noofforecasts'))
elif problem_type == 'stateTransition':
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
sessionid = request.POST.get('SessionID')
activity = request.POST.get(targetFeature)
inputFieldsDict[inputFeatures] = request.POST.get(inputFeatures)
inputFieldsDict[targetFeature] = request.POST.get(targetFeature)
elif problem_type == 'recommenderSystem' and mlmodels == 'ItemRating':
inputFieldsDict['uid'] = request.POST.get('uid')
inputFieldsDict['numberOfRecommendation'] = int(request.POST.get('numberOfRecommendation')) #Task 11190
else:
if problem_type in ['survivalAnalysis','anomalyDetection', 'timeSeriesAnomalyDetection']: #task 11997
if configSettingsJson['basic']['dateTimeFeature'] != '' and configSettingsJson['basic']['dateTimeFeature'] != 'na':
inputFeaturesList.insert(0,configSettingsJson['basic']['dateTimeFeature'])
for feature in inputFeaturesList:
inputFieldsDict[feature] = request.POST.get(feature)
if problem_type.lower() not in ['contextualsearch','similarityidentification']:
for key, value in inputFieldsDict.items():
if value == 'nan':
inputFieldsDict[key] = ''
if value == '':
if key in featureused:
context.update({'tab': 'predict','ser_url':ser_url, 'error': ' Error : Mandatory field(s) are empty', 'selected': 'prediction', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion})
return context
inputFieldsJson = json.dumps(inputFieldsDict)
if problem_type == 'llmFineTuning':
modelType = request.POST.get('modelTypeforInferencing')
x = inputFieldsDict.keys()
from appbe.dataPath import DATA_DIR
prompt = inputFieldsDict[configSettingsJson['basic']['trainingFeatures']]
promptobj = {'prompt':prompt}
if configSettingsJson['basic']['contextFeature'] != '':
inputData = inputFieldsDict[configSettingsJson['basic']['contextFeature']]
promptobj.update({'input':inputData})
filetimestamp = str(int(time.time()))
file_path = os.path.join(DATA_DIR,'logs',filetimestamp+'.json')
f= open(file_path,"w",encoding="utf-8")
#print(promptobj)
json.dump(promptobj,f)
f.close()
from llm.llm_inference import LLM_predict
cloudconfig = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config','compute_conf.json'))
hypervisor,instanceid,region,image = get_instance(usecasename+'_'+str(request.session['ModelVersion']))
if hypervisor and instanceid:
if modelSize != '':
mlmodels = mlmodels+'-'+modelSize
cachepath = os.path.join(DATA_DIR,'sqlite','cachePrompt.db')
import sqlite3
conn = sqlite3.connect(cachepath)
from llm.llm_cache import CachePrompt
cachepromptObj = CachePrompt(conn)
searchFlag,result = cachepromptObj.selectFromCache(prompt,usecasename+'_'+str(request.session['ModelVersion']),modelType,temperature=inputFieldsDict['Temperature'],max_token=inputFieldsDict['Max Tokens'])
                    if searchFlag:  # cache miss: invoke the model and store its output
buf = LLM_predict(cloudconfig,instanceid,file_path,hypervisor,mlmodels,usecasename+'_'+str(request.session['ModelVersion']),region,image,inputFieldsDict['Temperature'],inputFieldsDict['Max Tokens'],modelType)
import re
outputStr = buf.split('ModelOutput:')[1]
cachepromptObj.insertRecord(prompt,outputStr,usecasename+'_'+str(request.session['ModelVersion']),modelType,temperature=inputFieldsDict['Temperature'],max_token=inputFieldsDict['Max Tokens'])
else:
outputStr = result
if configSettingsJson['basic']['folderSettings']['fileType'].lower() != 'llm_document':
outputStr = outputStr.split('### '+configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response']+':')[1]
singlePredictionResults = []
singlePredictionsummary=""
Results={}
Results['Response'] = outputStr
singlePredictionResults.append(Results)
else:
context.update(
{'tab': 'tabconfigure', 'error': 'Prediction Error: Instance ID not found ', 'selected': 'prediction',
'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,
'ModelVersion': ModelVersion,'mlmodels':mlmodels})
log.info('Predict Instance :' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Prediction Error, Instance ID not found')
return context
else:
try:
import requests
#response = requests.post(ser_url,auth=(aion_service_username,aion_service_password),data=inputFieldsJson,headers={"Content-Type":"application/json",})
response = requests.post(ser_url,data=inputFieldsJson,headers={"Content-Type":"application/json",})
if response.status_code != 200:
outputStr=response.content
context.update({'tab': 'tabconfigure', 'error': outputStr.decode('utf-8'), 'selected': 'prediction'})
log.info('Predict Instance : '+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0 '+'sec'+' : '+'Error : '+str(outputStr.decode('utf-8')))
return context
except Exception as inst:
if 'Failed to establish a new connection' in str(inst):
                        context.update({'tab': 'tabconfigure', 'error': 'AION service needs to be started', 'selected': 'prediction', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion})
                        log.info('Predict Instance :'+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0'+' sec'+' : '+'Error : AION service needs to be started, '+str(inst))
return context
else:
context.update({'tab': 'tabconfigure', 'error': 'Prediction Error '+str(inst),'selected': 'prediction', 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion})
log.info('Predict Instance :'+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0 '+'sec'+' : '+'Error : Prediction Error, '+str(inst))
return context
outputStr=response.content
outputStr = outputStr.decode('utf-8','ignore')
outputStr = outputStr.strip()
predict_dict = json.loads(str(outputStr))
#print(predict_dict)
singlePredictionsummary=""
if (predict_dict['status'] == 'SUCCESS'):
data = predict_dict['data']
singlePredictionResults = []
Results = {}
if problem_type == 'multiModalLearning':
data = data[0]
Results['prediction'] = data['predict']
singlePredictionResults.append(Results)
if problem_type == 'textSummarization':
data = data[0]
Results['msg'] = predict_dict['msg']
singlePredictionResults.append(Results)
Results['prediction'] = predict_dict['data']
singlePredictionResults.append(Results)
Results1 = {}
Results1['prediction'] = predict_dict['data']
print("prdata------------",predict_dict['data'])
singlePredictionsummary=predict_dict['data']
print("singlePredictionsummary",singlePredictionsummary)
t2 = time.time()
elif problem_type == 'multiLabelPrediction':
prediction = ''
for x in data:
for y in x:
if 'predict' in y:
if prediction != '':
prediction += ','
prediction += str(y)+':'+str(x[y])
Results['prediction'] = prediction
singlePredictionResults.append(Results)
                    elif problem_type == 'timeSeriesForecasting': #task 11997
Results['prediction'] = json.dumps(data)
singlePredictionResults.append(Results)
elif problem_type == 'stateTransition':
if str(data['Anomaly']) == 'False':
Results['prediction'] = 'No Anomaly'
else:
Results['prediction'] = str(data['Remarks'])
singlePredictionResults.append(Results)
elif problem_type.lower() in ['similarityidentification','contextualsearch']:
data = data[0]
prediction = data['prediction']
i = 1
for x in prediction:
te = ''
for y in x:
info = (str(x[y])[:50] + '...') if len(str(x[y])) > 50 else str(x[y])
te += y+': '+info+'\\n\\n'
Results[i] = te
i = i+1
singlePredictionResults.append(Results)
else:
data = data[0]
if 'prediction' in data:
Results['prediction'] = data['prediction']
if 'probability' in data:
Results['probability'] = data['probability']
if 'remarks' in data:
Results['remarks'] = json.loads(data['remarks'])
singlePredictionResults.append(Results)
t2 = time.time()
log.info('Predict Instance : '+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+str(round(t2-t1))+' sec'+' : '+'Success')
else:
context.update({'tab': 'tabconfigure', 'error': 'Prediction Error '+str(predict_dict['message']), 'selected': 'prediction','selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion})
log.info('Predict Instance : '+str(selected_use_case) + ' : ' + str(ModelVersion) + ' : '+'0 '+'sec'+' : '+'Error : Prediction Error')
return context
inputFields = []
inputFields.append(inputFieldsDict)
##Below added by sjayaram for llm langkit evaluation metrics Task:17109
prompt_response_results = ''
if problem_type == 'llmFineTuning':
try:
response_msg = outputStr
prompt_msg = prompt
except:
response_msg = ''
prompt_msg = ''
from appbe.evaluate_prompt import evaluate_prompt_response_inputs
final_output_json,prompt_response_results = evaluate_prompt_response_inputs(prompt_msg,response_msg)
#ser_url = service_url.read_service_url_params(request)
#ser_url = ser_url+'predict?usecaseid='+usecasename+'&version='+str(ModelVersion)
context.update({'tab': 'predict','mlmodels':mlmodels,'fineTunedModelType':modelType,'ser_url':ser_url, 'inputFields': inputFields,'singlePredictionResults': singlePredictionResults,'singlePredictionsummary':singlePredictionsummary,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction',
'prompt_response_results':prompt_response_results})
return context
elif submittype.lower() == 'script':
scriptdata="'''\\n"
scriptdata+="* =============================================================================\\n"
scriptdata+="* COPYRIGHT NOTICE\\n"
scriptdata+="* =============================================================================\\n"
scriptdata+="* @ Copyright HCL Technologies Ltd. 2021, 2022, 2023\\n"
scriptdata+="* Proprietary and confidential. All information contained herein is, and\\n"
scriptdata+="* remains the property of HCL Technologies Limited. Copying or reproducing the\\n"
scriptdata+="* contents of this file, via any medium is strictly prohibited unless prior\\n"
scriptdata+="* written permission is obtained from HCL Technologies Limited.\\n"
scriptdata+="'''\\n"
scriptdata+='import sys\\n'
scriptdata+='import json\\n'
scriptdata+='import requests\\n'
scriptdata+='import pandas as pd\\n'
scriptdata+='from pandas import json_normalize\\n'
scriptdata+='ser_url ="'+ser_url+'"\\n\\n'
scriptdata+="def predict(data):\\n"
scriptdata+=" if data.endswith('.tsv'):\\n"
scriptdata+=" df=pd.read_csv(data,encoding='utf-8',encoding_errors= 'replace',sep='\\\\t')\\n"
scriptdata+=" else:\\n"
scriptdata+=" df=pd.read_csv(data,encoding='utf-8',encoding_errors= 'replace')\\n"
scriptdata+=' features = "'+",".join([feature for feature in inputFeaturesList])+'"\\n'
scriptdata+=" features = features.split(',')\\n"
scriptdata+=" df = df[features]\\n"
scriptdata+=" data = df.to_json(orient='records')\\n"
scriptdata+=" try:\\n"
scriptdata+=' response = requests.post(ser_url,data=data,headers={"Content-Type":"application/json",})\\n'
scriptdata+=" if response.status_code == 200:\\n"
scriptdata+=" outputStr=response.content\\n"
scriptdata+=" outputStr = outputStr.decode('utf-8')\\n"
scriptdata+=" outputStr = outputStr.strip()\\n"
scriptdata+=" predict_dict = json.loads(str(outputStr))\\n"
scriptdata+=" print(predict_dict)\\n"
scriptdata+=" except Exception as e:\\n"
scriptdata+=' print(e)\\n'
scriptdata+='\\nif __name__ == "__main__":\\n'
scriptdata+=' predict(sys.argv[1])'
response = HttpResponse()
response['content_type'] = 'text/plain'
response['Content-Disposition'] = 'attachment; filename=prediction.py'
response.write(scriptdata)
return response
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
context.update({'tab': 'tabconfigure', 'error': 'Failed To perform prediction','selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ModelVersion': ModelVersion, 'selected': 'prediction'})
log.info('Predict Instance :' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + ' 0 ' + 'sec' + ' : ' + 'Error : Failed To perform prediction, '+ str(inst))
return context
<s> import os
import openai
from langchain.llms import AzureOpenAI
from sentence_transformers.SentenceTransformer import SentenceTransformer
import time
import datetime
import pandas as pd
import sys
import subprocess
import importlib
from appbe.aion_config import get_llm_data
from appbe.dataPath import DATA_FILE_PATH
remote_data_dir = "/home/aion/data/storage/llm_testing_data"
remote_data_processeddata_dir = '/home/aion/data/storage/processed_data'
remote_config_dir = '/home/aion/data/config'
sh_file_path = '/home/aion/llm/sbin/llm_testing.sh'
prompt_command = '/home/aion/llm/sbin/llm_testing.sh'
PRE_CONTEXT = "Answer the following question in a concise manner.\\n"
DEFAULT_PARAMS = {
'OPENAI_API_TYPE' : "azure",
'OPENAI_API_BASE' : "",
'OPENAI_API_KEY' : "",
'OPENAI_API_VERSION' : "2023-03-15-preview"
}
faq=""
def getAMIDetails(config,selectedAMI):
y = {}
for x in config:
print(x)
if x['id'] == selectedAMI:
return x
return y
class test_LLM():
def __init__(self,
deployment_name='Text-Datvinci-03', params=DEFAULT_PARAMS, transformer=None,
sentence_txfr_model='sentence-transformers/paraphrase-mpnet-base-v2'):
self.deployment_name=deployment_name
self.set_params( params)
self.transformer = transformer
self.sentence_txfr_model = sentence_txfr_model
def fiddlerAuditorCheck(self):
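        """Return the import spec for the 'auditor' package, installing
        fiddler-auditor (with a compatible notebook pin) on first use."""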
status = importlib.util.find_spec('auditor')
if not status:
subprocess.check_call([sys.executable, "-m", "pip","uninstall", "-q","-y","notebook"])
subprocess.check_call([sys.executable, "-m", "pip", "install","-q", "notebook==6.4.5" ])
subprocess.check_call([sys.executable, "-m", "pip", "install","-q","fiddler-auditor==0.0.2"])
subprocess.check_call([sys.executable, "-m", "pip", "install","-q","notebook==7.0.2"])
status = importlib.util.find_spec('auditor')
return status
def set_params(self, params={}):
valid_params = ['OPENAI_API_TYPE','OPENAI_API_KEY','OPENAI_API_BASE','OPENAI_API_VERSION']
for key, value in params.items():
if 'OPENAI_API_TYPE' == key:
openai.api_type = value
os.environ['OPENAI_API_TYPE'] = openai.api_type
elif 'OPENAI_API_KEY' == key:
openai.api_key = value
os.environ['OPENAI_API_KEY'] = openai.api_key
elif 'OPENAI_API_BASE' == key:
openai.api_base = value
os.environ['OPENAI_API_BASE'] = openai.api_base
elif key in valid_params:
os.environ[key] = value
def run(self,modelName, temperature, similarity_threshold, perturbations_per_sample, prompts, reference_generation,pre_context=PRE_CONTEXT):
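        """Evaluate prompt correctness of the Azure OpenAI deployment under
        paraphrase perturbations, using fiddler-auditor's LLMEval with a
        sentence-transformer similarity check."""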
if not self.fiddlerAuditorCheck():
            raise ValueError('Fiddler-auditor is not installed. Install it with "python -m pip install fiddler-auditor==0.0.2"')
openai_llm = AzureOpenAI(deployment_name=self.deployment_name, temperature=temperature, openai_api_key=openai.api_key)
from auditor.perturbations import Paraphrase
from auditor.evaluation.expected_behavior import SimilarGeneration
from auditor.evaluation.evaluate import LLMEval
# For Azure OpenAI, it might be the case the api_version for chat completion
# is different from the base model so we need to set that parameter as well.
if self.transformer:
azure_perturber = self.transformer
else:
azure_perturber = Paraphrase(
model="GPT-35-Turbo",
api_version="2023-03-15-preview",
num_perturbations=perturbations_per_sample,
)
sent_xfmer = SentenceTransformer(self.sentence_txfr_model)
similar_generation = SimilarGeneration(
similarity_model=sent_xfmer,
similarity_threshold=similarity_threshold,)
llm_eval = LLMEval(
llm=openai_llm,
expected_behavior=similar_generation,
transformation=azure_perturber,)
test_result = llm_eval.evaluate_prompt_correctness(
prompt=prompts,
pre_context=pre_context,
reference_generation=reference_generation,
perturbations_per_sample=perturbations_per_sample
)
return test_result
def runmultiple(self,modelName, temperature, similarity_threshold, perturbations_per_sample, prompts, reference_generation,pre_context=PRE_CONTEXT,faq=faq):
if not self.fiddlerAuditorCheck():
            raise ValueError('Fiddler-auditor is not installed. Install it with "python -m pip install fiddler-auditor==0.0.2"')
from auditor.evaluation.expected_behavior import SimilarGeneration
from auditor.evaluation.evaluate import LLMEval
openai_llm = AzureOpenAI(deployment_name=self.deployment_name, temperature=temperature, openai_api_key=openai.api_key)
from auditor.perturbations import Paraphrase
# For Azure OpenAI, it might be the case the api_version for chat completion
# is different from the base model so we need to set that parameter as well.
if self.transformer:
azure_perturber = self.transformer
else:
azure_perturber = Paraphrase(
model="GPT-35-Turbo",
api_version="2023-03-15-preview",
num_perturbations=perturbations_per_sample,
)
sent_xfmer = SentenceTransformer(self.sentence_txfr_model)
similar_generation = SimilarGeneration(
similarity_model=sent_xfmer,
similarity_threshold=similarity_threshold,)
llm_eval = LLMEval(
llm=openai_llm,
expected_behavior=similar_generation,
transformation=azure_perturber,)
rows = faq.shape[0]
prompts = list(faq['Question'])
listofDf = []
for i in range(rows):
test_result = llm_eval.evaluate_prompt_robustness(
prompt=prompts[i],
pre_context=pre_context,
)
try:
now = datetime.datetime.now().strftime("%H%M%S")
name = str(i)+str(now)+'.html'
test_result.save(name)
df_iter=pd.read_html(name)
df_actual = df_iter[0]
listofDf.append(df_actual)
except:
pass
perturbatedDF = pd.concat(listofDf)
return perturbatedDF
def run_offline_model(self, usecasename,modelName, temperature, similarity_threshold, perturbations_per_sample, reference_generation, prompts,isfinetuned):
from appbe.compute import readComputeConfig
from appbe.prediction import get_instance
cloud_infra = readComputeConfig()
dataFile = os.path.join(DATA_FILE_PATH, 'prompt.csv')
remoteFile = os.path.join(remote_data_dir, 'prompt.csv')
if not reference_generation:
reference_generation = ''
prompt = pd.DataFrame([{'prompts':prompts, 'reference_generation':reference_generation}])
prompt.to_csv(dataFile, index=False)
hypervisor, instanceid, region, image = get_instance(usecasename)
key, url, api_type, api_version = get_llm_data()
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
currentDirectory = os.path.dirname(os.path.abspath(__file__))
LLM_DIR = os.path.normpath(os.path.join(currentDirectory, '..', 'llm'))
if image != '' and image != 'NA':
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image)
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
from llm.aws_instance_api import start_instance
# print(aws_access_key_id, aws_secret_key, instanceid, region)
status, msg, ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region)
if status.lower() == 'success':
pem_file = os.path.join(LLM_DIR, amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
                # copy file to server for single prompt
from AION.llm.ssh_command import copy_files_to_server
copy_files_to_server(ip,pem_file,dataFile,'',username,'',remote_data_dir,remote_config_dir)
if isfinetuned:
command = prompt_command + ' ' + usecasename + ' ' + str(modelName) \\
+ ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \\
+ str(perturbations_per_sample) + \\
' '+ str(key) + \\
' '+ str(url) + \\
' '+ str(api_type) + \\
' '+ str(api_version)+ \\
' '+ str("single")
else:
command = prompt_command + ' ' + 'BaseModel' + ' ' + str(modelName) \\
+ ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \\
+ str(perturbations_per_sample) + \\
' '+ str(key) + \\
' '+ str(url) + \\
' '+ str(api_type) + \\
' '+ str(api_version)+ \\
' '+ str("single")
from llm.ssh_command import run_ssh_cmd
buf = run_ssh_cmd(ip, pem_file, username, '', '', command)
print(buf)
return buf
def run_multiple_offline_model(self, usecasename,modelName, temperature, similarity_threshold, perturbations_per_sample, faq,isfinetuned):
dataFile = os.path.join(DATA_FILE_PATH, 'prompt.csv')
remoteFile = os.path.join(remote_data_dir, 'prompt.csv')
faq.to_csv(dataFile, index=False)
print("This is done")
from appbe.compute import readComputeConfig
from appbe.prediction import get_instance
cloud_infra = readComputeConfig()
hypervisor, instanceid, region, image = get_instance(usecasename)
key, url, api_type, api_version = get_llm_data()
if hypervisor == 'AWS':
aws_access_key_id = cloud_infra['awsCredentials']['accessKey']
aws_secret_key = cloud_infra['awsCredentials']['secretAccessKey']
currentDirectory = os.path.dirname(os.path.abspath(__file__))
LLM_DIR = os.path.normpath(os.path.join(currentDirectory, '..', 'llm'))
if image != '' and image != 'NA':
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['amis'], image)
else:
amiDetails = getAMIDetails(cloud_infra['AWS_EC2']['instances'], instanceid)
if region == '' or region == 'NA':
region = amiDetails['regionName']
from llm.aws_instance_api import start_instance
# print(aws_access_key_id, aws_secret_key, instanceid, region)
status, msg, ip = start_instance(aws_access_key_id, aws_secret_key, instanceid, region)
if status.lower() == 'success':
pem_file = os.path.join(LLM_DIR, amiDetails['ssh']['keyFilePath'])
username = amiDetails['ssh']['userName']
#print(ip,pem_file,promptfile,'',username,'',remote_data_dir,remote_config_dir)
from AION.llm.ssh_command import copy_files_to_server
copy_files_to_server(ip,pem_file,dataFile,'',username,'',remote_data_dir,remote_config_dir)
if isfinetuned:
command = prompt_command + ' ' + usecasename + ' ' + str(modelName) \\
+ ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \\
+ str(perturbations_per_sample) + \\
' '+ str(key) + \\
' '+ str(url) + \\
' '+ str(api_type) + \\
' '+ str(api_version)+ \\
' '+ str("multiple")
else:
command = prompt_command + ' ' + 'BaseModel' + ' ' + str(modelName) \\
+ ' ' + str(temperature) + ' ' + str(similarity_threshold) + ' ' \\
+ str(perturbations_per_sample) + \\
' '+ str(key) + \\
' '+ str(url) + \\
' '+ str(api_type) + \\
' '+ str(api_version)+ \\
' '+ str("multiple")
from llm.ssh_command import run_ssh_cmd
buf = run_ssh_cmd(ip, pem_file, username, '', '', command)
print(buf)
return buf
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import csv
import logging
import pandas as pd
class csv_validator:
def __init__(self):
self.log = logging.getLogger('eion')
def __text_header(self, filename, threshold=0.75):
df = pd.read_csv(filename, header=None,nrows=1000)
numeric_columns = df.dtypes[df.dtypes != object]
if not len(numeric_columns):
first_row_len = df.iloc[0].str.len()
index = 0
for c in df:
if (df[c].map(len).mean() * threshold <= first_row_len[index]):
return False
index += 1
return True
return False
def validate_header(self, filename,delimiter,textqualifier,threshold=0.75):
with open(filename, 'rt',encoding='utf-8') as csvfile:
has_header = csv.Sniffer().has_header(csvfile.read(8192))
csvfile.seek(0)
if not has_header:
has_header = self.__text_header(filename, threshold)
reader = csv.reader(csvfile, delimiter=delimiter,quotechar=textqualifier)
good_csv = True
col_len = len(next(reader))
bad_lines = []
offset = 2 # +1 for first read and +1 for python index start at 0
for index, row in enumerate(reader):
if len(row) != col_len:
good_csv = False
if(index == 1 and has_header):
offset += 1
bad_lines.append(index + offset)
return has_header, good_csv, bad_lines
if __name__ == '__main__':
import sys
val = csv_validator()
    print(val.validate_header(sys.argv[1], ',', '"'))  # delimiter and text qualifier assumed for this standalone test
<s> import json
import os
import random
import time
from avro.datafile import DataFileReader
from avro.io import DatumReader
from pyarrow.parquet import ParquetFile
from snorkel.labeling.model import LabelModel
from snorkel.labeling import PandasLFApplier, LFAnalysis
import pandas as pd
import pandavro as pdx
import pyarrow as pa
import numpy as np
import platform
from os.path import expanduser
home = expanduser("~")
if platform.system() == 'Windows':
DATA_FILE_PATH = os.path.join(home,'AppData','Local','Programs','HCLTech','AION','data','storage')
else:
DATA_FILE_PATH = os.path.join(home,'HCLT','AION','data')
def get_join(condition):
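    """Map a rule condition's join keyword ('and'/'or') to the pandas
    element-wise boolean operator used in the generated code."""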
if condition["join"] == 'and':
return "&"
elif condition["join"] == 'or':
return "|"
else:
return ""
def create_labelling_function(rule_list, label_list):
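    """Generate Python source for Snorkel @labeling_function wrappers from the
    rule list; the caller exec()s this source and collects the functions via
    lfs_list_create(). The source is also written to lfs_list.txt for debugging."""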
lfs_main_func = 'def lfs_list_create():\\n'
lfs_main_func += '\\tfrom snorkel.labeling import labeling_function\\n'
lfs_main_func += '\\timport numpy as np\\n'
lfs_main_func += '\\timport json\\n'
lfs_main_func += '\\tABSTAIN = -1\\n'
lfs_main_func += '\\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\\n'
lfs_list = '\\tlfs_list=['
for rule in rule_list:
lfs_list += 'lf_' + rule["rule_name"] + ','
lfs = '\\t@labeling_function()\\n'
lfs += '\\tdef lf_' + rule["rule_name"] + '(data):\\n'
lfs += '\\t\\treturn np.where('
for condition in rule["conditions"]:
if "string" in condition["sel_datatype"]:
if condition["sel_condition"] in ["==", "!="]:
cond_statement = '(data["' + condition["sel_column"] + '"]' + condition[
"sel_condition"] + '("' + str(condition["input_value"]) + '"))' + get_join(condition)
else:
cond_statement = '(data["' + condition["sel_column"] + '"].' + condition[
"sel_condition"] + '("' + str(condition["input_value"]) + '"))' + get_join(condition)
else:
cond_statement = '(data["' + condition["sel_column"] + '"]' + condition["sel_condition"] + \\
str(condition["input_value"]) + ')' + get_join(condition)
lfs += cond_statement
lfs += ', labels.index("' + rule["label"] + '"), ABSTAIN)\\n'
lfs_main_func += lfs
if lfs_list.endswith(","):
lfs_list = lfs_list.rstrip(lfs_list[-1])
lfs_list += ']\\n'
else:
lfs_list += ']\\n'
lfs_main_func += lfs_list
lfs_main_func += '\\treturn lfs_list\\n'
lfs_main_func += 'lfs_list_create()'
f = open(os.path.join(DATA_FILE_PATH, 'lfs_list.txt'), 'w')
f.write(lfs_main_func)
f.close()
return lfs_main_func
def label_dataset(rule_list, file_ext, label_list, not_satisfy_label):
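    """Label the uploaded dataset with Snorkel: apply the generated labeling
    functions, fit a LabelModel, map abstains to not_satisfy_label, write the
    result file, and return an HTML sample of the labelled rows."""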
file_path = os.path.join(DATA_FILE_PATH, "uploaded_file." + file_ext)
if file_ext in ["csv", "tsv"]:
df = pd.read_csv(file_path)
elif file_ext == "json":
df = pd.json_normalize(pd.read_json(file_path).to_dict("records"))
elif file_ext == "avro":
reader = DataFileReader(open(file_path, "rb"), DatumReader())
schema = json.loads(reader.meta.get('avro.schema').decode('utf-8'))
df = pdx.read_avro(file_path, schema=schema, na_dtypes=True)
elif file_ext == "parquet":
df = pd.read_parquet(file_path, engine="pyarrow")
labelling_functions = create_labelling_function(rule_list, label_list)
exec(labelling_functions)
lfs = eval('lfs_list_create()')
applier = PandasLFApplier(lfs)
l_data = applier.apply(df)
label_model = LabelModel(cardinality=len(label_list) + 1, verbose=True)
label_model.fit(l_data, n_epochs=500, log_freq=50, seed=123)
df["label"] = label_model.predict(L=l_data, tie_break_policy="abstain")
df.loc[df["label"] == -1, "label"] = not_satisfy_label
for item in label_list:
df.loc[df["label"] == label_list.index(item), "label"] = item
if file_ext in ["csv", "tsv"]:
df.to_csv(os.path.join(DATA_FILE_PATH, "result_file." + file_ext), index=False)
elif file_ext == "parquet":
df.to_parquet(os.path.join(DATA_FILE_PATH, "result_file." + file_ext),
engine="pyarrow", index=False)
elif file_ext == "avro":
pdx.to_avro(os.path.join(DATA_FILE_PATH, "result_file." + file_ext), df)
else:
raise ValueError("Invalid file format")
num_records = len(df.index)
size_take = 100
if num_records <= size_take:
size_take = num_records
display_df = df.sample(n=size_take)
return display_df.to_html(classes='table table-striped text-left', justify='left', index=False)
def create_sample_function(rule, label_list, not_satisfy_label):
    lfs_main_func = 'def lf_rule_apply(data):\\n'
lfs_main_func += '\\timport numpy as np\\n'
lfs_main_func += '\\tABSTAIN = -1\\n'
lfs_main_func += '\\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\\n'
lfs = '\\treturn np.where('
for condition in rule["conditions"]:
if "string" in condition["sel_datatype"]:
if condition["sel_condition"] in ["==", "!="]:
cond_statement = '(data["' + condition["sel_column"] + '"]' + condition["sel_condition"] + '("' + str(
condition["input_value"]) + '"))' + get_join(condition)
else:
cond_statement = '(data["' + condition["sel_column"] + '"].str.' + condition[
"sel_condition"] + '("' + str(condition["input_value"]) + '"))' + get_join(condition)
print(cond_statement)
else:
cond_statement = '(data["' + condition["sel_column"] + '"]' + condition["sel_condition"] + \\
str(condition["input_value"]) + ')' + get_join(condition)
lfs += cond_statement
lfs += ', "' + rule["label"] + '", "' + not_satisfy_label + '")\\n'
lfs_main_func += lfs
return lfs_main_func
def get_sample_result_of_individual_rule(rule_json, file_ext, label_list, not_satisfy_label):
file_path = os.path.join(DATA_FILE_PATH, "uploaded_file." + file_ext)
size_take = 100
if file_ext in ["csv", "tsv"]:
        with open(file_path) as f:
            num_records = sum(1 for line in f) - 1
if num_records > size_take:
skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take))
else:
skip = 0
df = pd.read_csv(file_path, skiprows=skip)
elif file_path.endswith(".json"):
df = pd.read_json(file_path)
df = pd.json_normalize(df.to_dict("records"))
elif file_path.endswith(".avro"):
reader = DataFileReader(open(file_path, "rb"), DatumReader())
schema = json.loads(reader.meta.get('avro.schema').decode('utf-8'))
df = pdx.read_avro(file_path, schema=schema, na_dtypes=True)
elif file_path.endswith(".parquet"):
pf = ParquetFile(file_path)
take_rows = next(pf.iter_batches(batch_size=size_take))
df = pa.Table.from_batches([take_rows]).to_pandas()
# file_content = pd.read_parquet(file_path, engine="pyarrow")
else:
raise ValueError("Invalid file format")
rule_applier_func = create_sample_function(rule_json, label_list, not_satisfy_label)
exec(rule_applier_func)
df[rule_json["rule_name"]] = eval('lf_rule_apply')(df)
return df.to_html(classes='table table-striped text-left', justify='left', index=False)
def create_sample_function_ver2(rule_json, label_list, not_satisfy_label):
lfs_main_func = 'def lf_rule_apply(data):\\n'
lfs_main_func += '\\timport numpy as np\\n'
lfs_main_func += '\\tABSTAIN = -1\\n'
lfs_main_func += '\\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\\n'
counter = 0
for condition in rule_json["conditions"]:
lfs_return = condition["sel_label"]
if counter > 0:
lfs_return_condition = '\\telif'
else:
lfs_return_condition = '\\tif'
for label_condition in condition["label_condition"]:
if label_condition["sel_datatype"] == "string":
if label_condition["sel_condition"] == "contains":
lfs_return_condition += '((' + str(label_condition["input_value"]) + ') in data["' + \\
label_condition["sel_column"] + '"])' + get_join(label_condition)
elif label_condition["sel_condition"] in ["==", "!="]:
lfs_return_condition += '(data["' + label_condition["sel_column"] + '"]' + label_condition[
"sel_condition"] + '("' + str(
label_condition["input_value"]) + '"))' + get_join(label_condition)
else:
lfs_return_condition += '(data["' + label_condition["sel_column"] + '"].' + label_condition[
"sel_condition"] + '("' + str(label_condition["input_value"]) + '"))' + get_join(
label_condition)
else:
lfs_return_condition += '(data["' + label_condition["sel_column"] + '"]' + label_condition[
"sel_condition"] + str(label_condition["input_value"]) + ')' + get_join(label_condition)
if get_join(label_condition) == "":
lfs_return_condition += ":\\n"
lfs_return_condition += '\\t\\treturn "' + lfs_return + '"\\n'
lfs_main_func += lfs_return_condition
counter += 1
lfs_return_condition = '\\n\\telse:\\n'
lfs_return_condition += '\\t\\treturn "' + not_satisfy_label + '"'
lfs_main_func += lfs_return_condition
return lfs_main_func
def get_sample_result_of_individual_rule_ver2(rule_json, file_ext, label_list, not_satisfy_label):
file_path = os.path.join(DATA_FILE_PATH, "uploaded_file." + file_ext)
size_take = 100
if file_ext in ["csv", "tsv"]:
        with open(file_path) as f:
            num_records = sum(1 for line in f) - 1
if num_records > size_take:
skip = sorted(random.sample(range(1, num_records + 1), num_records - size_take))
else:
skip = 0
df = pd.read_csv(file_path, skiprows=skip)
elif file_path.endswith(".json"):
df = pd.read_json(file_path)
df = pd.json_normalize(df.to_dict("records"))
elif file_path.endswith(".avro"):
reader = DataFileReader(open(file_path, "rb"), DatumReader())
schema = json.loads(reader.meta.get('avro.schema').decode('utf-8'))
df = pdx.read_avro(file_path, schema=schema, na_dtypes=True)
elif file_path.endswith(".parquet"):
pf = ParquetFile(file_path)
take_rows = next(pf.iter_batches(batch_size=size_take))
df = pa.Table.from_batches([take_rows]).to_pandas()
# file_content = pd.read_parquet(file_path, engine="pyarrow")
else:
raise ValueError("Invalid file format")
rule_applier_func = create_sample_function_ver2(rule_json, label_list, not_satisfy_label)
exec(rule_applier_func)
df[rule_json["rule_name"]] = df.apply(eval('lf_rule_apply'), axis=1)
return df.to_html(classes='table table-striped text-left', justify='left', index=False)
def create_labelling_function_ver2(rule_list, label_list):
lfs_main_func = 'def lfs_list_create():\\n'
lfs_main_func += '\\tfrom snorkel.labeling import labeling_function\\n'
lfs_main_func += '\\timport numpy as np\\n'
lfs_main_func += '\\timport json\\n'
lfs_main_func += '\\tABSTAIN = -1\\n'
lfs_main_func += '\\tlabels = json.loads(json.dumps(' + json.dumps(label_list) + '))\\n'
lfs_list = '\\tlfs_list=['
for rule in rule_list:
lfs_list += 'lf_' + rule["rule_name"] + ','
lfs = '\\t@labeling_function()\\n'
lfs += '\\tdef lf_' + rule["rule_name"] + '(data):\\n'
counter = 0
for condition in rule["conditions"]:
lfs_return = 'labels.index("' + condition["sel_label"] + '")'
if counter > 0:
lfs_return_condition = '\\t\\telif'
else:
lfs_return_condition = '\\t\\tif'
for label_condition in condition["label_condition"]:
if label_condition["sel_datatype"] == "string":
if label_condition["sel_condition"] == "contains":
lfs_return_condition += '((' + str(label_condition["input_value"]) + ') in data["' + \\
label_condition["sel_column"] + '"])' + get_join(label_condition)
elif label_condition["sel_condition"] in ["==", "!="]:
lfs_return_condition += '(data["' + label_condition["sel_column"] + '"]' + label_condition[
"sel_condition"] + '("' + str(
label_condition["input_value"]) + '"))' + get_join(label_condition)
else:
lfs_return_condition += '(data["' + label_condition["sel_column"] + '"].' + label_condition[
"sel_condition"] + '("' + str(label_condition["input_value"]) + '"))' + get_join(
label_condition)
else:
lfs_return_condition += '(data["' + label_condition["sel_column"] + '"]' + label_condition[
"sel_condition"] + str(label_condition["input_value"]) + ')' + get_join(label_condition)
if get_join(label_condition) == "":
lfs_return_condition += ":\\n"
lfs_return_condition += '\\t\\t\\treturn ' + lfs_return + '\\n'
lfs += lfs_return_condition
counter += 1
lfs_return_condition = '\\n\\t\\telse:\\n'
lfs_return_condition += '\\t\\t\\treturn ABSTAIN\\n'
lfs += lfs_return_condition
lfs_main_func += lfs
if lfs_list.endswith(","):
lfs_list = lfs_list.rstrip(lfs_list[-1])
lfs_list += ']\\n'
else:
lfs_list += ']\\n'
lfs_main_func += lfs_list
lfs_main_func += '\\treturn lfs_list\\n'
lfs_main_func += 'lfs_list_create()'
# f = open(os.path.join(DATA_FILE_PATH, 'lfs_list.txt'), 'w')
# f.write(lfs_main_func)
# f.close()
return lfs_main_func
def get_rule_name_list(rule_list):
rule_name_list = []
for rule in rule_list:
rule_name_list.append(rule["rule_name"])
return rule_name_list
def label_dataset_ver2(request,rule_list, file_ext, label_list, not_satisfy_label, label_weightage, include_proba):
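    """Version 2 of label_dataset: supports class-balance weightage and optional
    per-label probabilities, registers the labelled file in the sqlite ingest
    table, and returns an LFAnalysis summary plus a sample as HTML."""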
file_path = os.path.join(DATA_FILE_PATH, "uploaded_file." + file_ext)
if file_ext in ["csv", "tsv"]:
df = pd.read_csv(file_path)
elif file_ext == "json":
df = pd.json_normalize(pd.read_json(file_path).to_dict("records"))
elif file_ext == "avro":
reader = DataFileReader(open(file_path, "rb"), DatumReader())
schema = json.loads(reader.meta.get('avro.schema').decode('utf-8'))
df = pdx.read_avro(file_path, schema=schema, na_dtypes=True)
elif file_ext == "parquet":
df = pd.read_parquet(file_path, engine="pyarrow")
labelling_functions = create_labelling_function_ver2(rule_list, label_list)
exec(labelling_functions)
lfs = eval('lfs_list_create()')
applier = PandasLFApplier(lfs)
l_data = applier.apply(df)
label_model = LabelModel(cardinality=len(label_list), verbose=True)
label_model.fit(l_data, n_epochs=500, log_freq=50, seed=123, class_balance=label_weightage)
df["label"] = label_model.predict(L=l_data, tie_break_policy="abstain")
if include_proba:
prediction_of_prob = label_model.predict_proba(L=l_data)
for label in label_list:
df[label + "_prob"] = np.around(prediction_of_prob[:, label_list.index(label)], 2) * 100
df.loc[df["label"] == -1, "label"] = not_satisfy_label
filetimestamp = str(int(time.time()))
datasetName = "AION_labelled_"+filetimestamp + '.' + file_ext
request.session['AION_labelled_Dataset'] = datasetName
for item in label_list:
df.loc[df["label"] == label_list.index(item), "label"] = item
if file_ext in ["csv", "tsv"]:
df.to_csv(os.path.join(DATA_FILE_PATH, datasetName), index=False)
elif file_ext == "parquet":
df.to_parquet(os.path.join(DATA_FILE_PATH, datasetName),
engine="pyarrow", index=False)
elif file_ext == "avro":
pdx.to_avro(os.path.join(DATA_FILE_PATH, datasetName), df)
else:
raise ValueError("Invalid file format")
#### saving file to database
from appbe.dataPath import DATA_DIR
from appbe.sqliteUtility import sqlite_db
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
newdata = {}
newdata['datapath'] = [os.path.join(DATA_FILE_PATH, datasetName)]
newdata['datasetname'] = [datasetName]
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata), 'dataingest')
num_records = len(df.index)
size_take = 100
if num_records <= size_take:
size_take = num_records
display_df = df.sample(n=size_take)
weightage = np.around(label_model.get_weights(), 2)
rule_name_list = get_rule_name_list(rule_list)
analysis_df = LFAnalysis(l_data, lfs).lf_summary()
analysis_df["Rule"] = analysis_df.index
analysis_df["Rule"] = analysis_df["Rule"].str.replace("lf_", "")
analysis_df = analysis_df[["Rule", "Polarity", "Coverage", "Overlaps", "Conflicts"]]
weightage_dict = dict(zip(rule_name_list, weightage))
analysis_json = analysis_df.to_dict(orient="records")
for item in analysis_json:
item["Weightage"] = weightage_dict[item["Rule"]]
analysis_df = pd.json_normalize(analysis_json)
# rules_weightage = []
# for key in weightage_dict:
# rules_weightage.append({
# "label": key,
# "y": weightage_dict[key],
# "legendText": key
# })
response = {
# "rule_name_list": rule_name_list,
# "weightage_list": list(weightage),
"analysis_df": analysis_df.to_html(classes='table table-striped text-left', justify='left', index=False),
"result_html": display_df.to_html(classes='table table-striped text-left', justify='left', index=False)
}
return response
def get_label_and_weightage(test_file_ext, marked_label_column,file_delim_test, custom_test_delim ):
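    """Derive the label names and their percentage weightage from the marked
    label column of the uploaded test file. Note: the delimiter arguments are
    currently unused."""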
file_path = os.path.join(DATA_FILE_PATH, "test_data_file." + test_file_ext)
if test_file_ext in ["csv", "tsv"]:
df = pd.read_csv(file_path)
elif test_file_ext == "json":
df = pd.json_normalize(pd.read_json(file_path).to_dict("records"))
elif test_file_ext == "avro":
reader = DataFileReader(open(file_path, "rb"), DatumReader())
schema = json.loads(reader.meta.get('avro.schema').decode('utf-8'))
df = pdx.read_avro(file_path, schema=schema, na_dtypes=True)
elif test_file_ext == "parquet":
df = pd.read_parquet(file_path, engine="pyarrow")
json_df = pd.DataFrame(df[marked_label_column].value_counts(normalize=True) * 100)
json_dict = json.loads(json_df.to_json())
label_with_weightage = []
for k in json_dict[marked_label_column]:
label_with_weightage.append(
{"label_name": k, "label_weightage": np.around(json_dict[marked_label_column][k], 2)})
return label_with_weightage
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import time
from pathlib import Path
import logging
from datetime import datetime as dt
class logg():
from appbe.dataPath import LOG_LOCATION
def __init__(self, LOG_LOCATION):
self.log_location = LOG_LOCATION
def create_log(self,version):
log_file_path = Path(self.log_location)
log_file_path.mkdir(parents=True, exist_ok=True)
time_stamp = dt.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S')
fileName='log_ux_'+time_stamp+'.log'
filehandler = logging.FileHandler(log_file_path/fileName, 'a','utf-8')
formatter = logging.Formatter('%(asctime)s %(message)s')
filehandler.setFormatter(formatter)
log = logging.getLogger('log_ux')
log.propagate = False
for hdlr in log.handlers[:]: # remove the existing file handlers
if isinstance(hdlr,logging.FileHandler):
log.removeHandler(hdlr)
log.addHandler(filehandler)
log.setLevel(logging.INFO)
log.info('********** AION_'+str(version)+' **********')
return log<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os,sys
def read_service_url_params(request):
hosturl =request.get_host()
url='http://'+hosturl+'/api/'
return url
def read_monitoring_service_url_params(request):
hosturl =request.get_host()
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config'))
file = open(file_path, "r")
data = file.read()
file.close()
service_url = '127.0.0.1'
service_port='60050'
for line in data.splitlines():
if 'aion_service_url=' in line:
service_url= line.split('=',1)[1]
if 'aion_service_port=' in line:
service_port= line.split('=',1)[1]
    # parsed service_url/service_port are currently unused; the API is served from the request host
    url='http://'+hosturl+'/api/'
return url
def read_performance_service_url_params(request):
hosturl =request.get_host()
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config'))
file = open(file_path, "r")
data = file.read()
file.close()
service_url = '127.0.0.1'
service_port='60050'
for line in data.splitlines():
if 'aion_service_url=' in line:
service_url= line.split('=',1)[1]
if 'aion_service_port=' in line:
service_port= line.split('=',1)[1]
url='http://'+hosturl+'/api/'
return url
def read_pattern_anomaly_url_params(request):
hosturl =request.get_host()
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config'))
file = open(file_path, "r")
data = file.read()
file.close()
service_url = '127.0.0.1'
service_port='60050'
for line in data.splitlines():
if 'aion_service_url=' in line:
service_url= line.split('=',1)[1]
if 'aion_service_port=' in line:
service_port= line.split('=',1)[1]
url='http://'+hosturl+'/api/pattern_anomaly_predict/'
return url
def read_pattern_anomaly_setting_url_params(request):
hosturl =request.get_host()
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','aion.config'))
file = open(file_path, "r")
data = file.read()
file.close()
service_url = '127.0.0.1'
service_port='60050'
for line in data.splitlines():
if 'aion_service_url=' in line:
service_url= line.split('=',1)[1]
if 'aion_service_port=' in line:
service_port= line.split('=',1)[1]
url='http://'+hosturl+'/api/pattern_anomaly_settings/'
return url<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import shutil
import subprocess
import sys
import glob
from pathlib import Path
import json
from django.http import FileResponse
from django.http import HttpResponse
from importlib.metadata import version
COMMON_PACKAGES = "'setuptools >=62.3.0','pandas==1.5.3','numpy==1.24.2','joblib==1.2.0','Cython==0.29.33','scipy==1.10.1','scikit-learn==1.2.1','word2number==1.1','category_encoders==2.6.0'"
DL_COMMON_PACKAGE = "'tensorflow==2.11.0'"
TEXT_PACKAGES = "'spacy==3.5.0','nltk==3.8.1','textblob==0.15.3','demoji==1.1.0','bs4==0.0.1','text-unidecode==1.3','pyspellchecker==0.6.2','contractions==0.1.73','protobuf==3.19.6','lxml'"
def createPackagePackage(request,id,version,usecasedetails,Existusecases):
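    """Build a docker image for a deployed model: copy the deployment folder,
    write a Dockerfile, build the image, and save it as a tar archive under
    publish/docker_image."""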
from appbe.pages import get_usecase_page
#print('2')
usecasedetail = usecasedetails.objects.get(id=id)
models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS',Version=version)
modelid = models[0].id
p = Existusecases.objects.get(id=modelid)
deploymentfolder = str(p.DeployPath)
modelname = p.ModelName.usecaseid
version = p.Version
deployed_code = 'AION'
dockerimage = os.path.join(deploymentfolder,'publish','docker_image')
dockersetup = os.path.join(deploymentfolder,'publish','docker_setup')
tempPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'temp_'+modelname+'_'+str(version))
try:
shutil.rmtree(tempPath,ignore_errors=True)
except:
pass
shutil.copytree(deploymentfolder,tempPath)
shutil.rmtree(os.path.join(tempPath,'publish'), ignore_errors=True)
try:
Path(os.path.join(deploymentfolder,'publish')).mkdir(parents=True, exist_ok=True)
os.mkdir(dockersetup)
except:
shutil.rmtree(dockersetup,ignore_errors=True)
os.mkdir(dockersetup)
try:
os.mkdir(dockerimage)
except:
shutil.rmtree(dockerimage,ignore_errors=True)
os.mkdir(dockerimage)
shutil.copytree(tempPath, os.path.join(dockersetup,deployed_code))
shutil.rmtree(tempPath)
docker_setup = os.path.join(dockersetup,'AION')
try:
os.mkdir(dockerimage)
except:
pass
requirementfilename = os.path.join(dockersetup,'requirements.txt')
installfilename = os.path.join(dockersetup,'install.py')
dockerfile = os.path.join(dockersetup,'Dockerfile')
dockerdata='FROM python:3.10-slim-buster'
dockerdata+='\\n'
dockerdata+='WORKDIR /app'
dockerdata+='\\n'
dockerdata+='COPY AION AION'
dockerdata+='\\n'
dockerdata+='''RUN apt-get update \\
&& apt-get install -y build-essential manpages-dev \\
&& apt-get install -y libgomp1 \\
&& python -m pip install --no-cache-dir -r AION/requirements.txt
'''
f = open(dockerfile, "w")
f.write(str(dockerdata))
f.close()
try:
try:
import docker
client = docker.from_env()
client.containers.list()
except:
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
context['Status'] = 'Error'
context['Msg'] = 'Docker should be installed and running on your machine. To build the docker image manually, the setup script is available at the following location: \\\\n'+dockersetup.replace('\\\\', '/')
return context
command = 'docker pull python:3.10-slim-buster'
os.system(command)
subprocess.check_call(["docker", "build", "-t",modelname.lower()+":"+str(version),"."], cwd=dockersetup)
subprocess.check_call(["docker", "save", "-o",modelname.lower()+"_"+str(version)+".tar",modelname.lower()+":"+str(version)], cwd=dockersetup)
dockerfilepath = os.path.join(dockersetup,modelname.lower()+"_"+str(version)+".tar")
shutil.copyfile(dockerfilepath, os.path.join(dockerimage,modelname.lower()+"_"+str(version)+".tar"))
shutil.rmtree(dockersetup)
msg = 'Done'
Status = 'SUCCESS'
except Exception as e:
msg = 'Error in docker image creation. To build the docker image manually, the setup is available at the following location: '+dockersetup.replace('\\\\', '\\\\\\\\')
Status = 'Fail'
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
context['Status'] = Status
context['Msg'] = msg
return context
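# A hedged sketch of the manual steps that createPackagePackage() automates,
# assuming Docker is installed and the generated docker_setup folder exists:
#   cd <deploy_path>/publish/docker_setup
#   docker pull python:3.10-slim-buster
#   docker build -t <usecaseid>:<version> .
#   docker save -o <usecaseid>_<version>.tar <usecaseid>:<version>
# The resulting tar archive is what gets copied into publish/docker_image above.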
def downloadPackage(request,id,version,usecasedetails,Existusecases):
try:
if 'downloadstatus' in request.session:
if request.session['downloadstatus'] == 'Downloading':
return HttpResponse(json.dumps("Error Creating Package"), content_type="application/error")
request.session['downloadstatus'] = 'Downloading'
usecasedetail = usecasedetails.objects.get(id=id)
models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS',Version=version)
modelid = models[0].id
p = Existusecases.objects.get(id=modelid)
deployPath = str(p.DeployPath)
if os.path.isdir(os.path.join(deployPath,'publish','package')):
for f in os.listdir(os.path.join(deployPath,'publish','package')):
if f.endswith('whl'):
os.remove(os.path.join(deployPath,'publish','package',f))
usecasename = p.ModelName.usecaseid
Version = p.Version
deployed_code = usecasename
targetname = usecasename+'_'+str(Version)
whl_dir_name = 'WHEEL_'+usecasename+'_'+str(Version)
deployLocation = os.path.join(deployPath,'..',whl_dir_name)
try:
os.makedirs(deployLocation)
except OSError as e:
shutil.rmtree(deployLocation)
os.makedirs(deployLocation)
shutil.copytree(deployPath,os.path.join(deployLocation,deployed_code))
initstring = 'import os'
initstring += '\\n'
initstring += 'import sys'
initstring += '\\n'
initstring += 'sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))'
filename = os.path.join(deployLocation,deployed_code,'__init__.py')
f = open(filename, "w")
f.write(str(initstring))
f.close()
textdata=0
learner_type = 'ml'
requirementfile = os.path.join(deployPath,'requirements.txt')
install_requires = ''
if os.path.exists(requirementfile):
fileobj = open(requirementfile, 'r')
requirePackages = fileobj.readlines()
fileobj.close()
for package in requirePackages:
if install_requires != '':
install_requires = install_requires+','
install_requires = install_requires+'\\''+package.strip()+'\\''
setup_string = 'from setuptools import setup,find_packages'
setup_string += '\\n'
setup_string += 'setup(name=\\''+deployed_code+'\\','
setup_string += '\\n'
setup_string += 'version=\\'1\\','
setup_string += '\\n'
setup_string += 'packages = find_packages(),'
setup_string += '\\n'
setup_string += 'install_requires = ['+install_requires+'],'
setup_string += '\\n'
setup_string += 'package_data={"'+deployed_code+'.pytransform":["*.*"],"'+deployed_code+'":["*.sav","*.json"],"":["*","*/*","*/*/*"]}'
setup_string += '\\n'
setup_string += ')'
filename = os.path.join(deployLocation,'setup.py')
f = open(filename, "w")
f.write(str(setup_string))
f.close()
subprocess.check_call([sys.executable, "setup.py", "bdist_wheel"], cwd=deployLocation)
shutil.copytree(os.path.join(deployLocation,'dist'),os.path.join(deployPath,'publish','package'),dirs_exist_ok=True)
shutil.rmtree(deployLocation)
if os.path.isdir(os.path.join(deployPath,'publish','package')):
for f in os.listdir(os.path.join(deployPath,'publish','package')):
if f.endswith('whl'):
package = f
zip_file = open(os.path.join(deployPath,'publish','package',package), 'rb')
request.session['downloadstatus'] = 'Done'
return FileResponse(zip_file)
except Exception as e:
print(e)
request.session['downloadstatus'] = 'Done'
return HttpResponse(json.dumps("Error Creating Package"), content_type="application/error")
def installPackage(model,version,deployedPath):
deployedPath = os.path.join(deployedPath,'publish','package')
whlfilename='na'
if os.path.isdir(deployedPath):
for file in os.listdir(deployedPath):
if file.endswith(".whl"):
whlfilename = os.path.join(deployedPath,file)
if whlfilename != 'na':
subprocess.check_call([sys.executable, "-m", "pip", "uninstall","-y",model])
subprocess.check_call([sys.executable, "-m", "pip", "install","--no-dependencies",whlfilename])
status,pid,ip,port = checkModelServiceRunning(model)
if status == 'Running':
stopService(pid)
startService(model,ip,port)
return('Success')
else:
return('Installation Package not Found')
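# Hedged usage sketch for installPackage(); the model name and path are illustrative:
#   status = installPackage('ai0001', 2, '/path/to/deployment')
# This pip-uninstalls any prior 'ai0001' package, installs the newest .whl found
# under <deployedPath>/publish/package, and restarts the model service if running.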
def getMIDFromUseCaseVersion(id,version,usecasedetails,Existusecases):
usecasedetail = usecasedetails.objects.get(id=id)
models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS',Version=version)
return(models[0].id)
def stopService(pid):
import psutil
p = psutil.Process(int(pid))
p.terminate()
def checkModelServiceRunning(package_name):
from os.path import expanduser
home = expanduser("~")
if platform.system() == 'Windows':
modelServices = os.path.join(home,'AppData','Local','HCLT','AION','services')
else:
modelServices = os.path.join(home,'HCLT','AION','target','services')
filename = package_name+'_service.py'
modelservicefile = os.path.join(modelServices,filename)
status = 'Not Initialized'
ip = ''
port = ''
pid = ''
if os.path.exists(modelservicefile):
status = 'Not Running'
import psutil
for proc in psutil.process_iter():
pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline','connections'])
if 'python' in pinfo['name']:
if filename in pinfo['cmdline'][1]:
status = 'Running'
pid = pinfo['pid']
for x in pinfo['connections']:
ip = x.laddr.ip
port = x.laddr.port
return(status,pid,ip,port)
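# checkModelServiceRunning() scans the running python processes for
# '<package_name>_service.py' and reports its listening address. Hedged example:
#   status, pid, ip, port = checkModelServiceRunning('ai0001')
#   # -> ('Running', 1234, '127.0.0.1', 8094) when up (values illustrative)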
def startService(package_name,ip,portNo):
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','bin','model_service.py'))
from os.path import expanduser
home = expanduser("~")
if platform.system() == 'Windows':
modelServices = os.path.join(home,'AppData','Local','HCLT','AION','services')
else:
modelServices = os.path.join(home,'HCLT','AION','target','services')
if not os.path.isdir(modelServices):
os.makedirs(modelServices)
filename = package_name+'_service.py'
modelservicefile = os.path.join(modelServices,filename)
status = 'File Not Exist'
if os.path.exists(modelservicefile):
status = 'File Exist'
r = ([line.split() for line in subprocess.check_output("tasklist").splitlines()])
for i in range(len(r)):
if filename in r[i]:
status = 'Running'
if status == 'File Not Exist':
shutil.copy(file_path,modelservicefile)
with open(modelservicefile, 'r+') as file:
content = file.read()
file.seek(0, 0)
line = 'from '+package_name+' import aion_performance'
file.write(line+"\\n")
line = 'from '+package_name+' import aion_drift'
file.write(line+ "\\n")
line = 'from '+package_name+' import featureslist'
file.write(line+ "\\n")
line = 'from '+package_name+' import aion_prediction'
file.write(line+ "\\n")
file.write(content)
file.close()
status = 'File Exist'
if status == 'File Exist':
command = "python "+modelservicefile+' '+str(portNo)+' '+str(ip)
os.system('start cmd /c "'+command+'"')
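# The next helper, checkInstalledPackge(), resolves an installed wheel through
# importlib and reads the etc/display.json bundled with it. Hedged usage sketch:
#   state, modelName, version = checkInstalledPackge('ai0001')
#   # -> ('Installed', '<usecasename>', '<version>') or ('Not Installed', '', '')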
def checkInstalledPackge(package_name):
import importlib.util
spec = importlib.util.find_spec(package_name)
if spec is None:
return('Not Installed','','')
else:
if len(spec.submodule_search_locations) > 0:
displaypath = os.path.join(spec.submodule_search_locations[0],'etc','display.json')
with open(displaypath) as file:
config = json.load(file)
file.close()
if 'usecasename' in config:
modelName = config['usecasename']
else:
modelName = 'NA'
if 'version' in config:
version = config['version']
else:
version = 'NA'
return('Installed',modelName,version)
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from appbe.data_io import sqlite_db
from os.path import expanduser
import platform
import pandas as pd
import os
from appbe.dataPath import DATA_DIR
PUBLISH_PATH = os.path.join(DATA_DIR,'publish')
DEPLOY_DATABASE_PATH = os.path.join(DATA_DIR,'sqlite')
def chech_publish_info(usecasename):
version = 0
status = 'Not Published'
inputDriftStatus = 'No Drift'
MODEL_DEPLOY_DATABASE_PATH = os.path.join(PUBLISH_PATH,usecasename,'database')
sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db')
if sqlite_dbObj.table_exists('publish'):
data = sqlite_dbObj.read('publish',"usecase = '"+usecasename+"' and status = 'Published'")
if data.shape[0] > 0:
model_sqlite_dbObj = sqlite_db(MODEL_DEPLOY_DATABASE_PATH,'deploy.db')
version = data['version'].iloc[0]
status = 'Published'
if model_sqlite_dbObj.table_exists('monitoring'):
data = model_sqlite_dbObj.read('monitoring',"version = '"+str(version)+"'")
if data.shape[0] > 0:
msg = data['Msg'].iloc[-1]
if 'Affected Columns' in msg:
inputDriftStatus = 'Input Drift Found'
return version,status,inputDriftStatus
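# Hedged usage sketch; the usecase name is illustrative:
#   version, status, drift = chech_publish_info('ai0001')
#   # -> (2, 'Published', 'Input Drift Found') when a published row exists and the
#   #    model's monitoring table logs a message containing 'Affected Columns'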
def check_input_data(usecasename):
MODEL_DEPLOY_DATABASE_PATH = os.path.join(PUBLISH_PATH,usecasename,'database')
sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db')
data = pd.DataFrame()
if sqlite_dbObj.table_exists('publish'):
dataa = sqlite_dbObj.read('publish',"usecase = '"+usecasename+"' and status = 'Published'")
if dataa.shape[0] > 0:
modelsqlite_dbObj = sqlite_db(MODEL_DEPLOY_DATABASE_PATH,'deploy.db')
if modelsqlite_dbObj.table_exists('prodData'):
data = modelsqlite_dbObj.read('prodData')
return data
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
import os
import pandas as pd
import numpy as np
import subprocess
import sys
import re
import plotly.graph_objects as go
import plotly.figure_factory as ff
def global_explain(request):
try:
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
problemType = 'classification'
for key in configSettingsJson['basic']['analysisType']:
if configSettingsJson['basic']['analysisType'][key] == 'True':
problemType = key
break
if problemType.lower() != 'classification' and problemType.lower() != 'regression':
return 'Problem Type Error','Explainable AI is only available for classification and regression problems','NA','NA','NA','NA',0,0,'NA','NA','NA','NA',0,'NA','NA',0,'NA','NA','NA','NA','NA','NA'
displaypath = os.path.join( request.session['deploypath'],'etc','display.json')
with open(displaypath) as file:
config = json.load(file)
file.close()
inputFeatures = configSettingsJson['basic']['trainingFeatures']
targetFeature = configSettingsJson['basic']['targetFeature']
inputFeatures = inputFeatures.split(',')
if targetFeature in inputFeatures:
inputFeatures.remove(targetFeature)
dataFilePath = str(configSettingsJson['basic']['dataLocation'])
from utils.file_ops import read_df_compressed
status,df = read_df_compressed(config['postprocessedData'],encoding='utf8',nrows=10)
#print(df)
df.rename(columns=lambda x: x.strip(), inplace=True)
df = df[inputFeatures]
#print(df)
singleInstanceData = df.loc[5, inputFeatures]
inputFieldsDict = singleInstanceData.to_dict()
inputFields = []
inputFields.append(inputFieldsDict)
if 'nrows' in config:
nrows = config['nrows']
else:
nrows = 'Not Available'
if 'ncols' in config:
ncols = config['ncols']
else:
ncols = 'Not Available'
if 'targetFeature' in config:
targetFeature = config['targetFeature']
else:
targetFeature = ''
labelMaps = config['labelMaps']
modelfeatures = config['modelFeatures']
mfcount = len(modelfeatures)
df_proprocessed = pd.read_csv(dataFilePath)
if targetFeature != '':
target_classes = df_proprocessed[targetFeature].unique()
numberofclasses = len(target_classes)
else:
target_classes = []
numberofclasses = 'Not Available'
dataPoints = df_proprocessed.shape[0]
df_proprocessed = df_proprocessed.head(5)
df_proprocessed = df_proprocessed.to_json(orient="records")
df_proprocessed = json.loads(df_proprocessed)
expainableAIPath = os.path.join(request.session['deploypath'],'aion_xai.py')
outputStr = subprocess.check_output([sys.executable,expainableAIPath,'global'])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_ai_explanation:(.*)',str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
ale_json = json.loads(str(outputStr))
ale_json = ale_json['data']
ale_view = ale_json['data']
sentences = ale_json['sentences']
scoreMessage = ''
feature_importance = ale_json['feature_importance']
dfimp = pd.DataFrame.from_dict(feature_importance)
dfimp = dfimp.sort_values(by=['values'],ascending=False).reset_index()
yaxis_data = dfimp['values'].tolist()
xaxis_data = dfimp['labels'].tolist()
cfig = go.Figure()
cfig.add_trace(go.Bar(x=xaxis_data,y=yaxis_data,name='Feature Importance'))
cfig.update_layout(barmode='stack',xaxis_title='Features')
bargraph = cfig.to_html(full_html=False, default_height=450,default_width=1000)
dftoprecords = dfimp.head(2)
topTwoFeatures = dfimp['labels'].tolist()
topFeaturesMsg = []
for i in range(0,len(dfimp)):
value = round(dfimp.loc[i, "values"],2)*100
value = round(value,2)
tvalue = str(dfimp.loc[i, "labels"])+' contributing to '+ str(value)+'%'
topFeaturesMsg.append(tvalue)
most_influencedfeature = ale_json['most_influencedfeature']
interceppoint = ale_json['interceptionpoint']
anchorjson = ale_json['anchorjson']
return 'Success','Success',ale_view,sentences,bargraph,inputFields,nrows,ncols,targetFeature,dataPoints,target_classes,df_proprocessed,numberofclasses,modelfeatures,problemType,mfcount,topTwoFeatures,topFeaturesMsg,most_influencedfeature,interceppoint,anchorjson,labelMaps
except Exception as Inst:
print(Inst)
return 'Error','Exception: '+str(Inst),'NA','NA','NA','NA',0,0,'NA','NA','NA','NA',0,'NA','NA',0,'NA','NA','NA','NA','NA','NA'
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
import pandas as pd
def get_true_option(d, default_value=None):
if isinstance(d, dict):
for k, v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
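# e.g. (hedged sketch): get_true_option({'classification': 'True', 'regression': 'False'})
# returns 'classification'; get_true_options() below collects every key whose value
# is the string 'true' (case-insensitive) or the boolean True.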
def get_true_options(d):
options = []
if isinstance(d, dict):
for k, v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
options.append(k)
return options
def check_datetime(config):
dateTime = config['basic']['dateTimeFeature']
if dateTime == '' or dateTime.lower()=='na':
return False
return True
def check_dtype(d):
flag= 1
for item in d:
if item["type"].lower() != "text" and item["type"].lower() != "index":
flag = 0
break
return flag
def check_text(d): #task 12627
flag= 0
for item in d:
if item["type"].lower() == "text":
flag = 1
break
return flag
def check_labelencoding(ftr_dict_list, target_ftr):
for ftr_dict in ftr_dict_list:
if ftr_dict['feature']!=target_ftr and ftr_dict['type'].lower()=='categorical' and ftr_dict['categoryEncoding'].lower()!='labelencoding':
return False
return True
class timeseries():
def __init__(self,config):
self.config=config
if self.config['basic']['analysisType']['timeSeriesForecasting'].lower()=='true': #task 11997
self.problemType = 'timeSeriesForecasting'
elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true':
self.problemType = 'timeSeriesAnomalyDetection' #task 11997
def validate_basic_config(self,status='pass',msg=None):
#task 12627
date_time_status = check_datetime(self.config)
text_status = check_text(self.config['advance']['profiler']['featureDict'])
if not date_time_status and text_status:
msg = 'For time series problem,\\\\n* One feature should be in datetime format\\\\n* Text feature not supported '
return 'error', msg
elif not date_time_status:
msg = 'For time series problem, one feature should be in datetime format'
return 'error', msg
elif text_status:
msg = 'For time series problem, text feature not supported '
return 'error', msg
selected_algos = get_true_options(self.config['basic']['algorithms'][self.problemType]) #task 11997
if isinstance(self.config['basic']['targetFeature'],str):
targetFeature = list(self.config['basic']['targetFeature'].split(','))
if self.problemType=='timeSeriesForecasting': #task 11997
if len(targetFeature) > 1:
if 'ARIMA' in selected_algos:
status = 'error'
msg = "ARIMA is not supported for multilabel (target) feature"
return status, msg
if "FBPROPHET" in selected_algos:
status = 'error'
msg = "FBPROPHET is not supported for multiLabel (target) feature"
return status, msg
if 'MLP' in selected_algos:
status = 'error'
msg = "MLP is not supported for multiLabel (target) feature"
return status, msg
if len(targetFeature) == 1 and 'VAR' in selected_algos:
status = 'error'
msg = "VAR is not supported for singleLabel (target) feature"
return status, msg
elif self.problemType=='timeSeriesAnomalyDetection': #task 11997
anomChecker = anomaly(self.config)
status, msg = anomChecker.validate_basic_config()
return status, msg
class anomaly():
def __init__(self,config):
self.config = config
if self.config['basic']['analysisType']['anomalyDetection'].lower()=='true': #task 11997
self.problemType = 'anomalyDetection'
elif self.config['basic']['analysisType']['timeSeriesAnomalyDetection'].lower()=='true': #task 11997
self.problemType = 'timeSeriesAnomalyDetection'
def validate_basic_config(self,status='pass',msg=None):
#task 12627
date_time_status = check_datetime(self.config)
targetFeature = self.config['basic']['targetFeature']
if self.problemType=='anomalyDetection' and date_time_status:
status = 'error'
msg = 'Date feature detected. For anomaly detection on time series, change the problem type to Time Series Anomaly Detection or drop the date feature'
return status, msg
if targetFeature.lower()!= 'na' and targetFeature!= "" and self.config['basic']['inlierLabels'] == '':
status = 'error'
msg = 'Please provide inlier label in case of supervised anomaly detection'
return status, msg
class survival():
def __init__(self,config):
self.config = config
self.problemType= 'survivalAnalysis'
def validate_basic_config(self):
dateTimeStatus = check_datetime(self.config)
labelencoding_status = check_labelencoding(self.config['advance']['profiler']['featureDict'], self.config['basic']['targetFeature'])
if not dateTimeStatus and not labelencoding_status:
msg = 'For survival analysis problem,\\\\n* One feature should be in datetime format\\\\n* Encoding of categorical features should be of label encoding '
return 'error', msg
elif not dateTimeStatus:
msg = 'One feature should be in datetime format for survival analysis problem. Please select it from model feature'
return 'error', msg
elif not labelencoding_status:
msg = 'Categorical features are expected to be label encoded for survival analysis problem. Please select it from feature encoding'
return 'error', msg
else:
return 'pass', " "
class associationrule():
def __init__(self,config):
self.config=config
def validate_basic_config(self,status='pass', msg=None):
if self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'].lower() == '' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'].lower() == 'na' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'].lower() == '' or self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'].lower() == 'na':
return "error","Make sure to configure invoice feature and item feature"
elif self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'] == self.config['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature']:
return "error","Make sure to invoice feature and item feature is configure correctly"
else:
return "pass", " "
class itemrating(): #task 6081
def __init__(self,config):
self.config = config
def validate_basic_config(self):
data_loc = self.config['basic']['dataLocation']
data_length = len(pd.read_csv(data_loc))
if data_length >= 1000000:
return 'error', "Recommender System can handle data up to 1 million records. Please try with a smaller dataset."
else:
return "pass"," "
class documentsimilarity():
def __init__(self,config):
self.config=config
def validate_basic_config(self,status='pass', msg=None):
flag = check_dtype(self.config['advance']['profiler']['featureDict'])
if flag == 1:
return "pass", " "
else:
msg="Make sure to change the feature type from Categorical to Text and drop Numerical features for document similarity"
return "error", msg
def validate(config):
try:
problem_type = get_true_option(config['basic']['analysisType'])
status = 'pass'
msg = ''
if 'timeseries' in problem_type.lower(): #task 11997
obj = timeseries(config)
elif problem_type.lower() == 'survivalanalysis':
obj = survival(config)
elif problem_type.lower() == 'anomalydetection':
obj = anomaly(config)
elif problem_type.lower() in ['similarityidentification','contextualsearch']:
obj = documentsimilarity(config)
elif problem_type.lower() == 'recommendersystem':
if config['basic']['algorithms']['recommenderSystem']['AssociationRules-Apriori'].lower() == 'true':
obj = associationrule(config)
elif config['basic']['algorithms']['recommenderSystem']['ItemRating'].lower() == 'true': #task 6081
obj = itemrating(config)
else:
return 'pass',""
else:
return 'pass',""
status,msg= obj.validate_basic_config()
print(status, msg, 'io')
return(status,msg)
except Exception as e:
print(e)
def start_check(config):
return validate(config)
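# Hedged sketch of how start_check() dispatches on the configured problem type;
# the config below is a hypothetical minimal example, not a full AION config:
#   cfg = {'basic': {'analysisType': {'survivalAnalysis': 'True'},
#                    'dateTimeFeature': 'admit_date', 'targetFeature': 'event'},
#          'advance': {'profiler': {'featureDict': [
#              {'feature': 'sex', 'type': 'categorical',
#               'categoryEncoding': 'labelEncoding'}]}}}
#   start_check(cfg)  # -> ('pass', ' ') via the survival() validator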
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import numpy as np
import os
import sys
import scipy.stats as st
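# DistributionFinder() below fits a family of candidate scipy.stats distributions
# and returns the best-fitting name with its sum of squared errors. Hedged sketch:
#   import pandas as pd
#   name, sse = DistributionFinder(pd.Series(np.random.poisson(3, 500)))
# Integer data takes the 'Discrete' branch, float data the 'Continuous' branch.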
def DistributionFinder(data):
try:
distributionName = ""
sse = 0.0
KStestStatic = 0.0
dataType = ""
if (data.dtype == "float64"):
dataType = "Continuous"
elif (data.dtype == "int"):
dataType = "Discrete"
elif (data.dtype == "int64"):
dataType = "Discrete"
if (dataType == "Discrete"):
distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson]
index, counts = np.unique(data.astype(int), return_counts=True)
if (len(index) >= 2):
best_sse = np.inf
y1 = []
total = sum(counts)
mean = float(sum(index * counts)) / total
variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1)
dispersion = mean / float(variance)
theta = 1 / float(dispersion)
r = mean * (float(theta) / (1 - theta))
for j in counts:
y1.append(float(j) / total)
pmf1 = st.bernoulli.pmf(index, mean)
pmf2 = st.binom.pmf(index, len(index), p=mean / len(index))
pmf3 = st.geom.pmf(index, 1 / float(1 + mean))
pmf4 = st.nbinom.pmf(index, mean, r)
pmf5 = st.poisson.pmf(index, mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1 - pmf5, 2.0))
sselist = [sse1, sse2, sse3, sse4, sse5]
best_distribution = 'NA'
for i in range(0, len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName = best_distribution
sse = best_sse
elif (dataType == "Continuous"):
distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t,
st.gamma, st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin = data.min()
datamax = data.max()
nrange = datamax - datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
params = distribution.fit(data.astype(float))
arg = params[:-2]
loc = params[-2]
scale = params[-1]
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if (best_sse > sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName = best_distribution
sse = best_sse
except:
response = str(sys.exc_info()[0])
message = 'Job has Failed' + response
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
print(message)
return distributionName, sse
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
import os
import rsa
import boto3 #usnish
import pandas as pd
import time
def add_new_bucket(request):
try:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','s3bucket.conf'))
with open(file_path, 'r') as f:
data = json.load(f)
except:
data = []
if request.POST["aionreferencename"] =='' or request.POST["s3bucketname"] == '' or request.POST["awsaccesskey"] == '' :
return 'error'
pkeydata='''-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1AfnrMv
fVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw0m4e
wQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2PM4Re
n0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHyKxlq
i/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhxWrs/
lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQAB
-----END RSA PUBLIC KEY-----'''
pubkey = rsa.PublicKey.load_pkcs1(pkeydata)
awssecretaccesskey = rsa.encrypt(request.POST["awssecretaccesskey"].encode(), pubkey)
print(awssecretaccesskey)
newdata = {}
newdata['Name'] = request.POST["aionreferencename"]
newdata['AWSAccessKeyID'] = request.POST["awsaccesskey"]
newdata['AWSSecretAccessKey'] = str(awssecretaccesskey)
newdata['S3BucketName'] = request.POST["s3bucketname"]
data.append(newdata)
with open(file_path, 'w') as f:
json.dump(data, f)
f.close()
def get_s3_bucket():
try:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','s3bucket.conf'))
with open(file_path, 'r') as f:
data = json.load(f)
except:
data = []
return data
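# read_s3_bucket() below decrypts the stored AWS secret with the paired RSA private
# key and fetches a single CSV object. Hedged usage sketch (names illustrative):
#   status, df = read_s3_bucket('my-s3-ref', 'data/train.csv', DATA_FILE_PATH)
#   # -> ('Success', <DataFrame>) on success, otherwise ('Error', empty DataFrame)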
def read_s3_bucket(name,filename,DATA_FILE_PATH):
privkey = '''-----BEGIN RSA PRIVATE KEY-----
MIIEqQIBAAKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1Af
nrMvfVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw
0m4ewQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2P
M4Ren0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHy
Kxlqi/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhx
Wrs/lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQABAoIBAC/VbNfQPEqJSO3f
VFPqfR73q2MbGdgiMQOTgeDvLxiF1QdizJ+j/I5mgiIAMviXuOpPU+NbdMHbZZWd
D15kNlD8UCXVg6yyiOuHStjmjK4uHe8I86E1nxTb0hbyZCWZlbk/WizlDHInu+dT
KdIZcq2AIidU6tAxtwA0ingHaRSoXDlSGwOTEigNqmWOKnDTVg0SMscoHOD7siXF
DHm1/lkvD3uvcZk6c7fGxC8SgNX2dj6n/Nbuy0Em+bJ0Ya5wq4HFdLJn3EHZYORF
ODUDYoGaSxeXqYsGg/KHJBc8J7xW9FdN9fGbHfw1YplrmiGL3daATtArjMmAh0EQ
H8Sj7+ECgYkA3oWMCHi+4t8txRPkg1Fwt8dcqYhGtqpAus3NESVurAdi0ZPqEJcQ
4cUbflwQPhX0TOaBlkgzdP8DMdcW/4RalxHsAh5N8ezx/97PQMb3Bht0WsQUBeYJ
xLV7T2astjTRWactGCG7dwTaUYRtU3FqL6//3CysmA12B5EMX0udNBOTKwmaYKww
AwJ5AOISS7f12Q0fgTEVY0H8Zu5hHXNOA7DN92BUzf99iPx+H+codLet4Ut4Eh0C
cFmjA3TC78oirp5mOOQmYxwaFaxlZ7Rs60dlPFrhz0rsHYPK1yUOWRr3RcXWSR13
r+kn+f+8k7nItfGi7shdcQW+adm/EqPfwTHM8QKBiQCIPEMrvKFBzVn8Wt2A+I+G
NOyqbuC8XSgcNnvij4RelncN0P1xAsw3LbJTfpIDMPXNTyLvm2zFqIuQLBvMfH/q
FfLkqSEXiPXwrb0975K1joGCQKHxqpE4edPxHO+I7nVt6khVifF4QORZHDbC66ET
aTHA3ykcPsGQiGGGxoiMpZ9orgxyO3l5Anh92jmU26RNjfBZ5tIu9dhHdID0o8Wi
M8c3NX7IcJZGGeCgywDPEFmPrfRHeggZnopaAfuDx/L182pQeJ5MEqlmI72rz8bb
JByJa5P+3ZtAtzc2RdqNDIMnM7fYU7z2S279U3nZv0aqkk3j9UDqNaqvsZMq73GZ
y8ECgYgoeJDi+YyVtqgzXyDTLv6MNWKna9LQZlbkRLcpg6ELRnb5F/dL/eB/D0Sx
QpUFi8ZqBWL+A/TvgrCrTSIrfk71CKv6h1CGAS02dXorYro86KBLbJ0yp1T/WJUj
rHrGHczglvoB+5stY/EpquNpyca03GcutgIi9P2IsTIuFdnUgjc7t96WEQwL
-----END RSA PRIVATE KEY-----'''
try:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','s3bucket.conf'))
with open(file_path, 'r') as f:
data = json.load(f)
except:
data = []
awssecretaccesskey = ''
found = False
for x in data:
if x['Name'] == name:
awssecretaccesskey = x['AWSSecretAccessKey']
aws_access_key_id = x['AWSAccessKeyID']
bucketName = x['S3BucketName']
found = True
break
if found:
privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM')
awssecretaccesskey = eval(awssecretaccesskey)
awssecretaccesskey = rsa.decrypt(awssecretaccesskey, privkey)
awssecretaccesskey = awssecretaccesskey.decode('utf-8')
#awssecretaccesskey = 'SGcyJavYEQPwTbOg1ikqThT+Op/ZNsk7UkRCpt9g'#rsa.decrypt(awssecretaccesskey, privkey)
client_s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(awssecretaccesskey))
#print(bucketName,filename)
try:
response = client_s3.get_object(Bucket=bucketName, Key=filename)
df = pd.read_csv(response['Body'])
except Exception as e:
print(e)#usnish
return 'Error', pd.DataFrame()
#return 'Error', pd.DataFrame()
return 'Success',df
return 'Error', pd.DataFrame()
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os.path
import time
import subprocess
import sys
from appbe.aion_config import kafka_setting
from appbe.aion_config import running_setting
from appbe import installPackage
from appbe import compute
from appbe.models import getusercasestatus
import json
import pandas as pd
import ntpath
import shutil
import platform
from pathlib import Path
from appbe.dataPath import DATA_DIR
LOG_FILE_PATH = os.path.join(DATA_DIR,'logs')
def encrptpackage_command(request,Existusecases,usecasedetails):
command = request.POST.get('encryptedsubmit')
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
computeinfrastructure = compute.readComputeConfig()
modelID = request.POST.get('modelID')
p = Existusecases.objects.get(id=modelID)
usecasename = p.ModelName.UsecaseName
usecaseid = p.ModelName.usecaseid
runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecaseid)
import pandas as pd
import numpy as np
def get_leaderboard(file_content):
matched_lines = [line.replace('Model:-', '') for line in file_content.split('\\n') if "Model:-" in line]
df = pd.DataFrame(columns = ['Model', 'Iterations', 'Score (%)', 'Score Type', 'Best Score (%)'])
import re
try:
for line in matched_lines:
if 'Model Name::' in line:
MODEL = line.split('::')
model = MODEL[1]
if 'ScoringType::' in line:
S = line.split('::')
#SC = ScorTyp[1]
if 'make_scorer'in line:
ST = line.split('make_scorer')
ScorTyp = ST[1]
df['Score Type'] = np.where(df['Model'] == model, ScorTyp,df['Score Type'])
if 'Validation Score::' in line:
BS = line.split('::')
BestSc = round(float(BS[1]), 4)*100
BestSc = abs(BestSc)
df['Best Score (%)'] = np.where(df['Model'] == model, BestSc, df['Best Score (%)'])
if 'Iteration::' in line:
l = line.split('::')
word = re.findall(r'\\[(.*?)\\]', l[1])
if ';, score=' in line:
sc = line.split('score=')
SCR = sc[1].split(' ')
Score = round(float(SCR[0]), 4)*100
Score = abs(Score)
# df = df.concat({'Model': model, 'Iterations': word,'Score (%)': Scor,'Score Type': '', 'Best Score (%)': 0}, ignore_index=True)
newdf = pd.DataFrame([{'Model': model, 'Iterations': word,'Score (%)': Score,'Score Type': '', 'Best Score (%)': 0}])
df = pd.concat([df,newdf],axis=0, ignore_index=True)
LIST = []
for i in range(int(len(df['Score (%)'])/5)):
l = (sum(df['Score (%)'][5*i:5*(i+1)])/5)
#LIST.concat(l)
LIST.append(l)
for i in range(len(LIST)):
df['Score (%)'][5*i:5*(i+1)]=LIST[i]
CL = [line.replace('------->Type of Model :classification', 'Model :classification') for line in file_content.split('\\n') if "------->Type of Model :classification" in line]
for l in CL:
if 'Model :classification' in l:
df = df.sort_values(by = ['Best Score (%)'], ascending=False)
RE = [line.replace('------->Type of Model :regression', 'Model :regression') for line in file_content.split('\\n') if "------->Type of Model :regression" in line]
for l in RE:
if 'Model :regression' in l:
df = df.sort_values(by = ['Best Score (%)'])
except Exception as e:
print(e)
return df
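# get_leaderboard() assumes AION training-log lines roughly of this shape
# (illustrative, reconstructed from the parsing above):
#   Model:- Model Name::Random Forest
#   Model:- ScoringType::make_scorer(accuracy_score)
#   Model:- Validation Score::0.9134
#   Model:- Iteration::[n_estimators=100];, score=0.905
# Per-model scores are averaged over each block of five CV folds, then the frame
# is sorted by 'Best Score (%)' (descending for classification, ascending for regression).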
if __name__ == "__main__":
file_path = r"C:\\Users\\richard.mochahari\\AppData\\Local\\Programs\\HCLTech\\AION\\data\\target\\AI0335\\1\\log\\model_training_logs.log"
my_file = open(file_path, 'r')
file_content = my_file.read()
my_file.close()
print(get_leaderboard(file_content))
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os,sys
import json
def getInstanceonGCP(image,instances):
try:
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
if sqlite_obj.table_exists('LLMTuning'):
data = sqlite_obj.read_data('LLMTuning','image="'+image['id']+'"')
for values in data:
instance = {}
instance['type'] = 'instance'
instance['id'] = values[2]
instance['workLoad'] = image['workLoad']
instance['machineImageProjectID'] = image['machineImageProjectID']
instance['ssh'] = image['ssh']
instance['machineConfiguration'] = image['machineConfiguration']
instance['instanceType'] = image['instanceType']
instances.append(instance)
except Exception as e:
print(e)
return instances
def getInstanceonAWS(amiid,instances):
try:
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
if sqlite_obj.table_exists('LLMTuning'):
data = sqlite_obj.read_data('LLMTuning','image="'+amiid['id']+'"')
for values in data:
instance = {}
instance['type'] = 'instance'
instance['id'] = values[2]
instance['workLoad'] = amiid['workLoad']
instance['regionName'] = amiid['regionName']
instance['ssh'] = amiid['ssh']
instance['machineConfiguration'] = amiid['machineConfiguration']
instance['instanceType'] = amiid['instanceType']
instances.append(instance)
except Exception as e:
print(e)
return instances
def updatelocalsetings(request):
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
import pandas as pd
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
if sqlite_obj.table_exists('computeInfrastructure'):
updated_data = 'selectedInfrastructure="Local"'
sqlite_obj.update_data(updated_data,'','computeInfrastructure')
def updateToComputeSettings(infratructure):
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
import pandas as pd
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
if sqlite_obj.table_exists('computeInfrastructure'):
updated_data = 'selectedInfrastructure="'+infratructure+'"'
sqlite_obj.update_data(updated_data,'','computeInfrastructure')
def updateGCPConfig(request):
try:
credentialsJson = request.POST.get('credentialsJson')
projectID = request.POST.get('gcpProjectid')
machineType = request.POST.get('gcpmachineType')
selectedID = request.POST.get('gcpInstance')
gcpZone = request.POST.get('gcpZone')
workload = request.POST.get('gcpworkload')
noOfInstance = request.POST.get('GCPnoofinstance')
#print(credentialsJson,projectID,machineType,selectedID,gcpZone,workload,noOfInstance)
if credentialsJson != '' and projectID != '':
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
import pandas as pd
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
if sqlite_obj.table_exists('gcpCredentials'):
updated_data = 'credentialsJson="'+credentialsJson+'",projectID="'+projectID+'",machineType="'+machineType+'",selectedID="'+selectedID+'",regionName="'+gcpZone+'",noOfInstance="'+str(noOfInstance)+'",workload="'+workload+'"'
sqlite_obj.update_data(updated_data,'','gcpCredentials')
else:
newdata = {}
newdata.update({'id':['1'],'credentialsJson': [credentialsJson],'projectID': [projectID],'machineType':[machineType],'selectedID':[selectedID],'regionName':[gcpZone],'noOfInstance':[noOfInstance],'workload':[workload]})
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'gcpCredentials')
return('success')
else:
return('error')
except Exception as e:
print(e)
return('error')
def updateComputeConfig(request):
try:
AWSAccessKeyID = request.POST.get('AWSAccessKeyID')
AWSSecretAccessKey = request.POST.get('AWSSecretAccessKey')
workload = request.POST.get('workload')
machineType = request.POST.get('machineType')
selectedID = request.POST.get('amiInstance')
regionName = request.POST.get('regionName')
noOfInstance = request.POST.get('NoOfInstance')
securitygroupid = request.POST.get('AWSSecuritygroupID')
if AWSAccessKeyID != '' and AWSSecretAccessKey != '':
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
import pandas as pd
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
if sqlite_obj.table_exists('awsCredentials'):
column_names = sqlite_obj.column_names('awsCredentials')
if 'securitygroupid' not in column_names:
query = 'Alter Table awsCredentials ADD securitygroupid TEXT'
sqlite_obj.execute_query(query)
updated_data = 'AWSAccessKeyID="'+AWSAccessKeyID+'",AWSSecretAccessKey="'+AWSSecretAccessKey+'",machineType="'+machineType+'",selectedID="'+selectedID+'",regionName="'+regionName+'",noOfInstance="'+noOfInstance+'",workload="'+workload+'",securitygroupid="'+securitygroupid+'"'
sqlite_obj.update_data(updated_data,'','awsCredentials')
else:
newdata = {}
newdata.update({'id':['1'],'AWSAccessKeyID': [AWSAccessKeyID],'AWSSecretAccessKey': [AWSSecretAccessKey],'machineType':[machineType],'selectedID':[selectedID],'regionName':[regionName],'noOfInstance':[noOfInstance],'workload':[workload],'securitygroupid':[securitygroupid]})
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'awsCredentials')
return('success')
else:
return('error')
except Exception as e:
print(e)
return('error')
def selectedInfratructure():
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
selcInfra = 'Local'
if sqlite_obj.table_exists('computeInfrastructure'):
data = sqlite_obj.read_data('computeInfrastructure')
for values in data:
selcInfra = values[1]
return selcInfra
def readComputeConfig():
try:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','compute_conf.json'))
f = open(file_path, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
import pandas as pd
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
selcInfra = 'Local'
if sqlite_obj.table_exists('computeInfrastructure'):
data = sqlite_obj.read_data('computeInfrastructure')
for values in data:
selcInfra = values[1]
else:
data = {}
data.update({'id':['1'],'selectedInfrastructure': ['Local']})
sqlite_obj.write_data(pd.DataFrame.from_dict(data),'computeInfrastructure')
configSettingsJson['computeInfrastructure'] = selcInfra
for ami in configSettingsJson['AWS_EC2']['amis']:
configSettingsJson['AWS_EC2']['instances'] = getInstanceonAWS(ami,configSettingsJson['AWS_EC2']['instances'])
for image in configSettingsJson['GCP']['machineImage']:
configSettingsJson['GCP']['instances'] = getInstanceonGCP(image,configSettingsJson['GCP']['instances'])
AWSAccessKeyID = ''
AWSSecretAccessKey = ''
securitygroupid = ''
machineType = 'AMI'
selectedID = ''
regionName = ''
noofInfra = 1
workLoad = 'LLM'
if sqlite_obj.table_exists('awsCredentials'):
column_names = sqlite_obj.column_names('awsCredentials')
#print(column_names)
if 'workload' not in column_names:
query = 'Alter Table awsCredentials ADD workload TEXT'
sqlite_obj.execute_query(query)
if 'securitygroupid' not in column_names:
query = 'Alter Table awsCredentials ADD securitygroupid TEXT'
sqlite_obj.execute_query(query)
data = sqlite_obj.read_data('awsCredentials')
for values in data:
AWSAccessKeyID = values[1]
AWSSecretAccessKey = values[2]
machineType = values[3]
selectedID = values[4]
regionName = values[5]
noofInfra = values[6]
workLoad = values[7]
securitygroupid = values[8]
selectedAWS = {}
selectedAWS['accessKey'] = AWSAccessKeyID
selectedAWS['secretAccessKey'] = AWSSecretAccessKey
selectedAWS['machineType']=machineType
selectedAWS['selectedID'] = selectedID
selectedAWS['regionName'] = regionName
selectedAWS['noOfInstance']=noofInfra
selectedAWS['workLoad'] = workLoad
selectedAWS['securitygroupid'] = securitygroupid
configSettingsJson['awsCredentials'] = selectedAWS
gcpCredentials=''
projectID = ''
selectedID = ''
machineType = ''
regionName = ''
noOfInstance = 1
workLoad = 'LLM'
if sqlite_obj.table_exists('gcpCredentials'):
column_names = sqlite_obj.column_names('gcpCredentials')
if 'workload' not in column_names:
query = 'Alter Table gcpCredentials ADD workload TEXT'
sqlite_obj.execute_query(query)
data = sqlite_obj.read_data('gcpCredentials')
for values in data:
gcpCredentials = values[1]
projectID = values[2]
machineType = values[3]
selectedID = values[4]
regionName = values[5]
noOfInstance = values[6]
workLoad = values[7]
False'
elif model in ["Neural Architecture Search"]:
model.xplain = 'False'
model.flserversupport = 'False'
model.onlinelerningsupport = 'False'
supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"]
if model.deploymodel in supportedmodels:
model.flserversupport = 'True'
else:
model.flserversupport = 'False'
supportedmodels = ["Extreme Gradient Boosting (XGBoost)"]
if model.deploymodel in supportedmodels:
model.encryptionsupport = 'True'
else:
model.encryptionsupport = 'False'
supportedmodels = ["Online Decision Tree Classifier","Online Logistic Regression","Online Linear Regression","Online Decision Tree Regressor","Online KNN Regressor","Online Softmax Regression","Online KNN Classifier"]
if model.deploymodel in supportedmodels:
model.onlinelerningsupport = 'True'
onlineoutputPath = os.path.join(str(model.DeployPath),'production','Config.json')
with open(onlineoutputPath) as file:
onlineoutputPath = json.load(file)
file.close()
details = {'Score' :onlineoutputPath['metricList'],'DataSize':onlineoutputPath['trainRowsList']}
dfonline = pd.DataFrame(details)
model.oltrainingdetails = dfonline
else:
model.onlinelerningsupport = 'False'
except Exception as e:
print(e)
pass
return models
def landing_page(usecasedetails,Existusecases,hosturl,usecaseId = None,search_text=None):
sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db')
if usecaseId:
usecase = usecasedetails.objects.filter(id=usecaseId)
else:
if search_text:
usecase = usecasedetails.objects.filter(UsecaseName__contains=str(search_text)).order_by('-id')
else:
#usecase = usecasedetails.objects.all().order_by('-id')[:100] #top 100 records
usecase = usecasedetails.objects.all().order_by('-id') #all records
usecaselist=[]
if not usecaseId:
for x in usecase:
problemType= 'NA'
publish_url = ''
otherModel = {}
models = Existusecases.objects.filter(Status='SUCCESS',publishStatus='Published',ModelName=x.id).order_by('-id')
if len(models) > 0:
#print(models[0])
version = models[0].Version
if os.path.isdir(str(models[0].DeployPath)):
modelPath = os.path.join(str(models[0].DeployPath),'etc','output.json')
with open(modelPath) as file:
outputconfig = json.load(file)
problemType = outputconfig['data']['ModelType']
#print(problemType.lower())
if problemType.lower() == "llm fine-tuning":
cloudconfig = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'compute_conf.json'))
print(cloudconfig)
from appbe.models import get_instance
hypervisor,instanceid,region,image,status = get_instance(x.usecaseid+ '_' + str(version))
from llm.llm_inference import get_ip
instanceip = get_ip(cloudconfig,instanceid,hypervisor,region,image) #usnish__ server maynot running
if instanceip != '':
publish_url = 'http://' + instanceip + ':' + '8000' + '/generate'
else:
publish_url = 'service not available'
else:
publish_url = 'http://'+hosturl+':'+str(models[0].portNo)+'/AION/'+x.usecaseid+'/predict'
publish_status = 'Published'
#publish_url = 'http://'+hosturl+':'+str(models[0].portNo)+'/AION/'+x.usecaseid+'/predict'
parentModel = get_model(Existusecases,x.id,int(version))
else:
models = Existusecases.objects.filter(Status='SUCCESS',ModelName=x.id).order_by('-id')
if len(models) > 0:
publish_status = 'Trained'
version = models[0].Version
parentModel = get_model(Existusecases,x.id,int(version))
else:
models = Existusecases.objects.filter(ModelName=x.id).order_by('-id')
if len(models)==0:
publish_status= 'Not Trained'
version = -1
else:
if models[0].Status == 'FAIL':
publish_status= 'Failed'
elif models[0].Status == 'Running':
publish_status = 'Running'
else:
publish_status='Not Trained'
problemType = models[0].ProblemType
version = models[0].Version
parentModel={}
usecasedetails = {'uuid':x.id,'description':x.Description,'usecaseid':x.usecaseid,'usecase':x.UsecaseName,'status':publish_status,'publish_url':publish_url,'version':version,'parentModel':parentModel,'otherModel':otherModel,'problemType':problemType}
usecaselist.append(usecasedetails)
else:
for x in usecase:
otherModel = get_model(Existusecases,x.id)
problemType = otherModel[0].problemType
usecasedetails = {'uuid':x.id,'description':x.Description,'usecase':x.UsecaseName,'status':'','version':'','parentModel':{},'otherModel':otherModel,'problemType':problemType}
usecaselist.append(usecasedetails)
return usecaselist
def get_landing_model(Existusecases):
models = Existusecases.objects.filter(Status='SUCCESS').order_by('-id')
for model in models:
model.scoringCreteria = 'NA'
model.score = 'NA'
model.deploymodel = 'NA'
if os.path.isdir(str(model.DeployPath)):
modelPath = os.path.join(str(model.DeployPath),'etc','output.json')
try:
with open(modelPath) as file:
outputconfig = json.load(file)
file.close()
if outputconfig['status'] == 'SUCCESS':
model.scoringCreteria = outputconfig['data']['ScoreType']
model.score = outputconfig['data']['BestScore']
model.deploymodel = outputconfig['data']['BestModel']
model.problemType = outputconfig['data']['ModelType']
model.maacsupport = 'True'
model.flserversupport = 'False'
model.onlinelerningsupport = 'False'
supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"]
if model.deploymodel in supportedmodels:
model.flserversupport = 'True'
else:
model.flserversupport = 'False'
supportedmodels = ["Extreme Gradient Boosting (XGBoost)"]
if model.deploymodel in supportedmodels:
model.encryptionsupport = 'True'
else:
model.encryptionsupport = 'False'
supportedmodels = ["Online Decision Tree Classifier","Online Logistic Regression"]
if model.deploymodel in supportedmodels:
model.onlinelerningsupport = 'True'
onlineoutputPath = os.path.join(str(model.DeployPath),'production','Config.json')
with open(onlineoutputPath) as file:
onlineoutputPath = json.load(file)
file.close()
details = {'Score' :onlineoutputPath['metricList'],'DataSize':onlineoutputPath['trainRowsList']}
dfonline = pd.DataFrame(details)
model.oltrainingdetails = dfonline
else:
model.onlinelerningsupport = 'False'
except Exception as e:
pass
return models
def usecase_page(request,usecasedetails,Existusecases,usecaseid,search_text):
try:
from appbe import read_service_url_params
tacking_url = read_service_url_params(request)
except:
tacking_url = '127.0.0.1'
hosturl =request.get_host()
hosturl = hosturl.split(':')
hosturl = hosturl[0]
computeinfrastructure = compute.readComputeConfig()
from appbe.aion_config import settings
usecasetab = settings()
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
status,msg = pushRecordForTraining()
if status == False:
context = {'msg':msg}
context['selected'] = 'License'
return status,context,'licenseexpired.html'
ser_url = service_url.read_service_url_params(request)
packagetip='''
Call From Command Line
1. Click AION Shell
2. python {packageAbsolutePath}/aion_predict.py {json_data}
Call As a Package
1. Go To package_path\\publish\\package
2. python -m pip install {packageName}-py3-none-any.whl
Call the predict function after wheel package installation
1. from {packageName} import aion_predict as p1
2. p1.predict({json_data})'''
if request.method == "POST":
usecasename = request.POST.get('UsecaseName')
description = request.POST.get('Description')
usecaseid = request.POST.get('usecaseid')
#print('1',usecasename)
if (usecasename == ''):
usecase = landing_page(usecasedetails,Existusecases,hosturl)
if len(usecase) > 0:
nouc = usecasedetails.objects.latest('id')
nouc = (nouc.id)+1
else:
nouc = 1
nouc = str(nouc).zfill(4)
description_text = 'This is a usecase for AI' + str(nouc)
context = {'description_text':description_text,'usecase':'usecase','Notallowed':'Usecasename is mandatory','ser_url':ser_url,'packagetip':packagetip,'usecasedetail': usecase,'nouc':nouc, 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'tacking_url':tacking_url,'usecasetab':usecasetab,
'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting}
return status,context,'usecases.html'
else:
usecase_count = usecasedetails.objects.filter(usecaseid=usecaseid).count()
usecasename_count = usecasedetails.objects.filter(UsecaseName=usecasename).count()
usecase = landing_page(usecasedetails,Existusecases,hosturl)
if (usecase_count > 0) or (usecasename_count > 0):
nouc = usecasedetails.objects.latest('id')
nouc = (nouc.id)+1
nouc = str(nouc).zfill(4)
Msg = 'Error in usecase creation, please try again'
if usecase_count > 0:
Msg = 'There is already a use case with the same id, please provide a unique id'
if usecasename_count > 0:
Msg = 'There is already a use case with the same name, please provide a unique name'
description_text = 'This is a usecase for AI' + str(nouc)
context = {'description_text':description_text,'usecasedetail': usecase, 'nouc': nouc,'Status':'error','Msg': Msg,'tacking_url':tacking_url,'usecasetab':usecasetab,'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'ser_url':ser_url,'packagetip':packagetip,
'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting}
return status,context,'usecases.html'
else:
clusteringModels = Existusecases.objects.filter(Status='SUCCESS',ProblemType='unsupervised').order_by('-id')
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
p = usecasedetails(UsecaseName=usecasename,usecaseid=usecaseid,Description=description)
p.save()
s1 = Existusecases.objects.filter(ModelName=p.id).annotate(maxver=Max('ModelName__existusecases__Version'))
config_list = s1.filter(Version=F('maxver'))
if config_list.count() > 0:
Version = config_list[0].Version
Version = Version + 1
else:
Version = 1
ps = Existusecases(DataFilePath='', DeployPath='', Status='Not Trained',ConfigPath='', Version=Version, ModelName=p,TrainOuputLocation='')
ps.save()
request.session['ModelName'] = p.id
request.session['UseCaseName'] = usecasename
request.session['usecaseid'] = usecaseid
request.session['ModelVersion'] = Version
request.session['ModelStatus'] = 'Not Trained'
request.session['currentstate'] = 0
request.session['finalstate'] = 0
selected_use_case = usecasename
model_status = 'Not Trained'
ModelVersion = Version
from appbe.telemetry import UseCaseCreated
UseCaseCreated(usecaseid+'-'+str(Version))
if len(usecase) > 0:
nouc = usecasedetails.objects.latest('id')
nouc = (nouc.id)+1
else:
nouc = 1
nouc = str(nouc).zfill(4)
description_text = 'This is a usecase for AI' + str(nouc)
context = {'description_text':description_text,'usecasedetail': usecase, 'nouc': nouc, 'newusercase': usecasename,'tacking_url':tacking_url,'finalstate':request.session['finalstate'],
'description': description,'selected_use_case': selected_use_case,'ser_url':ser_url,'packagetip':packagetip,'clusteringModels':clusteringModels,'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'usecasetab':usecasetab,'azurestorage':get_azureStorage(),
'ModelStatus': model_status, 'ModelVersion': ModelVersion, 'selected': 'modeltraning','computeinfrastructure':computeinfrastructure}
return status,context,'upload.html'
else:
models = get_landing_model(Existusecases)
usecase = landing_page(usecasedetails,Existusecases,hosturl,usecaseid,search_text)
if len(usecase) > 0:
nouc = usecasedetails.objects.latest('id')
nouc = (nouc.id)+1
else:
nouc = 1
nouc = str(nouc).zfill(4)
description_text = 'This is a usecase for AI' + str(nouc)
context = {'description_text':description_text,'usecasedetail': usecase, 'nouc': nouc, 'models': models, 'selected_use_case': selected_use_case,'ser_url':ser_url,'packagetip':packagetip,'tacking_url':tacking_url,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'selected': 'usecase','computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'usecasetab':usecasetab}
if usecaseid:
context.update({'ucdetails':'True'})
return status,context,'usecases.html'
def index_page(request,usecasedetails,Existusecases):
if 'ModelVersion' in request.session:
del request.session['ModelVersion']
if 'UseCaseName' in request.session:
del request.session['UseCaseName']
if 'ModelStatus' in request.session:
del request.session['ModelStatus']
if 'currentstate' in request.session:
del request.session['currentstate']
if 'finalstate' in request.session:
del request.session['finalstate']
return usecases_page(request,usecasedetails,Existusecases)
def usecases_page(request,usecasedetails,Existusecases,usecaseid=None,substring=None):
return usecase_page(request,usecasedetails,Existusecases,usecaseid,substring)
def mllite_page(request):
from appbe.aion_config import settings
usecasetab = settings()
status,msg = pushRecordForTraining()
if status == False:
context = {'selected':'mllite','lerror':msg}
return context
configFile = os.path.join(DEFAULT_FILE_PATH, 'model_converter.json')
    with open(configFile, "r") as f:
        configSettings = f.read()
    configSettingsJson = json.loads(configSettings)
    context = {'selected':'mllite','sagemaker':configSettingsJson,'usecasetab':usecasetab}
return context
def mltesting_page(request):
from appbe.aion_config import settings
usecasetab = settings()
status,msg = pushRecordForTraining()
if status == False:
context = {'lerror':msg}
return context
if request.method == "POST":
models = request.POST['model']
datap = request.POST['data']
if(os.path.isfile(models) and os.path.isfile(datap)):
request.session['datalocation'] = datap
df = pd.read_csv(datap,encoding='utf-8',skipinitialspace = True,encoding_errors= 'replace')
trainfea = df.columns.tolist()
featurs = request.POST.getlist('Training')
feature = ",".join(featurs)
filetimestamp = str(int(time.time()))
settingconfig = os.path.join(CONFIG_FILE_PATH, 'MLTest_' + filetimestamp + '.json')
request.session['MLTestResult'] = settingconfig
mltestresult={}
mltestresult['models'] = models
mltestresult['datap'] = datap
mltestresult['feature'] = feature
# features = ['PetalLengthCm','PetalWidthCm']
targ = request.POST['Target']
tar =[targ]
mltestresult['target'] = targ
mltestresult = json.dumps(mltestresult)
            with open(settingconfig, "w") as fpWrite:
                fpWrite.write(mltestresult)
from pathlib import Path
mltest={}
if Path(models).is_file() and Path(datap).is_file():
try:
from mltest import baseline
outputStr = baseline.baseline_testing(models,datap, feature, targ)
#scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_mltest.py'))
#print(scriptPath, models, datap, feature, targ)
#outputStr = subprocess.check_output([sys.executable, scriptPath, models, datap, feature, targ])
#print(outputStr)
#outputStr = outputStr.decode('utf-8')
                #outputStr= outputStr.replace('\'','\"')
#print('ou',outputStr)
#outputStr = outputStr.strip()
mltest = json.loads(outputStr)
Problemtype= mltest['Problemtype']
                with open(request.session['MLTestResult'], 'r+') as f:
                    mltestresult = json.load(f)
mltestresult['Problemtype'] = Problemtype
mltestresult['ProblemName'] = mltest['ProblemName']
status = mltest['Status']
if status == 'Fail':
errormsg= mltest['Msg']
context = {'error':errormsg,'mltest':'mltest'}
else:
if Problemtype == 'Classification':
mltestresult['Score'] = mltest['Accuracy']
mltestresult['Params'] = mltest['Params']
Problem= mltest['ProblemName']
Parameters= mltest['Params']
round_params = {}
for key, value in Parameters.items():
if isinstance(value, float):
round_params[key] = round(value,2)
else:
round_params[key] = value
matrixconfusion = mltest['Confusionmatrix']
classificationreport = mltest['classificationreport']
classificationreport = json.loads(classificationreport)
matrixconfusion = json.loads(matrixconfusion)
indexName =[]
columnName = []
for i in matrixconfusion.keys():
indexName.append("act:"+str(i))
for j in matrixconfusion[i].keys():
columnName.append("pre:"+str(j))
df3 = pd.DataFrame.from_dict(classificationreport)
df = df3.transpose()
df2 = pd.DataFrame.from_dict(matrixconfusion)
df1 = pd.DataFrame(df2.values,index=indexName,columns=columnName)
report = df.to_html()
report1 = df1.to_html()
recordone = mltest['onerecord']
recordsten = mltest['tenrecords']
recordshund = mltest['hundrecords']
context = {'modelname': models,'datapath':datap,'features':featurs,'target':tar,'Problemtype':Problem,'modeltype':Problemtype,'Parameter':round_params,'Onerecord':recordone,'Tenrecords':recordsten,'Hundrecords':recordshund,'matrixconfusion':report1,'classificationreport':report,'classification':'classification','df':df,'df1':df1,'basemltest':'basemltest','success':'success','trainfea':trainfea,'selected':'mltesting','usecasetab':usecasetab}
elif Problemtype == 'Regression':
Problem= mltest['ProblemName']
mltestresult['Params'] = mltest['Params']
mltestresult['Score'] = mltest['R2']
Parameters= mltest['Params']
round_params = {}
for key, value in Parameters.items():
if isinstance(value, float):
round_params[key] = round(value,2)
else:
round_params[key] = value
Mse = mltest['MSE']
Mae = mltest['MAE']
Rmse = mltest['RMSE']
R2 = mltest['R2']
recordone = mltest['onerecord']
recordsten = mltest['tenrecords']
recordshund = mltest['hundrecords']
context = {'modelname': models,'datapath':datap,'features':featurs,'target':tar, 'Problemtype':Problem,'Parameter':round_params,'Onerecord':recordone,'Tenrecords':recordsten,'Hundrecords':recordshund,'Mse':Mse,'Mae':Mae,'Rmse':Rmse,'R2Score':R2,'regression':'regression','success':"success",'selected': 'mltest','basemltest':'basemltest','usecasetab':usecasetab}
else:
errormsg= mltest['Msg']
context = {'error':errormsg,'mltest':'mltest'}
mltestresult = json.dumps(mltestresult)
                with open(settingconfig, "w") as fpWrite:
                    fpWrite.write(mltestresult)
except Exception as e:
print("-------------"+str(e)+'=================')
                e = str(e).replace('\'', '')
errormsg = 'Error: Exception '+str(e)
context = {'error':errormsg,'mltest':'mltest'}
else:
                if not Path(models).is_file() and not Path(datap).is_file():
context = {'error':"Please Check ModelPath & Datapath Format","result":"result",'selected':'mltesting','usecasetab':usecasetab}
elif not Path(models).is_file():
context = {'error':"Please Check ModelPath Format","result":"result",'selected':'mltesting','usecasetab':usecasetab}
elif not Path(datap).is_file():
context = {'error':"Please Check DataPath Format","result":"result",'selected':'mltesting','usecasetab':usecasetab}
else:
context = {'error':'Either model path or data path does not exist','mltest':'mltest','usecasetab':usecasetab}
else:
context = {'selected':'mltesting','usecasetab':usecasetab}
    return context
from langkit import textstat
from whylogs.experimental.core.udf_schema import udf_schema, register_dataset_udf
import pandas as pd
import whylogs as why
from langkit import light_metrics, lang_config, response_column
import json
from sentence_transformers import SentenceTransformer, util
def evaluate_prompt_metrics(prompt_msg):
    """Evaluate prompt-only text quality metrics; returns (json_str, dict)."""
text_schema = udf_schema()
llm_schema = light_metrics.init()
df = pd.DataFrame({
"prompt": [
prompt_msg
]})
results = why.log(df, schema=udf_schema()) # .profile()
view = results.view()
automated_readability_index_prompt = view.get_column("prompt.automated_readability_index").to_summary_dict()
automated_readability_index_prompt_mean = automated_readability_index_prompt['distribution/mean']
    arip_m = lambda x: 1 if x < 1 else (14 if x > 14 else x)  # clamp ARI to [1, 14]
automated_readability_index_prompt_mean = arip_m(automated_readability_index_prompt_mean)
automated_readability_index_prompt_value = get_readability_index_range_value(automated_readability_index_prompt_mean)
flesch_reading_ease_prompt = view.get_column("prompt.flesch_reading_ease").to_summary_dict()
flesch_reading_ease_prompt_mean = flesch_reading_ease_prompt['distribution/mean']
    frep_m = lambda x: 1 if x < 1 else (100 if x > 100 else x)  # clamp Flesch score to [1, 100]
flesch_reading_ease_prompt_mean = frep_m(flesch_reading_ease_prompt_mean)
flesch_reading_ease_prompt_value = get_flesch_reading_ease_prompt_value(flesch_reading_ease_prompt_mean)
prompt_results = {'prompt_readability_score': str(automated_readability_index_prompt_mean),
'prompt_readability_value': automated_readability_index_prompt_value,
'prompt_reading_ease': str(flesch_reading_ease_prompt_mean),
'prompt_reading_ease_value': flesch_reading_ease_prompt_value}
prompt_results_json = json.dumps(prompt_results, indent=4)
return prompt_results_json,prompt_results
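# A minimal usage sketch (the prompt text below is hypothetical);
# evaluate_prompt_metrics returns the JSON string plus the underlying dict:
#   prompt_json, prompt_dict = evaluate_prompt_metrics("Explain gradient descent in two sentences.")
#   print(prompt_dict['prompt_readability_value'])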
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
@register_dataset_udf(["prompt", "response"], "response.relevance_to_prompt")
def similarity_MiniLM_L6_v2(text):
x = text["prompt"]
y = text["response"]
embedding_1 = model.encode(x, convert_to_tensor=True)
embedding_2 = model.encode(y, convert_to_tensor=True)
similarity = util.pytorch_cos_sim(embedding_1, embedding_2)
result = similarity.item()
return result
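# The UDF above scores how on-topic a response is by taking the cosine
# similarity of MiniLM embeddings for the prompt and the response; whylogs
# applies it to logged rows carrying "prompt" and "response" columns.
# A direct call, for illustration only:
#   score = similarity_MiniLM_L6_v2({"prompt": "What is AION?",
#                                    "response": "AION is an AI platform."})
#   # score is a float; values near 1 mean the response stays on topic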
def get_readability_index_range_value(readability_value):
    """Map an automated readability index (clamped to 1-14) to a US grade level."""
    if readability_value <= 1:
        return "Kindergarten"
    grade_names = ["First Grade", "Second Grade", "Third Grade", "Fourth Grade",
                   "Fifth Grade", "Sixth Grade", "Seventh Grade", "Eighth Grade",
                   "Ninth Grade", "Tenth Grade", "Eleventh Grade", "Twelfth Grade"]
    for i, grade in enumerate(grade_names, start=1):
        if i < readability_value <= i + 1:
            return grade
    return "College Grade"
def get_flesch_reading_ease_prompt_value(readability_value):
""" Get flesch readability score range approximation"""
if readability_value <= 29:
return "Very Confusing"
elif 29 < readability_value <= 49:
return "Difficult"
elif 49 < readability_value <= 59:
return "Fairly Difficult"
elif 59 < readability_value <= 69:
return "Standard"
elif 69 < readability_value <= 79:
return "Fairly Easy"
elif 79 < readability_value <= 89:
return "Easy"
elif 89 < readability_value <= 100:
return "Very Easy"
else:
return "Very Easy"
def get_relevence_to_response_value(similarity_score):
    """Map a prompt-response similarity score to a relevance label."""
if similarity_score <=0.3:
return "Low"
elif 0.3 < similarity_score <= 0.5:
return "Average"
elif 0.5 < similarity_score <= 0.8:
return "Good"
elif similarity_score > 0.8:
return "High"
def evaluate_prompt_response_inputs(prompt_msg, response_msg):
    """Evaluate text quality and relevance for a prompt/response pair; returns (json_str, dict)."""
df = pd.DataFrame({
"prompt": [prompt_msg],
"response": [response_msg]})
results = why.log(df, schema=udf_schema())
view = results.view()
automated_readability_index_prompt = view.get_column("prompt.automated_readability_index").to_summary_dict()
automated_readability_index_prompt_mean = automated_readability_index_prompt['distribution/mean']
arip_m = lambda x:1 if x < 1 else (14 if x > 14 else x)
automated_readability_index_prompt_mean = arip_m(automated_readability_index_prompt_mean)
automated_readability_index_prompt_value = get_readability_index_range_value(automated_readability_index_prompt_mean)
flesch_reading_ease_prompt = view.get_column("prompt.flesch_reading_ease").to_summary_dict()
flesch_reading_ease_prompt_mean = flesch_reading_ease_prompt['distribution/mean']
frep_m = lambda x:1 if x < 1 else (100 if x > 100 else x)
flesch_reading_ease_prompt_mean = frep_m(flesch_reading_ease_prompt_mean)
flesch_reading_ease_prompt_value = get_flesch_reading_ease_prompt_value(flesch_reading_ease_prompt_mean)
automated_readability_index_response = view.get_column("response.automated_readability_index").to_summary_dict()
automated_readability_index_response_mean = automated_readability_index_response['distribution/mean']
arir_m = lambda x:1 if x < 1 else (14 if x > 14 else x)
automated_readability_index_response_mean = arir_m(automated_readability_index_response_mean)
automated_readability_index_response_value = get_readability_index_range_value(automated_readability_index_response_mean)
flesch_reading_ease_response = view.get_column("response.flesch_reading_ease").to_summary_dict()
flesch_reading_ease_response_mean = flesch_reading_ease_response['distribution/mean']
frer_m = lambda x:1 if x < 1 else (100 if x > 100 else x)
flesch_reading_ease_response_mean = frer_m(flesch_reading_ease_response_mean)
flesch_reading_ease_response_value = get_flesch_reading_ease_prompt_value(flesch_reading_ease_response_mean)
relevance_to_response = view.get_column("response.relevance_to_prompt").to_summary_dict()
relevance_to_response_mean = relevance_to_response['distribution/mean']
    r2r_m = lambda x: 0 if x < 0 else (1 if x > 1 else x)  # clamp similarity to [0, 1]
relevance_to_response_mean = r2r_m(relevance_to_response_mean)
relevance_to_response_value = get_relevence_to_response_value(relevance_to_response_mean)
sentence_count_response = view.get_column("response.sentence_count").to_summary_dict()
sentence_count_response_mean = sentence_count_response['distribution/mean']
word_count_response = view.get_column("response.lexicon_count").to_summary_dict()
word_count_response_mean = word_count_response['distribution/mean']
prompt_response_results = {'prompt_readability_score': str(automated_readability_index_prompt_mean),
'prompt_readability_value': automated_readability_index_prompt_value,
'prompt_reading_ease': str(flesch_reading_ease_prompt_mean),
'prompt_reading_ease_value': flesch_reading_ease_prompt_value,
'response_readability': str(automated_readability_index_response_mean),
'response_readability_value': str(automated_readability_index_response_value),
'response_reading_ease': str(flesch_reading_ease_response_mean),
'response_reading_ease_value': str(flesch_reading_ease_response_value),
'response_sentence_count': str(sentence_count_response_mean),
'response_word_count_response': str(word_count_response_mean),
'relevance_to_response': str(relevance_to_response_mean),
'relevance_to_response_value': relevance_to_response_value
}
final_output_json = json.dumps(prompt_response_results, indent=4)
return final_output_json,prompt_response_results
if __name__ == "__main__":
##Test only prompt message information
option = 'predict'
if option == 'evaluate':
prompt_only_response_msg = "A large language model is an advanced artificial intelligence (AI) system designed to process, understand, and generate human-like text based on massive amounts of data. These models are typically built using deep learning techniques, such as neural networks, and are trained on extensive datasets that include text from a broad range, such as books and websites, for natural language processing.Fine-tuning a large language model involves adjusting and adapting a pre-trained model to perform specific tasks or to cater to a particular domain more effectively. The process usually entails training the model further on a smaller, targeted dataset that is relevant to the desired task or subject matter.Few-shot learning (FSL) can be considered as a meta-learning problem where the model learns how to learn to solve the given problem. In this approach, the model is provided with a very limited number of examples (i.e., “few shots”) from the new task, and it uses this information to adapt and perform well on that task. Adapter Training: Adapter training is a method that involves training lightweight modules that are plugged into the pre-trained model, allowing for fine-tuning on a specific task without affecting the original model’s performance on other tasks.Multi-task Learning: Multi-task learning is a method where the pre-trained model is fine-tuned on multiple tasks simultaneously. This approach enables the model to learn and leverage the shared representations across different tasks, leading to better generalization and performance. Task-specific Fine-tuning: Task-specific fine-tuning is a method where the pre-trained model is fine-tuned on a specific task or domain using a task-specific dataset. This method requires more data and time than transfer learning but can result in higher performance on the specific task. Sequential Fine-tuning: Sequential fine-tuning is a method where a pre-trained model is fine-tuned on multiple related tasks or domains sequentially. This allows the model to learn more nuanced and complex language patterns across different tasks, leading to better generalization and performance.A noteworthy avenue of research within LLM fine-tuning explores strategies to reduce the expenses associated with updating model parameters. This endeavor is the essence of parameter-efficient fine-tuning (PEFT), a collection of techniques aiming to curtail the number of parameters requiring adjustments.Various PEFT techniques exist, and one prominent example is a low-rank adaptation (LoRA), a technique gaining popularity among open-source language models."
prompt_res = evaluate_prompt_metrics(prompt_only_response_msg)
elif option == 'predict':
prompt_msg = "What is AION?"
response_msg = "AION (Artificial Intelligence ONline) is an open -source software platform for building, deploying and operating the entire lifecycle of AI applications. It supports various use cases such as predictive analytics , machine learning and deep learning . Key features: 1. Data Ingestion : Supports multiple data sources like text files, excel sheet, database etc."
        evaluation_metrics_json, evaluation_metrics = evaluate_prompt_response_inputs(prompt_msg, response_msg)
        print("evaluation_metrics_json: \n", evaluation_metrics_json)
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
import os
import rsa
import boto3 #usnish
import pandas as pd
import time
def add_new_GCSBucket(request):
try:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','gcsbuckets.conf'))
        with open(file_path, 'r') as f:
            data = json.load(f)
if data == '':
data = []
except:
data = []
print(request.POST["aionreferencename"])
print(request.POST["serviceaccountkey"])
print(request.POST["bucketname"])
if request.POST["aionreferencename"] =='' or request.POST["serviceaccountkey"] == '' or request.POST["bucketname"] == '' :
return 'error'
newdata = {}
newdata['Name'] = request.POST["aionreferencename"]
newdata['GCSServiceAccountKey'] = request.POST["serviceaccountkey"]
newdata['GCSbucketname'] = request.POST["bucketname"]
data.append(newdata)
    with open(file_path, 'w') as f:
        json.dump(data, f)
return 'success'
def get_gcs_bucket():
try:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','gcsbuckets.conf'))
with open(file_path, 'r') as f:
data = json.load(f)
except:
data = []
return data
def read_gcs_bucket(name,filename,DATA_FILE_PATH):
try:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','gcsbuckets.conf'))
with open(file_path, 'r') as f:
data = json.load(f)
except:
data = []
found = False
print(data)
for x in data:
if x['Name'] == name:
GCSServiceAccountKey = x['GCSServiceAccountKey']
GCSbucketname = x['GCSbucketname']
found = True
break
print(found)
print(name)
    try:
        if found:
            import io
            from google.cloud import storage
            storage_client = storage.Client.from_service_account_json(GCSServiceAccountKey)
            print(GCSServiceAccountKey)
            print(GCSbucketname)
            bucket = storage_client.get_bucket(GCSbucketname)
            blob = bucket.blob(filename)
            data = blob.download_as_string()
            df = pd.read_csv(io.BytesIO(data), encoding='utf-8', sep=',', encoding_errors='replace')
            return 'Success', df
        # no matching bucket configuration found
        return 'Error', pd.DataFrame()
except Exception as e:
print(e)
        return 'Error', pd.DataFrame()
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import pandas as pd
import requests
from io import StringIO
import json
import time
import shutil
import sys
from appbe import compute
from appbe.aion_config import kafka_setting
from appbe.aion_config import running_setting
from appbe.s3bucketsDB import get_s3_bucket
from appbe.gcsbucketsDB import get_gcs_bucket
from appbe.azureStorageDB import get_azureStorage
from appbe.aion_config import eda_setting
from appbe.s3bucketsDB import read_s3_bucket
from appbe.gcsbucketsDB import read_gcs_bucket
from appbe.azureStorageDB import read_azureStorage
from appbe.validatecsv import csv_validator
from appbe.dataPath import LOG_LOCATION
from appbe.dataPath import DATA_FILE_PATH
from appbe.log_ut import logg
import logging
def langchain_splittext(filename):
try:
        from langchain.text_splitter import RecursiveCharacterTextSplitter
        from langchain.document_loaders import PyPDFLoader
loader = PyPDFLoader(filename)
pages = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,chunk_overlap=50)
texts = text_splitter.split_documents(pages)
return(texts)
except Exception as e:
print(e)
def pd_lanfchain_textsplitter(datalocation,data):
try:
document=[]
for i in range(len(data)):
filename = os.path.join(datalocation,data.loc[i,"File"])
out = langchain_splittext(filename)
for doc in out:
print(doc.page_content)
document.append(doc.page_content)
my_data = pd.DataFrame({'instruction': document})
n = 1
my_data["response"] = my_data["instruction"].tolist()[n:] + my_data["instruction"].tolist()[:n]
filetimestamp = str(int(time.time()))
filename = os.path.join(DATA_FILE_PATH, 'LLMTuning_' + filetimestamp+'.csv')
my_data.to_csv(filename,index=False)
return(filename)
except Exception as e:
print(e)
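# Note on the pairing in pd_lanfchain_textsplitter: rotating the instruction
# column by one turns consecutive chunks into (instruction, response) rows, so
# chunk i is "answered" by chunk i+1 and the last chunk wraps around to the
# first. Illustrative shape, assuming three chunks A, B, C:
#   instruction: A, B, C
#   response:    B, C, A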
def getimpfeatures(dataFile, numberoffeatures,delimiter,textqualifier):
imp_features = []
if numberoffeatures > 20:
try:
from appbe.eda import ux_eda
eda_obj = ux_eda(dataFile,delimiter,textqualifier,optimize=1)
if eda_obj.getNumericFeatureCount() >= 2:
pca_map = eda_obj.getPCATop10Features()
imp_features = pca_map.index.values.tolist()
except Exception as e:
print(e)
pass
return imp_features
def pdf2text(inpFileName):
try:
from pypdf import PdfReader
reader = PdfReader(inpFileName)
number_of_pages = len(reader.pages)
text=""
OrgTextOutputForFile=""
for i in range(number_of_pages) :
page = reader.pages[i]
text1 = page.extract_text()
text=text+text1
import nltk
tokens = nltk.sent_tokenize(text)
for sentence in tokens:
            sentence = sentence.replace("\n", " ")
if len(sentence.split()) < 4 :
continue
if len(str(sentence.split(',')).split()) < 8 :
continue
            if any(ch.isdigit() for ch in sentence):
continue
OrgTextOutputForFile= OrgTextOutputForFile+str(sentence.strip())
        #print("\n\n\n\nOrgTextOutputForFile------------->\n\n\n",OrgTextOutputForFile)
return (OrgTextOutputForFile)
except Exception as e:
print("Encountered exception. {0}".format(e))
def getcommonfields():
computeinfrastructure = compute.readComputeConfig()
from appbe.aion_config import settings
usecasetab = settings()
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
context = {'s3buckets':get_s3_bucket(),'gcsbuckets':get_gcs_bucket(),'computeinfrastructure':computeinfrastructure,'kafkaSetting':kafkaSetting,'ruuningSetting':ruuningSetting,'usecasetab':usecasetab,'azurestorage':get_azureStorage()}
return context
def getusercasestatus(request):
if 'UseCaseName' in request.session:
selected_use_case = request.session['UseCaseName']
else:
selected_use_case = 'Not Defined'
if 'ModelVersion' in request.session:
ModelVersion = request.session['ModelVersion']
else:
ModelVersion = 0
if 'ModelStatus' in request.session:
ModelStatus = request.session['ModelStatus']
else:
ModelStatus = 'Not Trained'
return selected_use_case,ModelVersion,ModelStatus
def delimitedsetting(delimiter='', textqualifier='', other=''):
    if delimiter:
        if delimiter.lower() in ('tab', '\t'):
            delimiter = '\t'
        elif delimiter.lower() in ('semicolon', ';'):
            delimiter = ';'
        elif delimiter.lower() in ('comma', ','):
            delimiter = ','
        elif delimiter.lower() in ('space', ' '):
            delimiter = ' '
        elif delimiter.lower() == 'other' or other:
            delimiter = other if other else ','
        # any other non-empty delimiter is used as-is
    else:
        delimiter = ','
    if textqualifier == '':
        textqualifier = '"'
    return delimiter, textqualifier
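# Expected mappings, as a sketch of the rules above:
#   delimitedsetting('tab', '')         -> ('\t', '"')
#   delimitedsetting('semicolon', "'")  -> (';', "'")
#   delimitedsetting('other', '', '|')  -> ('|', '"')
#   delimitedsetting('', '')            -> (',', '"')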
def multipleZipExtraction(data,DATA_FILE_PATH):
from zipfile import ZipFile
try:
import glob
filetimestamp = str(int(time.time()))
extracted_data = os.path.join(DATA_FILE_PATH, 'extracted_' + filetimestamp)
os.mkdir(extracted_data)
with ZipFile(data, 'r') as zObject:
zObject.extractall(extracted_data)
        csv_files = glob.glob(os.path.join(extracted_data, '*.csv'))  # platform-independent glob
df_csv_append = pd.DataFrame()
for file in csv_files:
df = pd.read_csv(file)
df_csv_append = df_csv_append.append(df, ignore_index=True)
for f in os.listdir(extracted_data):
os.remove(os.path.join(extracted_data, f))
#os.mkdir(extracted_data)
combined_data = os.path.join(extracted_data,filetimestamp+'.csv')
df_csv_append.to_csv(combined_data)
return combined_data
except Exception as e:
if os.path.exists(extracted_data):
shutil.rmtree(extracted_data)
#print (e)
return ''
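# Sketch of the flow above: given data.zip containing a.csv and b.csv, the
# function extracts both, concatenates them row-wise, and returns the path of
# the combined extracted_<timestamp>/<timestamp>.csv ('' on failure).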
def tarFileExtraction(data,DATA_FILE_PATH):
try:
import tarfile
filetimestamp = str(int(time.time()))
extracted_data = os.path.join(DATA_FILE_PATH, 'extracted_' + filetimestamp)
os.mkdir(extracted_data)
if data.endswith('tar'):
file = tarfile.open(data)
file.extractall(extracted_data)
file.close()
for f in os.listdir(extracted_data):
if f.endswith('csv') or f.endswith('tsv'):
dataFile = os.path.join(extracted_data,f)
return dataFile
except Exception as e:
if os.path.exists(extracted_data):
shutil.rmtree(extracted_data)
print (e)
return ''
# ------ changes for the bug 10379 starts---------------- By Usnish ------
def checkRamAfterLoading(dataPath):
import psutil
availableRam = psutil.virtual_memory()[1]/1e9
filesize = os.path.getsize(dataPath)/1e9
return availableRam < 2*filesize
def checkRamBeforeLoading(dataPath):
import psutil
filesize = os.path.getsize(dataPath)/1e9
totalRam = psutil.virtual_memory()[0] / 1e9
if( filesize > 0.8 * totalRam):
return "File size is larger than the 80% of Total RAM."
return ""
# ------ changes for the bug 10379 ends---------------- By Usnish ------
# ---------- 10012:Decision Threshold related Changes S T A R T ----------
# This method is used to check If ->
# 80% of available RAM size is greater than ingested data (or not).
def checkRAMThreshold(dataPath):
import psutil
availableRam = psutil.virtual_memory()[1]/1e9
filesize = os.path.getsize(dataPath)/1e9
return (0.8 * availableRam) > filesize
# ---------------------- E N D ----------------------
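# Worked example for the 80% rule above, assuming 16 GB of available RAM:
# a 10 GB file passes (0.8 * 16 = 12.8 > 10) while a 14 GB file does not;
# checkRamBeforeLoading instead warns once a file exceeds 80% of *total* RAM.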
# Text Data Labelling using LLM related changes
# --------------------------------------------------------
def ingestTextData(request, DATA_FILE_PATH):
log = logging.getLogger('log_ux')
try:
Datapath = request.FILES['DataFilePath']
from appbe.eda import ux_eda
ext = str(Datapath).split('.')[-1]
request.session['uploadfiletype'] = 'Local'
request.session['datatype'] = 'Normal'
filetimestamp = str(int(time.time()))
if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext)
else:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp)
        with open(dataFile, 'wb+') as destination:
            for chunk in Datapath.chunks():
                destination.write(chunk)
dataPath = dataFile
request.session['textdatapath'] = dataPath
# check_df = pd.read_csv(dataPath)
eda_obj = ux_eda(dataPath)
check_df = eda_obj.getdata()
df_top = check_df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
# featuresList = check_df.columns.tolist()
features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures()
noTextFeature = False
if len(textFeature) == 0:
noTextFeature = True
context = {'raw_data':df_json, 'featuresList':textFeature, 'selected':'DataOperations', 'noTextFeature':noTextFeature}
return context
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
context = {'error': 'Failed to read data','emptycsv' : 'emptycsv'}
log.info('Text Data Ingestion -- Error : Failed to read data, '+str(e))
log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return context
# ---------------------- E N D ---------------------------
def ingestDataFromFile(request,DATA_FILE_PATH):
log = logging.getLogger('log_ux')
delimiter,textqualifier = delimitedsetting(request.POST.get('delimiters'),request.POST.get('qualifier'),request.POST.get('delimiters_custom_value'))
request.session['delimiter'] = delimiter
request.session['textqualifier'] = textqualifier
context = getcommonfields()
selected_use_case,ModelVersion,ModelStatus = getusercasestatus(request)
context.update({'selected_use_case': selected_use_case,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,})
try:
t1 = time.time()
request.session['uploadfiletype'] = ''
request.session['uploadLocation'] = ''
data_is_large = False
check_df = pd.DataFrame()
if request.method == 'POST':
if 'ModelVersion' in request.session:
ModelVersion = request.session['ModelVersion']
else:
ModelVersion = 0
if 'ModelName' not in request.session:
movenext = False
request.session['currentstate'] = 0
context.update({'tab': 'tabconfigure', 'error': 'Please Create/Select the Use Case First', 'movenext': movenext,'currentstate': request.session['currentstate']})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please Create/Select the Use Case First')
return context
else:
type = request.POST.get("optradio")
if type == "s3Bucket":
try:
request.session['uploadfiletype'] = 'S3Bucket'
bucketname = request.POST.get('s3bucketname')
fileName = request.POST.get('s3file')
if fileName != '':
status,msg,check_df = read_s3_bucket(bucketname,fileName,DATA_FILE_PATH)
if status == 'Success':
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
check_df.to_csv(dataFile, index=False)
request.session['datalocation'] = dataFile
else :
request.session['currentstate'] = 0 #usnish
context.update({'error': str(msg),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0' + 'sec' + ' : ' + 'Error : ' + str(msg))
return context
else: #usnish
request.session['currentstate'] = 0
context.update({'error': 'Please provide a file name','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please provide a file name')
return context
except Exception as e:
request.session['currentstate'] = 0
context.update({'error': str(e),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+ str(e))
return context
# -------------------------------- Graviton-Integration Changes S T A R T --------------------------------
elif type == "graviton":
try:
dataServiceId = request.POST.get('dataservice')
metadataId = request.POST.get('metadata')
data = []
from appbe.aion_config import get_graviton_data
                        graviton_url, graviton_userid = get_graviton_data()
gravitonURL = graviton_url
gravitonUserId = graviton_userid
# url = 'https://xenius.azurewebsites.net/api/getdata?userid=1&dataserviceid='+str(dataserviceId) +'&metadataid=' +str(metadataId)
url = gravitonURL + 'getdata?userid=' + gravitonUserId +'&dataserviceid='+str(dataServiceId) +'&metadataid=' +str(metadataId)
print(url)
response = requests.get(url)
statuscode = response.status_code
if statuscode == 200:
json_dictionary = json.loads(response.content)
data = json_dictionary['result']
firstElement = next(iter(data[0].keys()))
check_df = pd.DataFrame.from_dict(data[0][firstElement])
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
check_df.to_csv(dataFile, index=False)
request.session['uploadfiletype'] = 'Graviton'
request.session['datalocation'] = str(dataFile)
except Exception as e:
print(e)
request.session['currentstate'] = 0
context.update({'error':'Check log file for more details','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error :'+str(e))
return context
# ------------------------------------------------ E N D -------------------------------------------------
elif type == "azurestorage":
try:
request.session['uploadfiletype'] = 'AzureStorage'
azurename = request.POST.get('azurename')
directoryname = request.POST.get('azuredirectory')
if directoryname != '':
status,msg,check_df = read_azureStorage(azurename,directoryname,DATA_FILE_PATH)
if status == 'Success':
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
check_df.to_csv(dataFile, index=False)
request.session['datalocation'] = dataFile
else :
request.session['currentstate'] = 0 #usnish
context.update({'error': str(msg),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' +str(msg))
return context
else: #usnish
request.session['currentstate'] = 0
context.update({'error': 'Please provide a file name','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please provide a file name')
return context
except Exception as e:
print(e)
request.session['currentstate'] = 0
context.update({'error': 'File does not exist','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : File does not exist, '+str(e))
return context
elif type == "googleBucket":
try:
request.session['uploadfiletype'] = 'GCPBucket'
bucketname = request.POST.get('gcpbucketname')
fileName = request.POST.get('file1')
if fileName != '':
status,msg,check_df = read_gcs_bucket(bucketname,fileName,DATA_FILE_PATH)
if status == 'Success':
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
check_df.to_csv(dataFile, index=False)
request.session['datalocation'] = dataFile
else :
request.session['currentstate'] = 0 #usnish
context.update({'error': str(msg),'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+str(msg))
return context
else: #usnish
request.session['currentstate'] = 0
context.update({'error': 'Please provide a file name','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : Please provide a file name')
return context
except Exception as e:
request.session['currentstate'] = 0
context.update({'error': 'File does not exist','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : File does not exist, ' + str(e))
return context
elif type == "url":
try:
request.session['uploadfiletype'] = 'URL'
url_text = request.POST.get('urlpathinput')
                        log.info('Data ingestion from URL..')
request.session['uploadLocation'] = url_text
url = url_text
check_df = pd.read_csv(url)
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
check_df.to_csv(dataFile,index=False)
request.session['datalocation'] = dataFile
except Exception as e:
request.session['currentstate'] = 0
e = str(e)
print(e)
if e.find("tokenizing")!=-1:
error = "This is not an open source URL to access data"
context.update({'error': error, 'ModelVersion': ModelVersion, 'emptycsv': 'emptycsv'})
elif e.find("connection")!=-1:
error = "Can not access the URL through HCL network, please try with other network"
context.update({'error': error, 'ModelVersion': ModelVersion, 'emptycsv': 'emptycsv'})
else:
error = 'Please provide a correct URL'
context.update({'error': error,'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : '+error + ', '+str(e))
return context
elif type == "nifi":
try:
request.session['uploadfiletype'] = 'Nifi'
                        log.info('Data ingestion from Nifi..')
url_text = request.POST.get('nifiurlpathinput')
request.session['uploadLocation'] = url_text
response = requests.get(url_text)
csv_str = response.content.decode('utf-8')
check_df = pd.read_csv(StringIO(csv_str))
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
check_df.to_csv(dataFile,index=False)
request.session['datalocation'] = dataFile
except requests.exceptions.ConnectionError:
request.session['currentstate'] = 0
context.update({'error': 'Connection Error','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error:Connection Error')
return context
except Exception as e:
print(e)
request.session['currentstate'] = 0
e = str(e)
context.update({'error': e,'ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : '+str(e))
return context
elif type == "tblaiondata":
try:
dataset = request.POST.get('datasetname')
print('dataset',dataset)
from appbe.dataPath import DATA_DIR
from appbe.sqliteUtility import sqlite_db
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
temp_data = sqlite_obj.read_data('dataingest')
dataFile = ''
for x in temp_data:
if x[1] == dataset:
dataFile = x[0]
check_df = pd.read_csv(dataFile)
request.session['datalocation'] = dataFile
except Exception as e:
request.session['currentstate'] = 0
context.update({'error': 'Failed to read data','ModelVersion': ModelVersion,'emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : File does not exist, ' + str(e))
return context
else:
if request.FILES:
Datapath = request.FILES['DataFilePath']
if Datapath.size > 31457280:
context.update({'tab': 'tabconfigure','error': 'Upload limit is 30 MB only, use local file option for larger file','currentstate': request.session['currentstate'], 'ModelVersion': ModelVersion})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : Upload limit is 30 MB only, use local file option for larger file')
return context
ext = str(Datapath).split('.')[-1]
request.session['uploadfiletype'] = 'Local'
request.session['datatype'] = 'Normal'
filetimestamp = str(int(time.time()))
if ext.lower() in ['csv','tsv','tar','zip','avro','parquet']:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.'+ext)
else:
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp)
                        with open(dataFile, 'wb+') as destination:
                            for chunk in Datapath.chunks():
                                destination.write(chunk)
dataPath = dataFile
else:
dataPath = request.POST.get('localfilePath')
#print(os.path.getsize(dataPath))
# 10012:Decision Threshold related Changes - S T A R T
#removed few lines related to the check to not allow data to be ingested
# E N D
                    if request.POST.get('optfiletype') == 'avro':
                        try:
                            import pandavro as pdx
                            df = pd.DataFrame()  # accumulator must exist before the first append
                            if os.path.isdir(dataPath):
                                for f in os.listdir(dataPath):
                                    if f.endswith('avro'):
                                        processed_df = pdx.read_avro(os.path.join(dataPath, f))
                                        if not df.empty:
                                            df = df.append(processed_df, ignore_index=True)
                                        else:
                                            df = pd.DataFrame(processed_df)
                            elif os.path.isfile(dataPath):
                                df = pdx.read_avro(dataPath)
                            filetimestamp = str(int(time.time()))
                            dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp + '.csv')
                            df.to_csv(dataFile, index=False)
                            request.session['datalocation'] = str(dataFile)
                        except Exception as e:
                            print(e)
                    elif request.POST.get('optfiletype') == 'parquet':
                        df = pd.DataFrame()  # accumulator must exist before the first append
                        if os.path.isdir(dataPath):
                            for f in os.listdir(dataPath):
                                if f.endswith('parquet'):
                                    processed_df = pd.read_parquet(os.path.join(dataPath, f), engine='pyarrow')
                                    if not df.empty:
                                        df = df.append(processed_df, ignore_index=True)
                                    else:
                                        df = pd.DataFrame(processed_df)
                        elif os.path.isfile(dataPath):
                            df = pd.read_parquet(dataPath, engine='pyarrow')
                        filetimestamp = str(int(time.time()))
                        dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp + '.csv')
                        df.to_csv(dataFile, index=False)
                        request.session['datalocation'] = str(dataFile)
                    elif request.POST.get('optfiletype') == 'dilimeted':
                        if os.path.isdir(dataPath):
                            df = pd.DataFrame()  # accumulator must exist before the first append
                            for f in os.listdir(dataPath):
                                if f.endswith('csv') or f.endswith('tsv'):
                                    processed_df = pd.read_csv(os.path.join(dataPath, f), encoding='utf8', sep=delimiter, quotechar=textqualifier, skipinitialspace=True, encoding_errors='replace')
                                    if not df.empty:
                                        df = df.append(processed_df, ignore_index=True)
                                    else:
                                        df = pd.DataFrame(processed_df)
                            filetimestamp = str(int(time.time()))
                            dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp + '.csv')
                            df.to_csv(dataFile, index=False, sep=delimiter, quotechar=textqualifier)
                            request.session['datalocation'] = str(dataFile)
                        else:
                            # single delimited file: use it directly
                            dataFile = dataPath
                    else:
                        dataFile = dataPath
request.session['uploadfiletype'] = 'Local'
request.session['datatype'] = 'Normal'
FileReadingstatus = True
request.session['currentstate'] = 0
if dataPath.endswith('tar'):
dataFile = tarFileExtraction(dataPath,DATA_FILE_PATH)
if dataPath.endswith('zip'):
dataFile = multipleZipExtraction(dataPath,DATA_FILE_PATH)
if dataFile == '':
FileReadingstatus = False
msg = 'Please provide a file name'
elif dataFile.endswith(".xls") or dataFile.endswith(".xlsx"):
FileReadingstatus = False
                        msg = 'Please provide a delimited file'
elif not os.path.isfile(dataFile):
FileReadingstatus = False
msg = 'File does not exist'
else:
                        check_df = pd.DataFrame()
try:
try:
cvobj = csv_validator()
valid_header, validrows, rownumbers = cvobj.validate_header(dataFile,delimiter,textqualifier)
request.session['datalocation'] = str(dataFile)
if not validrows:
FileReadingstatus = False
msg = 'Data Format issue'
else:
if valid_header:
check_df = pd.read_csv(dataFile, encoding='utf8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,nrows=100,encoding_errors= 'replace')
request.session['datalocation'] = str(dataFile)
else:
check_df = pd.read_csv(dataFile, header=None, encoding='utf8', prefix='X',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,encoding_errors= 'replace')
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH, 'AION_' + filetimestamp+'.csv')
check_df.to_csv(dataFile, index=False)
request.session['datalocation'] = str(dataFile)
except Exception as e:
print(e)
check_df = pd.read_csv(dataFile, encoding='utf8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,nrows=100)
request.session['datalocation'] = str(dataFile)
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+str(e))
except UnicodeDecodeError:
FileReadingstatus = False
msg = 'Only utf8 file encoding supported'
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error:'+msg)
except pd.errors.EmptyDataError:
FileReadingstatus = False
msg = 'File is empty'
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error:'+msg)
except pd.errors.ParserError:
FileReadingstatus = False
                        msg = 'File Parsing Error'
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+msg)
except FileNotFoundError:
FileReadingstatus = False
msg = 'File does not exist'
request.session['currentstate'] = 0
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : '+msg)
except Exception as e:
msg = 'File Read Error'
FileReadingstatus = False
print(e)
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(
ModelVersion) + ' : ' + '0 ' + 'sec' + ' : ' + 'Error : ' + msg+', '+str(e))
if check_df.empty and FileReadingstatus:
FileReadingstatus = False
                        msg = 'Data file is empty'
if not FileReadingstatus:
context.update({'tab': 'tabconfigure','error': msg,'currentstate': request.session['currentstate'], 'ModelVersion': ModelVersion})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : '+msg)
return context
# -------------------------------- 10012:Decision Threshold related Changes S T A R T -------------------------------
data_is_under_RAM_threshold = checkRAMThreshold(request.session['datalocation'])
msg = ""
if data_is_under_RAM_threshold == False:
msg = "AION will not be able to train on data set provided as it is bigger than available RAM, Please choose distributed learning for further processing."
# ------------------------------------------------------ E N D ------------------------------------------------------
check_df.rename(columns=lambda x: x.strip(), inplace=True)
featuresList = check_df.columns.tolist()
numberoffeatures = len(featuresList)
imp_features = getimpfeatures(dataFile,numberoffeatures,delimiter,textqualifier)
samplePercentage = 100
samplePercentval = 0
showRecommended = False
sample_size = int(eda_setting())
dflength = len(check_df)
if dflength > sample_size:
samplePercentage = round(float((sample_size/dflength) * 100),2)
samplePercentval = samplePercentage / 100
showRecommended = True
df_top = check_df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
request.session['currentstate'] = 0
request.session['finalstate'] = 0
request.session['datatype'] = 'Normal'
records = check_df.shape[0]
request.session['NoOfRecords'] = records
statusmsg = 'Data File Uploaded Successfully'
t2 = time.time()
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + str(
round(t2 - t1)) + ' sec' + ' : ' + 'Success')
# EDA Subsampling changes
context.update({'range':range(1,101),'samplePercentage':samplePercentage, 'samplePercentval':samplePercentval, 'showRecommended':showRecommended,'featuresList': featuresList,'tab': 'tabconfigure', 'data': df_json, 'status_msg': statusmsg,
'selected': 'modeltraning','imp_features':imp_features,'numberoffeatures':numberoffeatures,
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],
'exploratory': False})
if msg!="":
context.update({'data_size_alert': msg})
return context
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
request.session['currentstate'] = 0
context.update({'error': 'Failed to read data','emptycsv' : 'emptycsv'})
log.info('Data Ingestion : ' + str(selected_use_case) + ' : ' + str(ModelVersion) + ' : ' + '0' + ' sec' + ' : ' + 'Error : Failed to read data, '+str(e))
log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return context
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os,sys
import json
import platform
import subprocess
def kafka_setting():
    file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf'))
    with open(file_path, "r") as f:
        configSettings = f.read()
    configSettingsJson = json.loads(configSettings)
from os.path import expanduser
home = expanduser("~")
if platform.system() == 'Windows':
DEPLOY_LOCATION = os.path.join(home,'AppData','Local','HCLT','AION','target','kafka')
else:
DEPLOY_LOCATION = os.path.join(home,'HCLT','AION','target','kafka')
configSettingsJson['kafkalocation'] = DEPLOY_LOCATION
return(configSettingsJson)
def start_tracking():
from appbe.dataPath import DEPLOY_LOCATION
import platform
mlflowpath = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','..','Scripts','mlflow.exe'))
script_path = os.path.normpath(os.path.join(os.path.dirname(__file__),'..','..','..','..','Scripts'))
#Updating path for system environment; Bug-13835
    os.environ['PATH'] = os.environ['PATH'] + os.pathsep + str(script_path)
DEPLOY_LOCATION = os.path.join(DEPLOY_LOCATION,'mlruns')
if platform.system() == 'Windows':
subprocess.Popen([sys.executable, mlflowpath,"ui", "--backend-store-uri","file:///"+DEPLOY_LOCATION])
else:
subprocess.Popen(['mlflow',"ui","-h","0.0.0.0","--backend-store-uri","file:///"+DEPLOY_LOCATION])
def aion_tracking():
status = 'Success'
import requests
try:
response = requests.get('http://localhost:5000')
if response.status_code != 200:
status = 'Error'
except Exception as inst:
print(inst)
status = 'Error'
return status
def aion_service():
try:
if platform.system() == 'Windows':
nooftasks = getrunningstatus('AION_Service')
else:
nooftasks = getrunningstatus('run_service')
if len(nooftasks):
status = 'Running'
else:
if platform.system() == 'Windows':
servicepath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','sbin','AION_Service.bat'))
os.system('start cmd /c "'+servicepath+'"')
else:
servicepath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','bin','run_service.py'))
subprocess.Popen([sys.executable,servicepath])
status = 'Started'
except Exception as inst:
print(inst)
status = 'Error'
return status
def getrunningstatus(name):
try:
taskdetails = []
if platform.system() == 'Windows':
r = ([line.split() for line in subprocess.check_output('tasklist /v /FI "IMAGENAME eq conhost.exe"').decode('UTF-8').splitlines()])
r.append([line.split() for line in subprocess.check_output('tasklist /v /FI "IMAGENAME eq cmd.exe"').decode('UTF-8').splitlines()])
else:
r = ([line.split() for line in subprocess.check_output("ps -ef | grep .py",shell=True).decode('UTF-8').splitlines()])
for i in range(len(r)):
s = r[i]
if any(name in j for j in s):
taskdetails.append('Yes')
break
return (taskdetails)
except Exception as inst:
print(inst)
status = 'Error'
return status
def getTasks(mlflow,consumer,service):
mlflowlist = []
consumerlist=[]
servicelist = []
    #r = os.popen('tasklist /v').read().strip().split('\n')
try:
if platform.system() == 'Windows':
r = ([line.split() for line in subprocess.check_output('tasklist /v /FI "IMAGENAME eq conhost.exe"').decode('UTF-8').splitlines()])
r.append([line.split() for line in subprocess.check_output('tasklist /v /FI "IMAGENAME eq cmd.exe"').decode('UTF-8').splitlines()])
else:
r = ([line.split() for line in subprocess.check_output("ps -ef | grep .py",shell=True).decode('UTF-8').splitlines()])
except Exception as e:
print(e)
r = []
#print(r)
#print ('# of tasks is %s' % (len(r)))
for i in range(len(r)):
s = r[i]
if any(mlflow in j for j in s):
mlflowlist.append('Yes')
if any(consumer in j for j in s):
consumerlist.append('Yes')
if any(service in j for j in s):
servicelist.append('Yes')
return (mlflowlist,consumerlist,servicelist)
def running_setting():
otherApps = {}
if platform.system() == 'Windows':
mlflowlist,consumerlist,servicelist = getTasks('AION_MLFlow','AION_Consumer','AION_Service')
else:
mlflowlist,consumerlist,servicelist = getTasks('run_mlflow','AION_Consumer','run_service')
if len(mlflowlist):
otherApps['modeltracking'] = 'Running'
    else:
        otherApps['modeltracking'] = 'Not Running'
#nooftasks = getTasks('AION_Consumer')
if len(consumerlist):
otherApps['consumer'] = 'Running'
else:
otherApps['consumer'] = 'Not Running'
#nooftasks = getTasks('AION_Service')
if len(servicelist):
otherApps['service'] = 'Running'
else:
otherApps['service'] = 'Not Running'
return(otherApps)
#EDA Performance change
# ----------------------------
def eda_setting():
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config','eda.config')
sample_size=''
try:
if(os.path.isfile(configfilepath)):
file = open(configfilepath, "r")
read = file.read()
file.close()
for line in read.splitlines():
if 'sample_size=' in line:
sample_size = line.split('=',1)[1]
except Exception as inst:
pass
return(sample_size)
def get_telemetryoptout():
telemetryoptuout = "No"
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
try:
if sqlite_obj.table_exists('settings'):
data = sqlite_obj.read_data('settings')
for values in data:
telemetryoptuout = values[7]
else:
telemetryoptuout = 'No'
except Exception as e:
print(e)
telemetryoptuout ='No'
return telemetryoptuout
def get_edafeatures():
No_of_Permissible_Features_EDA = ""
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
try:
if sqlite_obj.table_exists('settings'):
data = sqlite_obj.read_data('settings')
for values in data:
No_of_Permissible_Features_EDA = values[3]
else:
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config')
if (os.path.isfile(configfilepath)):
file = open(configfilepath, "r")
read = file.read()
file.close()
for line in read.splitlines():
if 'No_of_Permissible_Features_EDA=' in line:
No_of_Permissible_Features_EDA = line.split('=', 1)[1]
except Exception as e:
print(e)
No_of_Permissible_Features_EDA =20
return No_of_Permissible_Features_EDA
def get_graviton_data():
graviton_url = ""
graviton_userid = ""
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
try:
if sqlite_obj.table_exists('settings'):
data = sqlite_obj.read_data('settings')
for values in data:
graviton_url = values[0]
graviton_userid = values[1]
else:
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config')
if (os.path.isfile(configfilepath)):
file = open(configfilepath, "r")
read = file.read()
file.close()
for line in read.splitlines():
if 'graviton_url=' in line:
graviton_url = line.split('=', 1)[1]
if 'graviton_userid=' in line:
graviton_userid = line.split('=', 1)[1]
except Exception as e:
print(e)
graviton_url = ""
graviton_userid = ""
return graviton_url,graviton_userid
def get_llm_data():
apiKeyIdLLM = ""
apiUrlLLM = ""
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
try:
if sqlite_obj.table_exists('openai'):
data = sqlite_obj.read_data('openai')[0]
param_keys = ['api_type','api_key','api_base','api_version']
openai_data = dict((x,y) for x,y in zip(param_keys,data))
return openai_data['api_key'],openai_data['api_base'],openai_data['api_type'],openai_data['api_version']
except Exception as e:
print(e)
apiKeyIdLLM = ""
apiUrlLLM = ""
return apiKeyIdLLM,apiUrlLLM,"",""
def settings():
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config','aion.config')
usecase='disable'
graviton_url = ''
graviton_userid = ''
apiKeyIdLLM = ''
apiUrlLLM = ''
No_of_Permissible_Features_EDA = ''
try:
from appbe.sqliteUtility import sqlite_db
import pandas as pd
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
if sqlite_obj.table_exists('settings'):
column_names = sqlite_obj.column_names('settings')
data = sqlite_obj.read_data('settings')
if 'telemetryOptOut' not in column_names:
query = 'Alter Table settings ADD telemetryOptOut TEXT'
sqlite_obj.execute_query(query)
if 'No_of_Permissible_Features_EDA' not in column_names or 'apiKeyIdLLM' not in column_names:
sqlite_obj.drop_table('settings')
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config')
                file = open(configfilepath, "r")
                dataread = file.read()
                # Defaults in case any of these keys are missing from the config
                # file; telemetryOptOut is needed when rebuilding the table below.
                telemetryOptOut = 'No'
                capiKeyIdLLM = ''
                capiUrlLLM = ''
                cNo_of_Permissible_Features_EDA = ''
                for line in dataread.splitlines():
if 'usecase=' in line:
cusecase = line.split('=', 1)[1]
if 'graviton_url=' in line:
cgraviton_url = line.split('=', 1)[1]
if 'graviton_userid=' in line:
cgraviton_userid = line.split('=', 1)[1]
if 'No_of_Permissible_Features_EDA=' in line:
cNo_of_Permissible_Features_EDA = line.split('=', 1)[1]
if 'apiKeyIdLLM=' in line:
capiKeyIdLLM = ''
if 'apiUrlLLM=' in line:
capiUrlLLM = ''
file.close()
if 'apiKeyIdLLM' not in column_names:
apiKeyIdLLM = capiKeyIdLLM
if 'apiUrlLLM' not in column_names:
apiUrlLLM = capiUrlLLM
if 'No_of_Permissible_Features_EDA' not in column_names:
No_of_Permissible_Features_EDA = cNo_of_Permissible_Features_EDA
newdata = {}
newdata.update({'graviton_url':[data[0][0]],'graviton_userid': [data[0][1]],'usecase': [data[0][2]],'No_of_Permissible_Features_EDA':[No_of_Permissible_Features_EDA],'settingsid':['1'],'apiKeyIdLLM' :apiKeyIdLLM,'apiUrlLLM':apiUrlLLM,'telemetryOptOut':telemetryOptOut})
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'settings')
data = sqlite_obj.read_data('settings')
for values in data:
graviton_url = values[0]
graviton_userid = values[1]
usecase = values[2]
No_of_Permissible_Features_EDA = values[3]
telemetryOptOut = values[7]
else:
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'aion.config')
if (os.path.isfile(configfilepath)):
file = open(configfilepath, "r")
read = file.read()
file.close()
apiKeyIdLLM = ''
apiUrlLLM = ''
for line in read.splitlines():
if 'usecase=' in line:
usecase = line.split('=', 1)[1]
if 'graviton_url=' in line:
graviton_url = line.split('=', 1)[1]
if 'graviton_userid=' in line:
graviton_userid = line.split('=', 1)[1]
if 'No_of_Permissible_Features_EDA=' in line:
No_of_Permissible_Features_EDA = line.split('=', 1)[1]
newdata = {}
newdata.update({'graviton_url':[graviton_url],'graviton_userid': [graviton_userid],'usecase': [usecase],'No_of_Permissible_Features_EDA':[No_of_Permissible_Features_EDA],'settingsid':['1'],'apiKeyIdLLM' :'','apiUrlLLM':'','telemetryOptOut':['No']})
# --------else create table and update the data, write data will create a table if it does nt exists-----
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'settings')
return(usecase)
except Exception as e:
print(e)
configfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','config','aion.config')
try:
if(os.path.isfile(configfilepath)):
file = open(configfilepath, "r")
read = file.read()
file.close()
for line in read.splitlines():
if 'usecase=' in line:
usecase = line.split('=',1)[1]
if 'graviton_url=' in line:
graviton_url = line.split('=',1)[1]
if 'graviton_userid=' in line:
graviton_userid = line.split('=',1)[1]
if 'No_of_Permissible_Features_EDA=' in line:
No_of_Permissible_Features_EDA = line.split('=', 1)[1]
if 'apiKeyIdLLM=' in line:
apiKeyIdLLM = line.split('=', 1)[1]
if 'apiUrlLLM=' in line:
apiUrlLLM = line.split('=', 1)[1]
except Exception as inst:
pass
external_system = 'enable'
semantico = 'enable'
return(usecase)
def addKafkaModel(request,datalocation):
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','config','kafkaConfig.conf'))
f = open(file_path, "r+")
configSettings = f.read()
configSettingsJson = json.loads(configSettings)
modelSignature = request.POST.get('modelsignature')
timeframe = request.POST.get('timeframe')
command = request.POST.get('kafkasubmit')
if command.lower() == 'configure':
configSettingsJson['timeFrame'][modelSignature] = str(timeframe)
configSettingsJson['trainingDataLocation'][modelSignature] = datalocation
elif command.lower() == 'unconfigure':
del configSettingsJson['timeFrame'][modelSignature]
updatedConfigSettingsJson = json.dumps(configSettingsJson)
f.seek(0)
f.write(updatedConfigSettingsJson)
f.truncate()
f.close()
def saveopenaisettings(request):
try:
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
import pandas as pd
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
if sqlite_obj.table_exists('openai'):
updated_data = 'api_type="'+request.POST.get('api_type')+'",api_key="'+request.POST.get('apiKeyIdLLM')+'",api_base="'+request.POST.get('apiUrlLLM')+'",api_version="'+request.POST.get('api_version')+'"'
sqlite_obj.update_data(updated_data,'','openai')
else:
newdata = {}
newdata.update({'api_type':['azure'],'api_key': [request.POST.get('apiKeyIdLLM')],'api_base': [request.POST.get('apiUrlLLM')],'api_version':[request.POST.get('api_version')]})
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'openai')
except Exception as e:
print(e)
def savegravitonconfig(request):
try:
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
import pandas as pd
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
updated_data = 'graviton_url="'+request.POST.get('graviton_url')+'",graviton_userid="'+request.POST.get('graviton_userid')+'"'
sqlite_obj.update_data(updated_data,'settingsid=1','settings')
except Exception as e:
print(e)
def saveconfigfile(request):
try:
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
import pandas as pd
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'config.db')
        updated_data = 'usecase="'+request.POST.get('usecasetab')+'",No_of_Permissible_Features_EDA="'+request.POST.get('edefeatures')+'",telemetryOptOut="'+request.POST.get('telemetryOptOut')+'"'
print(updated_data)
sqlite_obj.update_data(updated_data,'settingsid=1','settings')
return request.POST.get('usecasetab')
except Exception as e:
        print(e)

'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from wordcloud import WordCloud, STOPWORDS
import pandas as pd
import numpy as np
from appbe import distribution
import io
import urllib
import os
import sys
import base64
from appbe import help_Text as ht
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from natsort import natsorted
from sklearn.cluster import KMeans
import json
from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator
from appbe.aion_config import eda_setting
from dython.nominal import associations
def calculateNumberofCluster(featureData):
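    # Elbow heuristic: fit KMeans for k = 1..14 and pick the k whose inertia
    # point lies farthest (perpendicular distance) from the straight line
    # joining the first and last points of the inertia curve.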
Sum_of_squared_distances = []
K = range(1, 15)
for k in K:
km = KMeans(n_clusters=k)
km = km.fit(featureData)
Sum_of_squared_distances.append(km.inertia_)
x1, y1 = 1, Sum_of_squared_distances[0]
x2, y2 = 15, Sum_of_squared_distances[len(Sum_of_squared_distances) - 1]
distances = []
for inertia in range(len(Sum_of_squared_distances)):
x0 = inertia + 2
y0 = Sum_of_squared_distances[inertia]
numerator = abs((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1)
denominator = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
distances.append(numerator / denominator)
n_clusters = distances.index(max(distances)) + 2
#print(n_clusters)
return (n_clusters)
def get_eda(request):
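    # Assemble the full EDA context for the UI: subsample the data, detect
    # feature types, then build cluster, PCA, text, correlation and summary views.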
hopkins_val = ''
hopkins_tip = ''
if request.session['datatype'] == 'Normal':
from appbe.eda import ux_eda
# EDA Subsampling changes
# ----------------------------
edasampleSize = request.POST.get('SubsampleSize')
edasampleSize = str(int(edasampleSize)/100)
sampleFile = str(request.session['datalocation'])
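        # Sub-sampled files are named 'sub_<timestamp>_sampled_<name>'; that
        # marker is 30 characters long, so strip it to recover the original file.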
repText = sampleFile[sampleFile.find('sub_'):sampleFile.find('_sampled_') + 9]
if len(repText) == 30:
dataLocation = sampleFile.replace(repText,"")
else:
dataLocation = sampleFile
eda_obj = ux_eda(dataLocation,request.session['delimiter'],request.session['textqualifier'])
df0 = eda_obj.getdata()
if os.path.isfile(dataLocation):
if(len(edasampleSize) > 0):
df0 = df0.sample(frac = float(edasampleSize))
#EDA Performance change
# ----------------------------
dflength = len(df0)
# sample_size = int(eda_setting())
# if dflength >= sample_size:
# eda_obj.subsampleData(sample_size)
# else:
eda_obj.subsampleData(dflength)
# ----------------------------
TrainSampleSelected = request.POST.get('TrainSampleSize')
if(TrainSampleSelected == 'EDASize'):
from pathlib import Path
filePath = Path(dataLocation)
import datetime
timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
timestamp = str(timestamp.replace(":",""))
sub_sampledFile = filePath.parent/("sub_" + timestamp + "_sampled_"+filePath.name)
# sub_sampledFile = filePath.parent/(usename + "_sub_sampled_"+filePath.name)
df0.to_csv(sub_sampledFile,index=False,)
request.session['datalocation'] = str(sub_sampledFile)
records = df0.shape[0]
request.session['NoOfRecords'] = records
edaFeatures = request.POST.getlist('InputFeatures')
request.session['edaFeatures'] = edaFeatures
if(len(edaFeatures) > 0):
eda_obj.subsetFeatures(edaFeatures)
# ----------------------------
features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures()
request.session['edanumericCatFeatures'] = numericCatFeatures
request.session['edatextFeature'] = textFeature
categoricalfeatures = catfeatures
numericfeaturecount = eda_obj.getNumericFeatureCount()
cluster_details = []
dataCharts = []
# correlated_features=[]
pca_details = []
if numericfeaturecount > 1:
try:
cluster_details,hopkins_val = eda_obj.getClusterDetails()
if hopkins_val!='':
if float(hopkins_val) <0.3:
hopkins_tip = ht.hopkins_tip[0]
elif float(hopkins_val)>0.7:
hopkins_tip = ht.hopkins_tip[2]
else:
hopkins_tip = ht.hopkins_tip[1]
else:
hopkins_tip = ''
except Exception as e:
print("========================"+str(e))
pass
try:
pca_map = eda_obj.getPCATop10Features()
pca_details = pca_map
yaxis_data = pca_map.tolist()
xaxis_data = pca_map.index.values.tolist()
import plotly.graph_objects as go
cfig = go.Figure()
cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance'))
cfig.update_layout(barmode='stack', xaxis_title='Features',yaxis_title='Explained Variance Ratio')
bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000)
dataCharts.append(bargraph)
except:
pass
df = eda_obj.getdata()
# try:
# top5highcorr = eda_obj.getHighlyCorrelatedFeatures(5)
# correlated_features = getHighlyCorrelatedFeatureCharts(df,top5highcorr)
# except:
# pass
else:
df = eda_obj.getdata()
# # EDA Subsampling changes
# # ----------------------------
# if os.path.isfile(dataLocation):
# if dflength < 10000:
# if(len(edasampleSize) > 0):
# df = df.sample(frac = float(edasampleSize))
# ----------------------------
if len(textFeature) > 0:
commonfeatures = eda_obj.getTopTextFeatures(10)
# comment_words = eda_obj.word_token()
del eda_obj
wordcloudpic = ''
showtextFeature = False
if len(textFeature) > 0:
showtextFeature = True
# try:
# stopwords = set(STOPWORDS)
# wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords,
# min_font_size=10).generate(comment_words)
# try:
# plt.clf()
# except:
# pass
# plt.imshow(wordcloud, interpolation='bilinear')
# plt.axis("off")
# plt.tight_layout(pad=0)
# image = io.BytesIO()
# plt.savefig(image, format='png')
# image.seek(0)
# string = base64.b64encode(image.read())
# wordcloudpic = 'data:image/png;base64,' + urllib.parse.quote(string)
# except:
# pass
xaxis_data = commonfeatures['most_common_words'].tolist()
yaxis_data = commonfeatures['freq'].tolist()
import plotly.graph_objects as go
cfig = go.Figure()
cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance'))
cfig.update_layout(barmode='stack', xaxis_title='Features',yaxis_title='Count')
bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000)
dataCharts.append(bargraph)
df_top = df.head(10)
df_json = df_top.to_json(orient="records")
df_json = json.loads(df_json)
# if len(df) > 10000:
# df1 = df.sample(n=10000, random_state=1)
# else:
# df1 = df
df1 = df
data_deep_json = df_top.to_json(orient='records') #df1.to_json(orient='records')
try:
gfsg = GenericFeatureStatisticsGenerator()
proto = gfsg.ProtoFromDataFrames([{'name': 'train', 'table': df1}])
protostr = base64.b64encode(proto.SerializeToString()).decode("utf-8")
except Exception as e:
protostr=''
print('protostr '+str(e))
    try:
        correlationgraph = getCorrelationMatrix(df)
    except Exception as e:
        correlationgraph = ''
        print(e)
try:
dataDrift = 'onRequest' #getDriftDistribution(numericCatFeatures, df[numericCatFeatures])
except Exception as e:
dataDrift = ''
print(e)
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
statusmsg = 'Successfully Done'
DF_list = list()
des1 = df.describe(include='all').T
des1['missing count %'] = df.isnull().mean() * 100
des1['zero count %'] = df.isin([0]).mean() * 100
data = list(df.columns.values)
des1.insert(0, 'Features', data)
des1 = des1.to_json(orient="records")
pca_df=pd.DataFrame()
#print(pca_details)
# if pca_details.empty:
if len(pca_details) > 0:
pca_df = pd.DataFrame({'Feature':pca_details.index, 'Explained Variance Ratio':pca_details.values}).round(4)
pca_df = pca_df.to_json(orient="records")
if len(df.columns) > 25:
df3 = df[df.columns[0:24]]
else:
df3 = df.copy()
#cor_mat = abs(df3.corr())
#cor_mat = cor_mat.round(2)
    cor_mat = pd.DataFrame()  # default so the lines below don't fail if associations() errors
    try:
        cor_mat = associations(df3, compute_only=True)
        cor_mat = cor_mat['corr']
        #cor_mat = df3.corr()
        cor_mat = cor_mat.astype(float).round(2)
    except Exception as e:
        print("creating correlation mat issue: \\n", e)
        pass
data = list(cor_mat.index)
cor_mat.insert(0, 'Features', data)
cor_mat = cor_mat.to_json(orient="records")
cluster_df = pd.DataFrame.from_dict(cluster_details)
cluster_df = cluster_df.to_json(orient="records")
#textFeature = json.dumps(textFeature)
# 2.2 patch changes
#-------------------------------------------------
request.session['edaRecords'] = df.shape[0]
print(textFeature)
context = {'data_deep_json': data_deep_json, 'sampleFile':sampleFile,'protostr': protostr, 'data': df_json, 'oneda': True,
'dataCharts': dataCharts,'dataDrift': dataDrift, 'drift_tip': ht.drift_tip,'des1':des1,'cluster_df':cluster_df,'hopkins_val':hopkins_val,
'pca_df':pca_df,'cor_mat':cor_mat,'correlationgraph': correlationgraph, 'centroids':cluster_details, 'wordcloudpic': wordcloudpic, 'showtextFeature': showtextFeature, 'textFeature': textFeature,
# 'featurepairgraph': correlated_features,
'data_overview_tip': ht.data_overview_tip,'timeseries_analysis_tip':ht.timeseries_analysis_tip, 'feature_importance_tip': ht.feature_importance_tip,'hopkins_tip':hopkins_tip,
'correlation_analysis_tip': ht.correlation_analysis_tip,
'exploratory_analysis_tip': ht.exploratory_analysis_tip, 'data_deep_drive_tip': ht.data_deep_drive_tip,'status_msg': statusmsg,'selected_use_case': selected_use_case,
'pair_graph_tip':ht.pair_graph_tip, 'fair_metrics_tip':ht.fair_metrics_tip, 'categoricalfeatures':categoricalfeatures, 'numericCatFeatures':numericCatFeatures,
'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,'selected': 'modeltraning',
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],'exploratory':True,'NumericFeatureList':numericFeature,'dateFeature':dateFeature,'targetFeature':targetFeature}
return(context)
# EDA Visualization changes
# ---------------------------- |
def get_edaGraph(request):
if request.session['datatype'] == 'Normal':
from appbe.eda import ux_eda
df_temp = dict(request.GET).get('features[]')
graphType = request.GET.get('graphType')
d3_url = request.GET.get('d3_url')
mpld3_url = request.GET.get('mpld3_url')
dataLocation = request.session['datalocation']
eda_obj = ux_eda(dataLocation)
# 2.2 patch changes
#-------------------------------------------------
edaRecords = request.session['edaRecords']
#df = df.sample(n=int(edaRecords), random_state=1)
eda_obj.subsampleData(edaRecords)
eda_obj.subsetFeatures(df_temp)
features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature, catfeatures = eda_obj.getFeatures()
numericfeaturecount = eda_obj.getNumericFeatureCount()
correlated_features=[]
df = eda_obj.getdata()
if numericfeaturecount > 1:
try:
if graphType == 'Default':
top5highcorr = eda_obj.getHighlyCorrelatedFeatures(5)
correlated_features = getHighlyCorrelatedFeatureCharts(df,top5highcorr)
else:
correlated_features = getFeatureCharts(df,graphType,d3_url,mpld3_url)
except:
pass
return correlated_features
# ----------------------------
# ---------------------- 12686:Data Distribution related Changes S T A R T ----------------------
def get_DataDistribution(request):
selectedFeature = request.GET.get('selected_feature')
_featureItem = []
_featureItem.append(selectedFeature)
from appbe.eda import ux_eda
dataLocation = request.session['datalocation']
eda_obj = ux_eda(dataLocation)
df = eda_obj.getdata()
numericCatFeatures = request.session['edanumericCatFeatures']
textFeature = request.session['edatextFeature']
# features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures()
dataDrift = ''
if selectedFeature in numericCatFeatures:
dataDrift = getDriftDistribution(_featureItem, df[numericCatFeatures])
elif selectedFeature in textFeature:
try:
comment_words = eda_obj.word_token_for_feature(selectedFeature, df[_featureItem])
stopwords = set(STOPWORDS)
wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords,
min_font_size=10).generate(comment_words)
try:
plt.clf()
except:
pass
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.tight_layout(pad=0)
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
# wordcloudpic = 'data:image/png;base64,' + urllib.parse.quote(string)
dataDrift = urllib.parse.quote(string)
except:
dataDrift = ''
del eda_obj
return dataDrift
# -------------------------------------------- E N D --------------------------------------------
def get_DeepDiveData(request):
if request.session['datatype'] == 'Normal':
from appbe.eda import ux_eda
dataLocation = request.session['datalocation']
eda_obj = ux_eda(dataLocation)
edaRecords = request.session['edaRecords']
edaFeatures = request.session['edaFeatures']
eda_obj.subsampleData(edaRecords)
eda_obj.subsetFeatures(edaFeatures)
df = eda_obj.getdata()
data_deep_json = df.to_json(orient='records')
return (data_deep_json)
# Fairness Metrics changes
# ----------------------------
def get_fairmetrics(request):
import mpld3
if request.session['datatype'] == 'Normal':
from appbe.eda import ux_eda
df_temp = dict(request.GET).get('features[]')
d3_url = request.GET.get('d3_url')
mpld3_url = request.GET.get('mpld3_url')
global metricvalue
metricvalue = request.GET.get('metricvalue')
dataLocation = request.session['datalocation']
# dataLocation = 'C:\\\\MyFolder\\\\AION\\\\AION Datasets\\\\AIF360\\\\database.csv'
eda_obj = ux_eda(dataLocation, optimize=1)
features,dateFeature,seqFeature,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catfeatures = eda_obj.getFeatures()
# data = eda_obj.getdata()
data = pd.read_csv(dataLocation, na_values=['Unknown', ' '])
features_toEncode = features
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
data_encoded = data.copy()
categorical_names = {}
encoders = {}
# Use Label Encoder for categorical columns (including target column)
for feature in features_toEncode:
le = LabelEncoder()
le.fit(data_encoded[feature])
data_encoded[feature] = le.transform(data_encoded[feature])
categorical_names[feature] = le.classes_
encoders[feature] = le
data_perp = data_encoded
protected_feature = df_temp[0] #'Victim Race'
target_feature = df_temp[1] #'Perpetrator Sex'
# ------Theil index----- Task->13843
from aif360.sklearn.metrics import generalized_entropy_index
Ti_List = []
for items in categorical_names[protected_feature]:
df = data[data[protected_feature]==items]
le = LabelEncoder()
le.fit(df[target_feature])
df[target_feature] = le.transform(df[target_feature])
tf = generalized_entropy_index(df[target_feature], alpha = 1)
tf = round(tf, 4)
Ti_List.append(tf)
global Thi_idx
Thi_idx = Ti_List
#claas_size = categorical_names[protected_feature].size
new_list = [item for item in categorical_names[protected_feature] if not(pd.isnull(item)) == True]
claas_size = len(new_list)
if claas_size > 10:
return 'HeavyFeature'
metrics = fair_metrics(categorical_names, data_perp, protected_feature, target_feature, claas_size)
figure = plot_fair_metrics(metrics)
html_graph = mpld3.fig_to_html(figure,d3_url=d3_url,mpld3_url=mpld3_url)
return html_graph
def fair_metrics(categorical_names, data_perp, protected_feature, target_feature, claas_size):
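    # Build a one-column DataFrame of the selected fairness metric (Theil Index,
    # Disparate Impact or Statistical Parity Difference), computed once per class
    # of the protected feature, treating that class as the privileged group.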
import aif360
from aif360.datasets import StandardDataset
from aif360.metrics import BinaryLabelDatasetMetric
cols = [metricvalue]
obj_fairness = [[0]]
fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)
for indx in range(claas_size):
priv_group = categorical_names[protected_feature][indx]
privileged_class = np.where(categorical_names[protected_feature] == priv_group)[0]
data_orig = StandardDataset(data_perp,
label_name=target_feature,
favorable_classes=[1],
protected_attribute_names=[protected_feature],
privileged_classes=[privileged_class])
dataset_pred = data_orig
attr = dataset_pred.protected_attribute_names[0]
idx = dataset_pred.protected_attribute_names.index(attr)
privileged_groups = [{attr:dataset_pred.privileged_protected_attributes[idx][0]}]
unprivileged_size = dataset_pred.unprivileged_protected_attributes[0].size
unprivileged_groups = []
for idx2 in range(unprivileged_size):
unprivileged_groups.extend([{attr:dataset_pred.unprivileged_protected_attributes[idx][idx2]}])
metric_pred = BinaryLabelDatasetMetric(dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
if metricvalue == "Theil Index":
row = pd.DataFrame([Thi_idx[indx]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Disparate Impact":
row = pd.DataFrame([[metric_pred.disparate_impact()]],
columns = cols ,
index = [priv_group])
elif metricvalue == "Statistical Parity Difference":
row = pd.DataFrame([[metric_pred.mean_difference()]],
columns = cols ,
index = [priv_group])
#fair_metrics = fair_metrics.append(row)
fair_metrics = pd.concat([fair_metrics,row])
return fair_metrics
def plot_fair_metrics(fair_metrics):
import matplotlib.patches as patches
plt.style.use('default')
import seaborn as sns
fig, ax = plt.subplots(figsize=(10,4), ncols=1, nrows=1)
plt.subplots_adjust(
left = 0.125,
bottom = 0.1,
right = 0.9,
top = 0.9,
wspace = .5,
hspace = 1.1
)
y_title_margin = 1.2
plt.suptitle("Fairness metrics", y = 1.09, fontsize=20)
sns.set(style="dark")
cols = fair_metrics.columns.values
obj = fair_metrics.loc['objective']
if metricvalue == "Theil Index":
size_rect = [0.5]
rect = [-0.1]
bottom = [-0.1]
top = [2]
bound = [[-0.1,0.1]]
elif metricvalue == "Disparate Impact":
size_rect = [0.4]
rect = [0.8]
bottom = [0]
top = [2]
bound = [[-0.1,0.1]]
elif metricvalue == "Statistical Parity Difference":
size_rect = [0.2]
rect = [-0.1]
bottom = [-1]
top = [1]
bound = [[-0.1,0.1]]
#display(Markdown("### Check bias metrics :"))
#display(Markdown("A model can be considered bias if just one of these five metrics show that this model is biased."))
for attr in fair_metrics.index[0:len(fair_metrics)].values:
#display(Markdown("#### For the %s attribute :"%attr))
check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,1)]
#display(Markdown("With default thresholds, bias against unprivileged group detected in **%d** out of 5 metrics"%(5 - sum(check))))
for i in range(0,1):
plt.subplot(1, 1, i+1)
xx = fair_metrics.index[1:len(fair_metrics)].values.tolist()
yy = fair_metrics.iloc[1:len(fair_metrics)][cols[i]].values.tolist()
palette = sns.color_palette('husl', len(xx))
ax = sns.pointplot(x=fair_metrics.index[1:len(fair_metrics)], y=yy, palette=palette, hue=xx)
index = 0
for p in zip(ax.get_xticks(), yy):
if (p[1] > 2.0):
_color = palette.as_hex()[index]
_val = 'Outlier(' + str(round(p[1],3)) + ')'
ax.text(p[0]-0.5, 0.02, _val, color=_color)
else:
ax.text(p[0], p[1]+0.05, round(p[1],3), color='k')
index = index + 1
plt.ylim(bottom[i], top[i])
plt.setp(ax.patches, linewidth=0)
ax.get_xaxis().set_visible(False)
ax.legend(loc='right', bbox_to_anchor=(1, 0.8), ncol=1)
ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor="green", linewidth=1, linestyle='solid'))
# plt.axhline(obj[i], color='black', alpha=0.3)
plt.title(cols[i], fontname="Times New Roman", size=20,fontweight="bold")
ax.set_ylabel('')
ax.set_xlabel('')
return fig
# ----------------------------
def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()):
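    # Plot each feature's distribution (labelled with its best-fit distribution
    # name) and overlay the new data when provided; returns a base64-encoded PNG.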
try:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scipy
from scipy import stats
from scipy.stats import norm
import matplotlib.gridspec as gridspec
import math
import io, base64, urllib
np.seterr(divide='ignore', invalid='ignore')
from appbe.eda import ux_eda
eda_obj = ux_eda()
try:
plt.clf()
except:
pass
plt.rcParams.update({'figure.max_open_warning': 0})
sns.set(color_codes=True)
pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
if len(feature) > 4:
numneroffeatures = len(feature)
plt.figure(figsize=(10, numneroffeatures*2))
else:
            plt.figure(figsize=(10,5))
for i in enumerate(feature):
dataType = dataframe[i[1]].dtypes
if dataType not in pandasNumericDtypes:
dataframe[i[1]] = pd.Categorical(dataframe[i[1]])
dataframe[i[1]] = dataframe[i[1]].cat.codes
dataframe[i[1]] = dataframe[i[1]].astype(int)
dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0])
else:
dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean())
plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1)
plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1)
distname, sse = eda_obj.DistributionFinder(dataframe[i[1]])
try:
ax = sns.distplot(dataframe[i[1]], label=distname)
ax.legend(loc='best')
if newdataframe.empty == False:
dataType = newdataframe[i[1]].dtypes
if dataType not in pandasNumericDtypes:
newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]])
newdataframe[i[1]] = newdataframe[i[1]].cat.codes
newdataframe[i[1]] = newdataframe[i[1]].astype(int)
newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0])
else:
newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean())
distname, sse = distribution.DistributionFinder(newdataframe[i[1]])
ax = sns.distplot(newdataframe[i[1]], label=distname)
ax.legend(loc='best')
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
pass
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
string = base64.b64encode(buf.read())
uri = urllib.parse.quote(string)
return uri
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def getCategoryWordCloud(df):
labels = df.Label.unique()
df_output = pd.DataFrame()
tcolumns=['text']
for label in labels:
df2 = df[df['Label'] == label]
df2 = df2.reset_index()
wordcloud,df_text = getWordCloud(df2,tcolumns)
newrow = {'Label':label,'wordCloud':wordcloud}
        df_output = pd.concat([df_output, pd.DataFrame([newrow])], ignore_index=True)
return(df_output)
def getHighlyCorrelatedFeatureCharts(df, df_top):
numOfRows = df.shape[0]
cratio = 0.01
if (numOfRows < 1000):
cratio = 0.2
elif (numOfRows < 10000):
cratio = 0.1
elif (numOfRows < 100000):
cratio = 0.01
barcolor = ["red", "green", "blue", "goldenrod", "magenta"]
ffig = make_subplots(rows=2, cols=3)
height = 800
rowno = 1
colno = 1
featureCharts = []
try:
for index, row in df_top.iterrows():
feature1 = row['FEATURE_1']
feature2 = row['FEATURE_2']
df_temp = df[[feature1, feature2]]
feature1data = df_temp[feature1]
feature2data = df_temp[feature2]
nUnique = len(feature1data.unique().tolist())
            if nUnique / numOfRows >= cratio:
                feature1type = 'Continuous'
            else:
                feature1type = 'Category'
            nUnique = len(feature2data.unique().tolist())
            if nUnique / numOfRows >= cratio:
                feature2type = 'Continuous'
            else:
                feature2type = 'Category'
            charttype = 0
            if feature1type == 'Continuous' and feature2type == 'Continuous':
                df_temp[feature1] = pd.qcut(df_temp[feature1], q=8, duplicates='drop',precision=0)
                df_temp[feature1] = df_temp[feature1].astype(str).str.strip('()[]')
                feature1type = 'Category'
                xaxis = feature1
                yaxis = feature2
                charttype = 1
            if feature1type == 'Category' and feature2type == 'Continuous':
                xaxis = feature1
                yaxis = feature2
                charttype = 1
            if feature1type == 'Continuous' and feature2type == 'Category':
xaxis = feature1 #xaxis = feature2
yaxis = feature2 #yaxis = feature1
charttype = 1
if feature1type == 'Category' and feature2type == 'Category':
if (len(feature1data.unique().tolist()) < len(feature2data.unique().tolist())):
xaxis = feature1 #xaxis = feature2
yaxis = feature2 #yaxis = feature1
else:
xaxis = feature1
yaxis = feature2
if (len(df_temp[xaxis].unique().tolist()) > 5):
df_temp[xaxis] = pd.qcut(df_temp[xaxis], q=5, duplicates='drop',precision=0)
df_temp[xaxis] = df_temp[xaxis].astype(str).str.strip('()[]')
if (len(df_temp[yaxis].unique().tolist()) > 5):
df_temp[yaxis] = pd.qcut(df_temp[yaxis], q=3, duplicates='drop',precision=0)
df_temp[yaxis] = df_temp[yaxis].astype(str).str.strip('()[]')
charttype = 2
# if feature1type == 'Category' and feature2type == 'Category':
if charttype == 2:
uniqueclasses = df_temp[yaxis].unique().tolist()
cfig = go.Figure()
i = 1
for x in uniqueclasses:
df_temp3 = df_temp.loc[df_temp[yaxis] == x]
df_temp2 = df_temp3.groupby(xaxis, as_index=False)[yaxis].count()
if df_temp2[xaxis].dtypes == "object":
df_temp2 = df_temp2.set_index(xaxis).reindex(
natsorted(df_temp2[xaxis].tolist(), key=lambda y: y.lower())).reset_index()
xaxis_data = df_temp2[xaxis].tolist()
yaxis_data = df_temp2[yaxis].tolist()
cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name=x, marker_color=barcolor[i]))
i = i + 1
if i == 5:
break
cfig.update_layout(barmode='stack', xaxis_title=xaxis, yaxis_title=yaxis)
bargraph = cfig.to_html(full_html=False, default_height=450, default_width=400)
featureCharts.append(bargraph)
if charttype == 1:
df_temp2 = df_temp.groupby(xaxis, as_index=False)[yaxis].mean()
if df_temp2[xaxis].dtypes == "object":
df_temp2 = df_temp2.set_index(xaxis).reindex(
natsorted(df_temp2[xaxis].tolist(), key=lambda y: y.lower())).reset_index()
xaxis_data = df_temp2[xaxis].tolist()
yaxis_data = df_temp2[yaxis].tolist()
cfig = go.Figure()
cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Primary Product', marker_color='blue'))
cfig.update_layout(xaxis_title=xaxis, yaxis_title=yaxis)
bargraph = cfig.to_html(full_html=False, default_height=450, default_width=400)
featureCharts.append(bargraph)
colno += 1
if colno > 3:
colno = 1
rowno += 1
except Exception as e:
print(e)
return (featureCharts)
# EDA Visualization changes
# ----------------------------
def getFeatureCharts(df, graphType, d3_url,mpld3_url):
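    # Render the selected two-feature chart with matplotlib/seaborn and convert
    # it to embeddable HTML via mpld3 (kde and relplot create their own figures).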
featureCharts = []
feature1 = df.columns[0]
feature2 = df.columns[1]
import seaborn as sns
import mpld3
fig, ax = plt.subplots(figsize=[10,5])
if graphType == 'marker':
df.plot(ax=ax, marker='o')
# df[['age','education-num']].plot(ax=ax, marker='o')
if graphType == 'area':
df.plot(ax=ax, kind ="area")
# df[['education-num','age']].plot(ax=ax, kind ="area") # UIprb
if graphType == 'hexbin':
df.plot.hexbin(ax=ax, x=feature1, y=feature2, gridsize=2)
if graphType == 'boxplot':
plt.boxplot(df)
if graphType == 'scatter':
ax.scatter(df[feature1], df[feature2])
if graphType == 'regplot':
ax = sns.regplot(x= feature1, y=feature2, data= df, fit_reg = False, scatter_kws={"alpha": 0.5})
if graphType == 'lineplot':
ax = sns.lineplot(x= feature1, y=feature2, data= df)
if graphType == 'barplot':
ax = sns.barplot(x= feature1, y=feature2, data= df)
# ax = sns.barplot(x= 'age', y='fnlwgt', data= df) #Start_prb
ax.legend()
ax.set_xlabel(feature1)
ax.set_ylabel(feature2)
#print(d3_url)
#print(mpld3_url)
html_graph = mpld3.fig_to_html(fig,d3_url=d3_url,mpld3_url=mpld3_url)
if graphType == 'kde':
ax = sns.pairplot(df, kind="kde", height=4, x_vars=feature1,y_vars = feature2)
# ax = sns.pairplot(df[['age','fnlwgt']], kind="kde")
html_graph = mpld3.fig_to_html(ax.fig)
if graphType == 'relplot':
sns.set(style ="darkgrid")
ax = sns.relplot(x =feature1, y =feature2, data = df)
html_graph = mpld3.fig_to_html(ax.fig)
featureCharts.append(html_graph)
return (featureCharts)
# ----------------------------
def MostCommonWords(stopwords, inputCorpus, num_of_words=10):
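    # Count word frequencies in the corpus (excluding stopwords) and return the
    # top num_of_words as a two-column DataFrame (word, frequency).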
try:
from collections import Counter
new = inputCorpus.str.split()
new = new.values.tolist()
corpus = [word for i in new for word in i if word not in stopwords]
counter = Counter(corpus)
most = counter.most_common()
x, y = [], []
for word, count in most[: num_of_words + 1]:
x.append(word)
y.append(count)
return pd.DataFrame([x, y], index=['most_common_words', 'freq']).T
except:
print("exception", sys.exc_info())
return False
def removeFeature(df):
featuresList = df.columns.values.tolist()
modelFeatures = featuresList.copy()
datetimeFeatures = []
sequenceFeatures = []
unimportantFeatures = []
featuresRatio = {}
for i in featuresList:
check = match_date_format(df[i])
if check == True:
modelFeatures.remove(i)
continue
seq_check = check_seq_feature(df[i])
if seq_check == True:
modelFeatures.remove(i)
continue
ratio = check_category(df[i])
if ratio != 0:
featuresRatio[i] = ratio
else:
modelFeatures.remove(i)
return featuresList, modelFeatures
def check_category(data):
total_record = len(data)
nUnique = len(data.unique().tolist())
if nUnique == 1:
return 0
ratio = nUnique / total_record
return (ratio)
def check_seq_feature(data):
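    # A feature is treated as a sequence (e.g. a row index) when consecutive
    # values increase by exactly 1 across the whole column.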
if data.dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
total_record = data.count()
count = (data - data.shift() == 1).sum()
if ((total_record - count) == 1):
return True
return False
def match_date_format(data):
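    # Treat the feature as a datetime column when at least 80% of its values
    # match one of the supported date/time formats below.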
data = data.astype(str)
beforecheckcount = (data.count()*80)/100
#####YYYY-MM-DD HH:MM:SS####
check1 = data[data.str.match(
r'(^\\d\\d\\d\\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01]) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9]):([0-9]|[0-5][0-9])$)') == True]
aftercheckcount = check1.count()
if (beforecheckcount <= aftercheckcount):
return True
#####MM/DD/YYYY HH:MM####
check2 = data[data.str.match(
        r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\\d\\d\\d\\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True]
aftercheckcount = check2.count()
if (beforecheckcount <= aftercheckcount):
return True
#####DD-MM-YYYY HH:MM####
check2 = data[data.str.match(
r'(^(0?[1-9]|[12][0-9]|3[01])-(0?[1-9]|1[0-2])-(\\d\\d\\d\\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True]
aftercheckcount = check2.count()
if (beforecheckcount <= aftercheckcount):
return True
#####YYYY/MM/DD####
check2 = data[data.str.match(r'(^\\d\\d\\d\\d/(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])$)') == True]
aftercheckcount = check2.count()
if (beforecheckcount <= aftercheckcount):
return True
#####MM/DD/YYYY####
check2 = data[data.str.match(r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\\d\\d\\d\\d)$)') == True]
aftercheckcount = check2.count()
if (beforecheckcount <= aftercheckcount):
return True
return False
def check_text_features(df, modelFeatures):
aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
textFeature = []
for i in enumerate(modelFeatures):
dataType = df[i[1]].dtypes
numOfRows = df.shape[0]
if dataType not in aionNumericDtypes:
if dataType != 'bool':
nUnique = len(df[i[1]].unique().tolist())
textnumbericratio = 0.01
if (numOfRows < 1000):
textnumbericratio = 0.2
elif (numOfRows < 10000):
textnumbericratio = 0.1
elif (numOfRows < 100000):
textnumbericratio = 0.01
if nUnique / numOfRows >= textnumbericratio:
textFeature.append(i[1])
return (textFeature)
def getWordCloud(df, text_columns):
df_text = pd.DataFrame()
stopwords = set(STOPWORDS)
if (len(text_columns) > 1):
df_text['combined'] = df[text_columns].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
features = ['combined']
else:
df_text[['combined']] = df[text_columns]
features = ['combined']
df_text[features[0]] = df_text[features[0]].fillna("NA")
textCorpus = df_text[features[0]]
from text import TextProcessing
tp = TextProcessing.TextProcessing()
preprocessed_text = tp.transform(textCorpus)
df_text['combined'] = preprocessed_text
df_text_list = df_text.values.tolist()
comment_words = ""
for val in df_text_list:
val = str(val)
tokens = val.split()
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
comment_words += " ".join(tokens) + " "
wordcloud = WordCloud(stopwords=stopwords).generate(comment_words)
try:
plt.clf()
except:
pass
try:
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.tight_layout(pad=0)
image = io.BytesIO()
plt.savefig(image, format='png')
image.seek(0)
string = base64.b64encode(image.read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
except Exception as inst:
print(inst)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
image_64=''
return (image_64, df_text)
def getTopTextFeatures(df_text):
stopwords = set(STOPWORDS)
commonfeatures = MostCommonWords(stopwords, df_text['combined'])
xaxis_data = commonfeatures['most_common_words'].tolist()
yaxis_data = commonfeatures['freq'].tolist()
import plotly.graph_objects as go
cfig = go.Figure()
cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance'))
cfig.update_layout(barmode='stack', xaxis_title='Features')
bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000)
return (bargraph)
def getPCATop10Features(df, modelFeatures):
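    # Label-encode categoricals, impute missing values, fit a 2-component PCA,
    # then rank features by the absolute difference of their loadings across
    # the two components and keep the top 10.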
aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
categorial_features = []
for i in enumerate(modelFeatures):
dataType = df[i[1]].dtypes
if dataType not in aionNumericDtypes:
categorial_features.append(i[1])
df[i[1]] = pd.Categorical(df[i[1]])
df[i[1]] = df[i[1]].cat.codes
df[i[1]] = df[i[1]].astype(int)
df[i[1]] = df[i[1]].fillna(df[i[1]].mode()[0])
else:
df[i[1]] = df[i[1]].fillna(df[i[1]].mean())
from sklearn.decomposition import PCA
pca = PCA(n_components=2).fit(df)
map = pd.DataFrame(pca.components_, columns=modelFeatures)
map = map.diff(axis=0).abs()
map = map.iloc[1]
map = map.sort_values(ascending=False).head(10)
yaxis_data = map.tolist()
xaxis_data = map.index.values.tolist()
import plotly.graph_objects as go
cfig = go.Figure()
cfig.add_trace(go.Bar(x=xaxis_data, y=yaxis_data, name='Feature Importance'))
cfig.update_layout(barmode='stack', xaxis_title='Features')
bargraph = cfig.to_html(full_html=False, default_height=450, default_width=1000)
return (bargraph)
def getCorrelationMatrix(df):
try:
#from dython.nominal import associations
if len(df.columns) > 25:
df3 = df[df.columns[0:24]]
else:
df3 = df.copy()
cor_mat= associations(df3,compute_only=True)
cor_mat=cor_mat['corr']
#cor_mat = df3.corr()
cor_mat = cor_mat.astype(float).round(2)
#print(cor_mat)
z = cor_mat.values.tolist()
fig = ff.create_annotated_heatmap(z, x=cor_mat.columns.tolist(), y=cor_mat.index.tolist(), annotation_text=z,
colorscale='Blues')
fig.layout.yaxis.automargin = True
correlationgraph = fig.to_html(full_html=True, default_height=450, default_width=1000)
except Exception as e:
print(e)
correlationgraph = ''
return (correlationgraph)
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import kfp
import kfp.dsl as dsl
import json
from pathlib import Path
class aionpipelinets():
containerRegistry = str()
containerLabel = str()
containerSecret = str()
pipelineName = 'AION MLOps Pipeline {0}'
exeCmd = 'python'
codeFile = 'aionCode.py'
mntPoint = '/aion'
inputArg = '-i'
msIP = '0.0.0.0'
port = '8094'
cachingStrategy = 'P0D'
deafultVolume = '2Gi'
volName = 'aion-pvc'
volMode = 'ReadWriteMany'
fileExt = '.tar.gz'
fileName = 'aion_mlops_pipeline_{0}'
containerMM = 'modelmonitoring'
containerDI = 'dataingestion'
containerDT = 'datatransformation'
containerFE = 'featureengineering'
containerMR = 'modelregistry'
containerMS = 'modelserving'
containerImage = '{0}/{1}:{2}'
models = {}
nameSeprator = '-'
modelsLiteral = 'models'
modelNameLiteral = 'modelname'
msTemplate = '{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "{{workflow.name}}-{0}"}, "spec": {"containers": [{"name": "{0}", "image": "{1}", "command": ["python"], "args": ["aionCode.py", "-ip", "{2}", "-pn", "{3}"],"volumeMounts": [{"name": "aion-pvc", "mountPath": "{4}"}], "ports": [{"name": "http", "containerPort": {3}, "protocol": "TCP"}]}], "imagePullSecrets": [{"name": "{5}"}], "volumes": [{"name": "aion-pvc", "persistentVolumeClaim": {"claimName": "{{workflow.name}}-{6}"}}]}}'
def __init__(self, models, containerRegistry, containerLabel, containerSecret=str()):
self.models = models
self.containerRegistry = containerRegistry
self.containerLabel = containerLabel
self.containerSecret = containerSecret
@dsl.pipeline(
name=pipelineName.format(containerLabel),
description=pipelineName.format(containerLabel),
)
def aion_mlops(self, inputUri=str(), volSize=deafultVolume):
vop = dsl.VolumeOp(
name=self.volName + self.nameSeprator + self.containerLabel,
resource_name=self.volName,
modes=[self.volMode],
size=volSize
)
mm = dsl.ContainerOp(
name=self.containerMM,
image=self.containerImage.format(self.containerRegistry,self.containerMM,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
self.inputArg,
inputUri,
],
pvolumes={self.mntPoint: vop.volume}
)
mm.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
di = dsl.ContainerOp(
name=self.containerDI,
image=self.containerImage.format(self.containerRegistry,self.containerDI,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
],
pvolumes={self.mntPoint: mm.pvolume}
)
di.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
dt = dsl.ContainerOp(
name=self.containerDT,
image=self.containerImage.format(self.containerRegistry,self.containerDT,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
],
pvolumes={self.mntPoint: di.pvolume}
)
dt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
fe = dsl.ContainerOp(
name=self.containerFE,
image=self.containerImage.format(self.containerRegistry,self.containerFE,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
],
pvolumes={self.mntPoint: dt.pvolume}
)
fe.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
dictMT = {}
listMTOps = []
for model in self.models[self.modelsLiteral]:
modelName = model[self.modelNameLiteral]
mt=dsl.ContainerOp(
name=modelName,
image=self.containerImage.format(self.containerRegistry,modelName,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
],
pvolumes={self.mntPoint: fe.pvolume})
mt.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
listMTOps.append(mt)
dictMT[self.mntPoint]=mt.pvolume
mr = dsl.ContainerOp(
name=self.containerMR,
image=self.containerImage.format(self.containerRegistry,self.containerMR,self.containerLabel),
command=self.exeCmd,
arguments=[
self.codeFile,
],
pvolumes=dictMT
).after(*tuple(listMTOps))
mr.execution_options.caching_strategy.max_cache_staleness = self.cachingStrategy
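        # Note: str({0}) renders as '{0}', so plain str.replace() can fill the
        # placeholders without str.format() choking on the JSON braces in msTemplate.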
msJson = self.msTemplate.replace(str({0}),self.containerMS).replace(str({1}),self.containerImage.format(self.containerRegistry,self.containerMS,self.containerLabel)).replace(str({2}),self.msIP).replace(str({3}),self.port).replace(str({4}),self.mntPoint).replace(str({5}),self.containerSecret).replace(str({6}),self.volName)
ms = dsl.ResourceOp(
name=self.containerMS + self.nameSeprator + self.containerLabel,
k8s_resource=json.loads(msJson),
)
ms.after(mr)
def compilepl(self, targetPath=str()):
filePath = self.fileName.format(self.containerLabel.lower()) + self.fileExt
if targetPath != str():
filePath = Path(targetPath, filePath)
kfp.compiler.Compiler().compile(self.aion_mlops, str(filePath))
def executepl(self, kfhost=str()):
client = kfp.Client(kfhost)
client.create_run_from_pipeline_func(self.aion_mlops,arguments={})
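# Example usage (hypothetical registry, label and host values):
#   pl = aionpipelinets({'models': [{'modelname': 'xgboost'}]}, 'myregistry.io/aion', 'v1', 'regcred')
#   pl.compilepl('/tmp')                  # writes /tmp/aion_mlops_pipeline_v1.tar.gz
#   pl.executepl('http://localhost:8080') # submits the run to a Kubeflow Pipelines endpoint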
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
from pathlib import Path
def label_filename(request):
filename = 'LabeledData.csv'
labelPath = os.path.join(request.session['datalocation'],'AION','Labels')
Path(labelPath).mkdir(parents=True, exist_ok=True)
filePath = os.path.join(labelPath,filename)
return filePath
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
import os
import rsa
import boto3 #usnish
import pandas as pd
import time
def add_new_azureStorage(request):
try:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','azurestorage.conf'))
with open(file_path, 'r') as f:
data = json.load(f)
f.close()
if data == '':
data = []
except:
data = []
if request.POST["azurename"] =='' or request.POST["azureaccountkey"] == '' or request.POST["containername"] == '' :
return 'error'
newdata = {}
newdata['azurename'] = request.POST["azurename"]
newdata['azureaccountkey'] = request.POST["azureaccountkey"]
newdata['containername'] = request.POST["containername"]
data.append(newdata)
with open(file_path, 'w') as f:
json.dump(data, f)
f.close()
return 'success'
def get_azureStorage():
try:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','azurestorage.conf'))
with open(file_path, 'r') as f:
data = json.load(f)
except:
data = []
return data
def read_azureStorage(name,directoryname,DATA_FILE_PATH):
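    # Look up the named storage account in azurestorage.conf, then read every
    # csv/tsv, parquet and avro file under the given ADLS directory into one DataFrame.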
try:
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','config','azurestorage.conf'))
with open(file_path, 'r') as f:
data = json.load(f)
except:
data = []
found = False
for x in data:
if x['azurename'] == name:
storage_account_name = str(x['azurename'])
storage_account_key = str(x['azureaccountkey'])
azure_container_name = x['containername']
found = True
break
try:
if found:
root_dir = str(directoryname)
from azure.storage.filedatalake import DataLakeServiceClient
import io
import pandavro as pdx
from detect_delimiter import detect
try:
service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", storage_account_name), credential=storage_account_key)
print(azure_container_name)
file_system_client = service_client.get_file_system_client(azure_container_name)
print(root_dir)
file_paths = file_system_client.get_paths(path=root_dir)
                    main_df = pd.DataFrame()
                    # csv_local/avro_local were referenced below but never defined;
                    # stage downloads under DATA_FILE_PATH (assumed to be the local data dir).
                    csv_local = os.path.join(DATA_FILE_PATH, 'temp_azure_download.csv')
                    avro_local = os.path.join(DATA_FILE_PATH, 'temp_azure_download.avro')
                    for path in file_paths:
if not path.is_directory:
file_client = file_system_client.get_file_client(path.name)
file_ext = os.path.basename(path.name).split('.', 1)[1]
if file_ext in ["csv", "tsv"]:
with open(csv_local, "wb") as my_file:
download = file_client.download_file()
download.readinto(my_file)
with open(csv_local, 'r') as file:
data = file.read()
row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\\t'])
processed_df = pd.read_csv(csv_local, sep=row_delimiter)
if file_ext == "parquet":
download = file_client.download_file()
stream = io.BytesIO()
download.readinto(stream)
processed_df = pd.read_parquet(stream, engine='pyarrow')
if file_ext == "avro":
with open(avro_local, "wb") as my_file:
download = file_client.download_file()
download.readinto(my_file)
processed_df = pdx.read_avro(avro_local)
                            if not main_df.empty:
                                main_df = pd.concat([main_df, processed_df], ignore_index=True)
                            else:
                                main_df = pd.DataFrame(processed_df)
except Exception as e:
print(e)
return 'Success',main_df
except Exception as e:
print(e)
        return 'Error', pd.DataFrame()

'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import time
import subprocess
import sys
import json
import pandas as pd
def getDataSetRecordsCount(datalocation):
try:
records = 0
if os.path.isfile(datalocation):
for chunk in pd.read_csv(datalocation, chunksize=20000):
records = records+len(chunk)
if records == 0:
records = 'NA'
except Exception as e:
print(e)
records = 'NA'
return records
def get_train_model_details(deploy_location,request):
updatedConfigFile = request.session['config_json']
f = open(updatedConfigFile, "r")
configSettings = f.read()
f.close()
usename = request.session['usecaseid'].replace(" ", "_")
outputfile = os.path.join(deploy_location,usename,str(request.session['ModelVersion']),'etc','output.json')
if os.path.isfile(outputfile):
f1 = open(outputfile, "r+", encoding="utf-8")
outputStr = f1.read()
f1.close()
resultJsonObj = json.loads(outputStr)
trainingStatus = resultJsonObj['status']
if trainingStatus.lower() == 'success':
details = resultJsonObj['data']
modelType = details['ModelType']
bestModel = details['BestModel']
return trainingStatus,modelType,bestModel
else:
return trainingStatus,'NA','NA'
else:
        return 'Not Trained','NA','NA'

'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from appbe import exploratory_Analysis as ea
import pandas as pd
from appbe.checkConfiguration import start_check
import json
import os
import ast
import time
import numpy as np
from appfe.modelTraining.models import usecasedetails
from appfe.modelTraining.models import Existusecases
# from modelTraining.models import view
from appbe.aion_config import kafka_setting
from appbe.aion_config import running_setting
from appbe.s3buckets import get_s3_bucket
from appbe.gcsbuckets import get_gcs_bucket
from appbe import help_Text as ht
def is_value_na( value):
if isinstance( value, str):
return value.strip().lower() in ['','na','none']
return not value
def set_ts_preprocessing(request,configSettingsJson): #Task 13052 Timeseries Preprocessing
interpolationType = request.POST.get('interpolationType')
ts_config = configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']
for key in ts_config['interpolation']:
configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['interpolation'][
key] = 'False'
if interpolationType != 'na':
configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['interpolation'][
interpolationType] = 'True'
ts_config['rollingWindow'] = request.POST.get('rollingWindow')
if ts_config['rollingWindow'] == 'True':
ts_config['rollingWindowSize'] = request.POST.get('rollWindowsize')
aggregation = request.POST.get('aaggregationType')
for key in ts_config['aggregation']['type']:
ts_config['aggregation']['type'][key]='False'
if is_value_na(aggregation) == False:
ts_config['aggregation']['type'][aggregation] = 'True'
granularityType = request.POST.get('unitType')
granularitySize = request.POST.get('garnularitysize')
for key in ts_config['aggregation']['granularity']['unit']:
ts_config['aggregation']['granularity']['unit'][key] = 'False'
ts_config['aggregation']['granularity']['unit'][granularityType]='True'
ts_config['aggregation']['granularity']['size'] = granularitySize
configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']= ts_config
return configSettingsJson
def update_granularity(configSettingsJson,datapath=None):
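    # Infer the time-series granularity unit from the gap between the first two
    # timestamps in the data and flag the matching unit in the configuration.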
try:
from AION.appbe.utils import set_true_option
import pandas as pd
from pathlib import Path
MINUTES = 60
if not is_value_na(configSettingsJson['basic']['dateTimeFeature']):
if not datapath:
datapath = configSettingsJson['basic']['dataLocation']
if Path( datapath).exists():
df = pd.read_csv(datapath, nrows=2)
if isinstance( configSettingsJson['basic']['dateTimeFeature'], list):
datetime_feature = configSettingsJson['basic']['dateTimeFeature'][0]
else:
datetime_feature = configSettingsJson['basic']['dateTimeFeature']
datetime = pd.to_datetime(df[ datetime_feature])
if len(datetime) > 1:
time_delta = (datetime[1] - datetime[0]).total_seconds()
granularity_unit = configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['granularity']['unit']
if time_delta < (1 * MINUTES):
set_true_option(granularity_unit, key='second')
elif time_delta < (60 * MINUTES):
set_true_option(granularity_unit, key='minute')
elif time_delta < (24 * 60 * MINUTES):
set_true_option(granularity_unit, key='hour')
elif time_delta < (7 * 24 * 60 * MINUTES):
set_true_option(granularity_unit, key='day')
elif time_delta < (30 * 24 * 60 * MINUTES):
set_true_option(granularity_unit, key='week')
elif time_delta < (365 * 24 * 60 * MINUTES):
set_true_option(granularity_unit, key='month')
else:
set_true_option(granularity_unit, key='year')
return configSettingsJson
except Exception as e:
print(f'\\nIgnoring error during granularity unit conversion\\n:{str(e)}')
return configSettingsJson
def save(request):
try:
status = 'pass'
msg = ""
DEPLOY_LOCATION = request.session['deploylocation']
if request.method == 'POST':
submittype = request.POST.get('BasicSubmit')
if submittype != 'BasicDefault':
filterjson = 'NA'
timegroupingjson = 'NA'
groupingjson = 'NA'
if request.POST.get('filters') != '':
filterjson = str(json.loads(request.POST.get('filters')))
if request.POST.get('timegroup') != '':
timegroupingjson = str(json.loads(request.POST.get('timegroup')))
if request.POST.get('idgroup') != '':
groupingjson = str(json.loads(request.POST.get('idgroup')))
configFile = request.session['config_json']
                f = open(configFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
temp = {}
                # Retraining settings changes
# -------- S T A R T --------
prbType = request.POST.get('ProblemType')
if prbType is None:
prbType = request.POST.get('tempProblemType')
# temp['ProblemType'] = request.POST.get('ProblemType')
# request.session['Problem'] = request.POST.get('ProblemType')
temp['ProblemType'] = prbType
                request.session['Problem'] = prbType
# ---------------------------
temp['ModelName'] = request.session['usecaseid']
temp['Version'] = str(request.session['ModelVersion'])
temp['InputFeatures'] = request.POST.getlist('IncInputFeatures')
temp['dataLocation'] = str(request.session['datalocation'])
onlinelearning=request.POST.get('onlineLearning',None)
if (onlinelearning is not None):
if onlinelearning.lower() == 'onlinelearning':
configSettingsJson['basic']['onlineLearning'] = 'True'
if onlinelearning.lower() == 'distributedlearning':
configSettingsJson['basic']['distributedLearning'] = 'True'
temp['InputFeatures'] = request.POST.getlist('IncInputFeatures')
temp['TargetFeatures'] = request.POST.getlist('TargetFeatures')
temp['DateTimeFeatures'] = ''
temp['IndexFeatures'] = ''
for x in configSettingsJson['advance']['profiler']['normalization'].keys():
configSettingsJson['advance']['profiler']['normalization'][x] = 'False'
configSettingsJson['advance']['profiler']['normalization']['standardScaler'] = 'True'
for x in configSettingsJson['advance']['profiler']['numericalFillMethod'].keys():
configSettingsJson['advance']['profiler']['numericalFillMethod'][x] = 'False'
configSettingsJson['advance']['profiler']['numericalFillMethod']['Mean'] = 'True'
if onlinelearning.lower() == 'distributedlearning':
for x in configSettingsJson['advance']['profiler']['categoricalFillMethod'].keys():
configSettingsJson['advance']['profiler']['categoricalFillMethod'][x] = 'False'
configSettingsJson['advance']['profiler']['categoricalFillMethod']['MostFrequent'] = 'True'
for x in configSettingsJson['advance']['profiler']['categoryEncoding'].keys():
configSettingsJson['advance']['profiler']['categoryEncoding'][x] = 'False'
configSettingsJson['advance']['profiler']['categoryEncoding']['OneHotEncoding'] = 'True'
configSettingsJson['advance']['profiler']['normalization']['standardScaler'] = 'False'
for x in configSettingsJson['advance']['selector']['featureEngineering'].keys():
if x != 'numberofComponents':
configSettingsJson['advance']['selector']['featureEngineering'][x] = 'False'
elif prbType == 'llmFineTuning':
if configSettingsJson['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'False':
temp['InputFeatures'] = request.POST.getlist('IncInputFeatures')
temp['TargetFeatures'] = request.POST.getlist('TargetFeatures')
contextFeatures = request.POST.getlist('contextFeatures')
configSettingsJson['basic']['contextFeature'] = ",".join([model for model in contextFeatures])
temp['DateTimeFeatures'] = ''
temp['IndexFeatures'] = ''
if request.POST.get('promptfriendlyname') != '':
configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['prompt'] = request.POST.get('promptfriendlyname')
else:
configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['prompt'] = 'Instruction'
if request.POST.get('responsefriendlyname') != '':
configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response'] = request.POST.get('responsefriendlyname')
else:
configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response'] = ''
else:
if request.session['datatype'] == 'LLM_Document':
for x in configSettingsJson['basic']['preprocessing']['llmFineTuning']['document'].keys():
configSettingsJson['basic']['preprocessing']['llmFineTuning']['document'][x] = 'False'
configSettingsJson['basic']['preprocessing']['llmFineTuning']['document'][request.POST.get('dataPreprocessing')] = 'True'
if request.session['datatype'] == 'LLM_Code':
for x in configSettingsJson['basic']['preprocessing']['llmFineTuning']['objective'].keys():
configSettingsJson['basic']['preprocessing']['llmFineTuning']['objective'][x] = 'False'
configSettingsJson['basic']['preprocessing']['llmFineTuning']['objective'][request.POST.get('llmObjective')] = 'True'
for x in configSettingsJson['basic']['preprocessing']['llmFineTuning']['code'].keys():
configSettingsJson['basic']['preprocessing']['llmFineTuning']['code'][x] = 'False'
configSettingsJson['basic']['preprocessing']['llmFineTuning']['code'][request.POST.get('dataPreprocessing')] = 'True'
else:
configSettingsJson['basic']['onlineLearning'] = 'False'
configSettingsJson['basic']['distributedLearning'] = 'False'
temp['InputFeatures'] = request.POST.getlist('InputFeatures')
temp['TargetFeatures'] = request.POST.getlist('TargetFeatures')
temp['DateTimeFeatures'] = request.POST.getlist('DateTimeFeatures')
temp['IndexFeatures'] = request.POST.getlist('IndexFeatures')
if (configSettingsJson['basic']['algorithms']['timeSeriesAnomalyDetection']['AutoEncoder'] == 'True'):#task 11997
if (request.POST.get('analysis') == 'MultiVariate'):
configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'True' #task 11997
configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'False' #task 11997
else:
#print(configSettingsJson)
configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'True'
configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'False' #task 11997
temp['UserID'] = ''
temp['ItemID'] = ''
temp['rating'] = ''
temp['secondDocFeature'] = ''
temp['firstDocFeature'] = ''
temp['invoiceNoFeature'] = ''
temp['itemFeature'] = ''
model = ''
if temp['ProblemType'].lower() == 'recommendersystem':
model = request.POST.get('MachineLearningModels')
if model == 'ItemRating':
temp['ProblemType'] = 'RecommenderSystem'
temp['MachineLearningModels'] = ['ItemRating']
temp['DeepLearningModels'] = ''
temp['UserID'] = request.POST.get('UserID')
temp['ItemID'] = request.POST.get('ItemID')
temp['rating'] = request.POST.get('rating')
temp['InputFeatures'] = []
temp['InputFeatures'].append(temp['UserID'])
temp['InputFeatures'].append(temp['ItemID'])
temp['InputFeatures'].append(temp['rating'])
if model == 'TextSimilarity-Siamese':
temp['ProblemType'] = 'recommenderSystem'
temp['MachineLearningModels'] = ['TextSimilarity-Siamese']
temp['secondDocFeature'] = request.POST.get('secondDocFeature')
temp['firstDocFeature'] = request.POST.get('firstDocFeature')
temp['InputFeatures'] = []
temp['InputFeatures'].append(temp['secondDocFeature'])
temp['InputFeatures'].append(temp['firstDocFeature'])
if model == 'AssociationRules-Apriori':
temp['ProblemType'] = 'recommenderSystem'
temp['DeepLearningModels'] = ''
temp['MachineLearningModels'] = ['AssociationRules-Apriori']
temp['invoiceNoFeature'] = request.POST.get('associationRuleInvoiceNo')
temp['itemFeature'] = request.POST.get('associationRuleItem')
temp['InputFeatures'] = []
temp['InputFeatures'].append(temp['invoiceNoFeature'])
temp['InputFeatures'].append(temp['itemFeature'])
temp['ScoringCriteria'] = request.POST.get('ScoringCriteria')
if temp['ProblemType'].lower() not in ['recommendersystem','textsimilarity','associationrules','llmfinetuning']:
temp['MachineLearningModels'] = request.POST.getlist('MachineLearningModels')
temp['DeepLearningModels'] = request.POST.getlist('SelectDeepLearningModels')
elif temp['ProblemType'].lower() == 'llmfinetuning':
temp['MachineLearningModels'] = request.POST.getlist('MachineLearningModels')
model = temp['MachineLearningModels'][0]
supportedModelsSize = configSettingsJson['basic']['modelSize'][temp['ProblemType']][model]
selectedModelSize = request.POST.get('modelSize')
for x in supportedModelsSize.keys():
configSettingsJson['basic']['modelSize'][temp['ProblemType']][model][x] = 'False'
configSettingsJson['basic']['modelSize'][temp['ProblemType']][model][selectedModelSize] = 'True'
temp['noofforecasts'] = request.POST.get('noofforecasts')
temp['inlierLabels'] = request.POST.get('inlierLabels')
#temp['filterExpression'] = request.POST.get('filterExpression')
if temp['ProblemType'].lower() in ['clustering','topicmodelling','similarityidentification','contextualsearch']:
temp['TargetFeatures'] = ''
configSettingsJson['basic']['modelName'] = temp['ModelName']
configSettingsJson['basic']['modelVersion'] = temp['Version']
configSettingsJson['basic']['dataLocation'] = str(temp['dataLocation'])
configSettingsJson['basic']['deployLocation'] = DEPLOY_LOCATION
if configSettingsJson['basic']['preprocessing']['llmFineTuning']['unstructuredData'] == 'False':
configSettingsJson['basic']['trainingFeatures'] = ",".join([model for model in temp['InputFeatures']])
configSettingsJson['basic']['dateTimeFeature'] = ",".join([model for model in temp['DateTimeFeatures']])
configSettingsJson['basic']['targetFeature'] = ",".join([model for model in temp['TargetFeatures']])
configSettingsJson['basic']['indexFeature'] = ",".join([model for model in temp['IndexFeatures']])
                # ast.literal_eval is safer than eval() for these repr()'d JSON dicts
                if filterjson == 'NA':
                    configSettingsJson['basic']['filter'] = 'NA'
                else:
                    configSettingsJson['basic']['filter'] = ast.literal_eval(filterjson)
                if timegroupingjson == 'NA':
                    configSettingsJson['basic']['timegrouper'] = 'NA'
                else:
                    configSettingsJson['basic']['timegrouper'] = ast.literal_eval(timegroupingjson)
                if groupingjson == 'NA':
                    configSettingsJson['basic']['group'] = 'NA'
                else:
                    configSettingsJson['basic']['group'] = ast.literal_eval(groupingjson)
problemtyp = configSettingsJson['basic']['analysisType']
for i in list(problemtyp.keys()):
configSettingsJson['basic']['analysisType'][i]='False'
algorithm = configSettingsJson['basic']['algorithms']
for i in list(algorithm.keys()):
for x in list(configSettingsJson['basic']['algorithms'][i].keys()):
if x not in ['textSimilarityConfig','itemRatingConfig','associationRulesConfig','textSummarization']:
configSettingsJson['basic']['algorithms'][i][x] = 'False'
configSettingsJson['basic']['analysisType'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]] = 'True'
# configSettingsJson['basic']['problem_type'] = temp['ProblemType']
scoring = configSettingsJson['basic']['scoringCriteria']
for i in list(scoring.keys()):
for x in list(configSettingsJson['basic']['scoringCriteria'][i].keys()):
configSettingsJson['basic']['scoringCriteria'][i][x] = 'False'
if temp['ProblemType'].lower() in ["classification","regression","survivalanalysis","similarityidentification","timeseriesforecasting","contextualsearch"]: #task 11997
configSettingsJson['basic']['scoringCriteria'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][temp['ScoringCriteria']] = 'True'
# configSettingsJson['basic']['problem_type'] = temp['ProblemType']
# configSettingsJson['basic']['scoringCriteria'] = temp['ScoringCriteria']
configSettingsJson['basic']['noofforecasts'] = temp['noofforecasts']
configSettingsJson['basic']['inlierLabels'] = temp['inlierLabels']
#configSettingsJson['basic']['filterExpression'] = temp['filterExpression']
configSettingsJson['basic']['algorithms']['recommenderSystem']['itemRatingConfig']['userID'] = temp['UserID']
configSettingsJson['basic']['algorithms']['recommenderSystem']['itemRatingConfig']['itemID'] = temp['ItemID']
configSettingsJson['basic']['algorithms']['recommenderSystem']['itemRatingConfig']['rating'] = temp['rating']
configSettingsJson['basic']['algorithms']['recommenderSystem']['textSimilarityConfig']['baseFeature'] = temp['firstDocFeature']
configSettingsJson['basic']['algorithms']['recommenderSystem']['textSimilarityConfig']['comparisonFeature'] = temp['secondDocFeature']
configSettingsJson['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['invoiceNoFeature'] = temp['invoiceNoFeature']
configSettingsJson['basic']['algorithms']['recommenderSystem']['associationRulesConfig']['itemFeature'] = temp['itemFeature']
for x in temp['MachineLearningModels']:
if temp['ProblemType'].lower() =='associationrules' or temp['ProblemType'].lower() == 'textsimilarity':
temp['ProblemType'] = 'recommenderSystem'
                    if request.POST.get('SearchType') not in ('NAS', 'GoogleModelSearch', 'AutoGluon'):
configSettingsJson['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][x] = 'True'
#for y in temp['DeepLearningModels']:
# configSettingsJson['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]][y] = 'True'
|
configSettingsJson['basic']['output']['profilerStage'] = 'True'
configSettingsJson['basic']['output']['selectorStage'] = 'True'
for key in configSettingsJson['advance']['profiler']['textConversionMethod']:
configSettingsJson['advance']['profiler']['textConversionMethod'][key] = 'False'
if temp['ProblemType'].lower() != 'topicmodelling':
configSettingsJson['advance']['profiler']['textConversionMethod']['TF_IDF'] ='True'
else:
configSettingsJson['advance']['profiler']['textConversionMethod']['CountVectors'] ='True'
#print('============================')
#print(temp['ProblemType'].lower())
#print('============================')
if temp['ProblemType'].lower() == 'textsummarization':
configSettingsJson['basic']['algorithms']['textSummarization']['Text Summarization'] = 'True'
configSettingsJson['basic']['textSummarization']['KeyWords'] = str(request.POST.get('addKeywordsForSummarization'))
configSettingsJson['basic']['textSummarization']['pathForKeywordFile'] = str(request.POST.get('DataFilePath'))
if temp['ProblemType'].lower() not in ['recommendersystem','textsummarization','llmfinetuning']:
if configSettingsJson['basic']['onlineLearning'] != 'True' and configSettingsJson['basic']['distributedLearning'] != 'True':
jsonarr =request.POST.get('jsonarr')
res = ast.literal_eval(jsonarr)
for x in res:
if x['type'].lower() == 'text':
configSettingsJson['advance']['selector']['featureSelection']['allFeatures'] = 'False'
configSettingsJson['advance']['selector']['featureSelection']['statisticalBased'] = 'True'
configSettingsJson['advance']['selector']['featureSelection']['modelBased'] = 'False'
if len(request.POST.get('traindfeatures').split(',')) > 30:
configSettingsJson['advance']['selector']['featureSelection']['allFeatures'] = 'False'
configSettingsJson['advance']['selector']['featureSelection']['statisticalBased'] = 'True'
configSettingsJson['advance']['selector']['featureSelection']['modelBased'] = 'False'
configSettingsJson['advance']['profiler']['featureDict'] = res
configSettingsJson['basic']['indexFeature'] = request.POST.get('indexfeatures')
configSettingsJson['basic']['trainingFeatures'] = request.POST.get('traindfeatures')
configSettingsJson['basic']['dateTimeFeature'] = request.POST.get('datefeatures')
if request.POST.get('SearchType') == 'GoogleModelSearch':
configSettingsJson['basic']['algorithms'][temp['ProblemType'][0].lower() + temp['ProblemType'][1:]]['GoogleModelSearch_DNN'] = 'True'
configSettingsJson['basic']['output']['profilerStage']= 'True'
#---------- Time series Changes Task 13052 -----------------
if temp['ProblemType'].lower() == 'timeseriesforecasting':
configSettingsJson = set_ts_preprocessing(request,configSettingsJson)
            status, msg = start_check(configSettingsJson)
            updatedConfigSettings = json.dumps(configSettingsJson)
            updatedConfigFile = request.session['config_json']
            with open(updatedConfigFile, "w") as fpWrite:
                fpWrite.write(updatedConfigSettings)
request.session['ModelStatus'] = 'Not Trained'
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
request.session['currentstate'] = 1
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'ProblemType',prbType)
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'Operation','Configured')
context = {'tab': 'configure', 'temp': temp,'advconfig': configSettingsJson,
'basic_status_msg': 'Configuration Done',
'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion,
'currentstate': request.session['currentstate'], 'selected': 'modeltraning','training':True,'basic_help':ht.basic_help}
# return render(request, 'basicconfig.html', context)
if submittype == 'BasicDefault':
temp = {}
temp['ModelName'] = request.session['UseCaseName']
temp['Version'] = request.session['ModelVersion']
dataLocation = str(request.session['datalocation'])
df = pd.read_csv(dataLocation, encoding='latin1')
featuresList = df.columns.values.tolist()
datetimeFeatures = []
sequenceFeatures = []
unimportantFeatures = []
featuresRatio = {}
            for i in featuresList:
                # 'ea' (exploratory-analysis helpers) is not imported in this file; it is
                # assumed to be provided at module level elsewhere in this package.
                check = ea.match_date_format(df[i])
                if check:
                    datetimeFeatures.append(i)
                    unimportantFeatures.append(i)
                seq_check = ea.check_seq_feature(df[i])
                if seq_check:
                    sequenceFeatures.append(i)
                    unimportantFeatures.append(i)
ratio = ea.check_category(df[i])
if ratio != 0:
featuresRatio[i] = ratio
else:
unimportantFeatures.append(i)
targetFeature = min(featuresRatio, key=featuresRatio.get)
unimportantFeatures.append(targetFeature)
config = {}
config['modelName'] = request.session['UseCaseName']
config['modelVersion'] = request.session['ModelVersion']
config['datetimeFeatures'] = datetimeFeatures
config['sequenceFeatures'] = sequenceFeatures
config['FeaturesList'] = featuresList
config['unimportantFeatures'] = unimportantFeatures
config['targetFeature'] = targetFeature
request.session['currentstate'] = 1
context = {'tab': 'configure', 'temp': temp, 'config': config,
'currentstate': request.session['currentstate'], 'selected': 'modeltraning'}
    except Exception as e:
        print(e)
        import sys
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
        # ensure the caller always receives a complete (status, msg, context) triple
        status, msg, context = 'error', str(e), {}
    return status, msg, context
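# save() returns a (status, msg, context) triple: status/msg come from
# appbe.checkConfiguration.start_check on the assembled config, and context carries
# the template variables for the configure tab.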
def openbasicconf(request):
# 10012:Decision Threshold related Changes
data_is_under_RAM_threshold = True
    updatedConfigFile = request.session['config_json']
    with open(updatedConfigFile, "r") as f:
        configSettingsData = f.read()
    configSettingsJson = json.loads(configSettingsData)
temp = {}
# temp['ModelName'] = request.session['UseCaseName']
# temp['Version'] = request.session['ModelVersion']
    if request.session['datatype'] in ('Video', 'Image', 'Document'):
folderLocation = str(request.session['datalocation'])
dataFile = os.path.join(folderLocation, request.session['csvfullpath'])
else:
dataFile = str(request.session['datalocation'])
# -------------------------------- 10012:Decision Threshold related Changes S T A R T -------------------------------
from appbe.dataIngestion import checkRAMThreshold
data_is_under_RAM_threshold = checkRAMThreshold(request.session['datalocation'])
# ------------------------------------------------------ E N D ------------------------------------------------------
    # Retraining settings changes
# -------- S T A R T --------
IsReTrainingCase = False
if request.session['IsRetraining'] == 'Yes':
IsReTrainingCase = True
IsSameFeatures = True
# ---------------------------
featuresList = configSettingsJson['basic']['featureList']
unimportantFeatures = []
modelfeatures = configSettingsJson['basic']['trainingFeatures']
for x in featuresList:
if x not in modelfeatures:
unimportantFeatures.append(x)
config = {}
config['ModelName'] = request.session['usecaseid']
config['Version'] = request.session['ModelVersion']
config['datetimeFeatures'] = configSettingsJson['basic']['dateTimeFeature'] # .split(",")
if configSettingsJson['basic']['indexFeature']:
config['sequenceFeatures'] = configSettingsJson['basic']['indexFeature'] # .split(",")
config['FeaturesList'] = featuresList
config['unimportantFeatures'] = unimportantFeatures
config['targetFeature'] = configSettingsJson['basic']['targetFeature'].split(",")
problemtypes = configSettingsJson['basic']['analysisType']
onlineLearning = configSettingsJson['basic']['onlineLearning']
problem_type = ""
for k in problemtypes.keys():
if configSettingsJson['basic']['analysisType'][k] == 'True':
problem_type = k
break
#print('123',problem_type)
config['ProblemType'] = problem_type
# config['ProblemType'] = configSettingsJson['basic']['problem_type']
scoring = configSettingsJson['basic']['scoringCriteria']
scoringCriteria = ""
for k in scoring.keys():
if configSettingsJson['basic']['scoringCriteria'][k] == 'True':
scoringCriteria = k
break
config['ScoringCriteria'] = scoringCriteria
# config['ProblemType'] = configSettingsJson['basic']['problem_type']
# config['ScoringCriteria'] = configSettingsJson['basic']['scoringCriteria']
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
if 'NoOfRecords' in request.session:
records = request.session['NoOfRecords']
else:
records = 'NA'
if request.session['finalstate'] <= 1:
request.session['finalstate'] = 1
request.session['currentstate'] = 1
# dataFile = str(request.session['datalocation'])
# df = pd.read_csv(dataFile,encoding='utf8')
    # both branches of the original check assigned the same value
    noofforecast = 20
    config['noofforecasts'] = noofforecast
if 'numericFeature' in request.session:
numericFeature = request.session['numericFeature']
else:
numericFeature = ''
problemType = 'classification'
for key in configSettingsJson['basic']['analysisType']:
if configSettingsJson['basic']['analysisType'][key] == 'True':
problemType = key
break
scoringCreteria = 'NA'
if problemType in ['classification','regression','survivalAnalysis','timeSeriesForecasting']: #task 11997
for key in configSettingsJson['basic']['scoringCriteria'][problemType]:
if configSettingsJson['basic']['scoringCriteria'][problemType][key] == 'True':
scoringCreteria = key
break
selectAlgo = ""
if problemType in ['classification','regression','timeSeriesForecasting',
'timeSeriesAnomalyDetection',
'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition','llmFineTuning']: #task 11997
for key in configSettingsJson['basic']['algorithms'][problemType]:
if configSettingsJson['basic']['algorithms'][problemType][key] == 'True':
if selectAlgo != "":
selectAlgo += ','
selectAlgo += key
modelSize = ''
if problemType == 'llmFineTuning':
for key in configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo].keys():
if configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo][key] == 'True':
modelSize = key
break
featuresdict = [feature['feature'] for feature in configSettingsJson['advance']['profiler']['featureDict']]
context = {'tab': 'tabconfigure','modelSize':modelSize,'featuresdict':featuresdict, 'configsettings': configSettingsJson, 'temp': temp, 'config': config,'numericFeature':numericFeature,'onlineLearning':onlineLearning,
'noOfRecords': records, 'selected_use_case': selected_use_case, 'ModelStatus': ModelStatus,'problemType':problemType,'scoringCreteria':scoringCreteria,'selectAlgo':selectAlgo,
'ModelVersion': ModelVersion, 'currentstate': request.session['currentstate'],
'finalstate': request.session['finalstate'], 'selected': 'modeltraning','IsSameFeatures':IsSameFeatures,'IsReTrainingCase':IsReTrainingCase,'basic_help':ht.basic_help
# 10012:Decision Threshold related changes
, 'DLCheckpoint':data_is_under_RAM_threshold}
return context
def gotoconf(request):
selected_use_case = request.session['UseCaseName']
ModelVersion = request.session['ModelVersion']
ModelStatus = request.session['ModelStatus']
try:
# 10012:Decision Threshold related Changes
data_is_under_RAM_threshold = True
ModelName = usecasedetails.objects.get(id=request.session['ModelName'])
Version = request.session['ModelVersion']
import os
if request.session['datatype'] in ['Video', 'Image','Document','Object']:
folderLocation = str(request.session['datalocation'])
dataFile = os.path.join(folderLocation, request.session['csvfullpath'])
else:
dataFile = str(request.session['datalocation'])
# -------------------------------- 10012:Decision Threshold related Changes S T A R T -------------------------------
from appbe.dataIngestion import checkRAMThreshold
data_is_under_RAM_threshold = checkRAMThreshold(request.session['datalocation'])
# ------------------------------------------------------ E N D ------------------------------------------------------
if request.session['datatype'] not in ['LLM_Document','LLM_Code']:
from appbe.eda import ux_eda
if 'delimiter' not in request.session:
request.session['delimiter'] = ','
if 'textqualifier' not in request.session:
request.session['textqualifier'] = '"'
eda_obj = ux_eda(dataFile,request.session['delimiter'],request.session['textqualifier'],optimize=1)
featuresList,datetimeFeatures,sequenceFeatures,constantFeature,textFeature,targetFeature,numericCatFeatures,numericFeature,catFeatures = eda_obj.getFeatures()
else:
featuresList = []
featuresList.append('Instruction')
datetimeFeatures=[]
sequenceFeatures=[]
constantFeature=[]
textFeature=[]
targetFeature='Response'
numericCatFeatures = []
numericFeature=[]
catFeatures=[]
featuresListJson = []
for x in featuresList:
featureOperation={}
featureOperation['feature'] = x
if x in datetimeFeatures:
|
featureOperation['type'] = 'date'
featureOperation['fillMethod'] = 'na'
featureOperation['categoryEncoding'] = 'na'
elif x in textFeature:
featureOperation['type'] = 'text'
featureOperation['fillMethod'] = 'na'
featureOperation['categoryEncoding'] = 'na'
elif x in sequenceFeatures:
featureOperation['type'] = 'index'
featureOperation['fillMethod'] = 'median'
featureOperation['categoryEncoding'] = 'na'
elif (x in catFeatures) or (x in constantFeature):
featureOperation['type'] = 'categorical'
featureOperation['fillMethod'] = 'mode'
featureOperation['categoryEncoding'] = 'targetEncoding'
else:
featureOperation['type'] = 'numerical'
featureOperation['fillMethod'] = 'medium'
featureOperation['categoryEncoding'] = 'na'
featureOperation['outlierDetection'] = 'disable'
featureOperation['outlierOperation'] = 'nochange'
featureOperation['normalizer'] = 'none'
featuresListJson.append(featureOperation)
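        # Illustrative shape of one featuresListJson entry built above (hypothetical
        # feature name; the remaining values are the defaults assigned in this loop):
        # {'feature': 'age', 'type': 'numerical', 'fillMethod': 'medium',
        #  'categoryEncoding': 'na', 'outlierDetection': 'disable',
        #  'outlierOperation': 'nochange', 'normalizer': 'none'}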
request.session['numericFeature'] = numericFeature
records = 0
import os
if os.path.isfile(dataFile):
for chunk in pd.read_csv(dataFile, chunksize=20000,encoding="utf-8",encoding_errors= 'replace'):
records = records+len(chunk)
request.session['NoOfRecords'] = records
filetimestamp = str(int(time.time()))
CONFIG_FILE_PATH = request.session['configfilepath']
config_json_filename = os.path.join(CONFIG_FILE_PATH, 'AION_' + filetimestamp + '.json')
outputfile = os.path.join(CONFIG_FILE_PATH, 'AION_OUTPUT_' + filetimestamp + '.json')
request.session['outputfilepath'] = str(outputfile)
modelname = request.session['usecaseid']
modelname = modelname.replace(" ", "_")
DEPLOY_LOCATION = request.session['deploylocation']
request.session['logfilepath'] = os.path.join(DEPLOY_LOCATION, modelname,str(Version),'log','model_training_logs.log')
request.session['config_json'] = config_json_filename
#request.session['ModelVersion'] = Version
request.session['ModelStatus'] = 'Not Trained'
# p = Existusecases(DataFilePath=dataFile, DeployPath=DEPLOY_LOCATION, Status='Not Trained',
# ConfigPath=config_json_filename, Version=Version, ModelName=ModelName,
# TrainOuputLocation=outputfile)
# p.save()
# from AION_UX import telemetry
# telemetry.telemetry_data('UseCaseCreated',modelname+'_'+str(Version),'UseCaseCreated')
# request.session['modelid'] = p.id
temp = {}
temp['ModelName'] = request.session['usecaseid']
temp['Version'] = request.session['ModelVersion']
'''
featuresList = features #df.columns.values.tolist()
datetimeFeatures =
datetimeFeatures = []
sequenceFeatures = []
unimportantFeatures = []
featuresRatio = {}
for i in featuresList:
check = ea.match_date_format(df[i])
if check == True:
datetimeFeatures.append(i)
unimportantFeatures.append(i)
seq_check = ea.check_seq_feature(df[i])
if seq_check == True:
sequenceFeatures.append(i)
unimportantFeatures.append(i)
ratio = ea.check_category(df[i])
if ratio != 0:
featuresRatio[i] = ratio
else:
unimportantFeatures.append(i)
targetFeature = min(featuresRatio, key=featuresRatio.get)
unimportantFeatures.append(targetFeature)
'''
unimportantFeatures = list(datetimeFeatures)
unimportantFeatures.extend(sequenceFeatures)
#unimportantFeatures = list(set(unimportantFeatures) + set(sequenceFeatures))
unimportantFeatures.append(targetFeature)
config = {}
noofforecast = 20
config['ModelName'] = request.session['usecaseid']
config['Version'] = request.session['ModelVersion']
config['datetimeFeatures'] = datetimeFeatures
config['sequenceFeatures'] = sequenceFeatures
config['FeaturesList'] = featuresList
config['unimportantFeatures'] = unimportantFeatures
config['targetFeature'] = targetFeature
config['noofforecasts'] = noofforecast
DEFAULT_FILE_PATH = request.session['defaultfilepath']
        # Retraining settings changes
# -------- S T A R T --------
IsReTrainingCase = False
if request.session['IsRetraining'] == 'Yes':
id = request.session['ModelName']
p = usecasedetails.objects.get(id=id)
model = Existusecases.objects.filter(ModelName=p)
indexVal = model.count() - 1
configFile = str(model[indexVal].ConfigPath)
# configFile = str(model[0].ConfigPath)
# request.session['IsRetraining'] = 'No'
IsReTrainingCase = True
# ---------------------------
else:
configFile = os.path.join(DEFAULT_FILE_PATH, 'aion_config.json')
f = open(configFile, "r")
configSettings = f.read()
f.close()
configSettingsJson = json.loads(configSettings)
        # Retraining settings changes
# -------- S T A R T --------
pickDefaultSettings = False
IsSameFeatures = False
if 'featureList' not in configSettingsJson['basic']:
pickDefaultSettings = True
IsSameFeatures = True
else:
if configSettingsJson['basic']['featureList'] == featuresList:
pickDefaultSettings = False
IsSameFeatures = True
else:
pickDefaultSettings = True
if pickDefaultSettings:
# ---------------------------
configSettingsJson['basic']['featureList'] = featuresList
            configSettingsJson['basic']['dateTimeFeature'] = ",".join(datetimeFeatures)
configSettingsJson['basic']['indexFeature'] = sequenceFeatures
trainingFeatures = list(set(featuresList) - set(unimportantFeatures))
            configSettingsJson['basic']['trainingFeatures'] = ",".join(trainingFeatures)
configSettingsJson['basic']['targetFeature'] = targetFeature
if request.session['datatype'].lower() in ['video','image','object','document','llm_document','llm_code']:
for x in configSettingsJson['basic']['analysisType'].keys():
configSettingsJson['basic']['analysisType'][x] = 'False'
configSettingsJson['basic']['folderSettings']['fileType'] = request.session['datatype']
configSettingsJson['basic']['folderSettings']['labelDataFile'] = request.session['csvfullpath']
configSettingsJson['basic']['folderSettings']['fileExtension'] = request.session['fileExtension']
if request.session['datatype'] in ['LLM_Document','LLM_Code']:
configSettingsJson['basic']['analysisType']['llmFineTuning'] = 'True'
configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['prompt']='Instruction'
configSettingsJson['basic']['preprocessing']['llmFineTuning']['friendlyNames']['response']='Response'
configSettingsJson['basic']['preprocessing']['llmFineTuning']['unstructuredData'] = 'True'
elif request.session['datatype'] == 'Video':
configSettingsJson['basic']['analysisType']['videoForecasting'] = 'True'
elif request.session['datatype'] == 'Image':
configSettingsJson['basic']['analysisType']['imageClassification'] = 'True'
elif request.session['datatype'] == 'Object':
configSettingsJson['basic']['analysisType']['objectDetection'] = 'True'
elif request.session['datatype'].lower() == 'document':
df = pd.read_csv(dataFile, encoding='utf8',sep=request.session['delimiter'],quotechar=request.session['textqualifier'],nrows=100)
            noOfEmptyLabels = 0
            shape = df.shape
            if shape[1] == 2:
                noOfEmptyLabels = df['Label'].isnull().sum()
            #print(noOfEmptyLabels)
            if noOfEmptyLabels == 100:
                configSettingsJson['basic']['analysisType']['topicModelling'] = 'True'
            else:
                configSettingsJson['basic']['analysisType']['classification'] = 'True'
else:
if 'uploadfiletype' in request.session:
configSettingsJson['basic']['folderSettings']['fileType'] = request.session['uploadfiletype']
configSettingsJson['basic']['folderSettings']['labelDataFile'] = request.session['uploadLocation']
try:
if isinstance(datetimeFeatures, list):
if len(datetimeFeatures) != 0:
configSettingsJson = update_granularity(configSettingsJson,datapath=dataFile)
elif isinstance(datetimeFeatures, str):
if datetimeFeatures != '':
configSettingsJson = update_granularity(configSettingsJson,datapath=dataFile)
except:
pass
        # Retraining settings changes
# -------- S T A R T --------
tot_count=len(numericCatFeatures)
#task 11997
if (tot_count > 1):
configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'True'
configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'False'
else:
configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['uniVariate'] = 'True'
configSettingsJson['basic']['analysisApproach']['timeSeriesAnomalyDetection']['AutoEncoder']['multiVariate'] = 'False'
if 'delimiter' in request.session:
configSettingsJson['basic']['fileSettings']['delimiters'] = request.session['delimiter']
else:
configSettingsJson['basic']['fileSettings']['delimiters'] = ','
if 'textqualifier' in request.session:
configSettingsJson['basic']['fileSettings']['textqualifier'] = request.session['textqualifier']
        else:
            request.session['textqualifier'] = '"'
            # mirror the delimiter default above so the config always carries a qualifier
            configSettingsJson['basic']['fileSettings']['textqualifier'] = '"'
configSettingsJson['advance']['profiler']['featureDict'] = featuresListJson
configSettingsJson['basic']['onlineLearning'] = 'False'
configSettingsJson['basic']['dataLocation'] = request.session['datalocation']
configSettingsJson['basic']['noOfRecords'] = request.session['NoOfRecords']
onlineLearning = configSettingsJson['basic']['onlineLearning']
updatedConfigSettings = json.dumps(configSettingsJson)
        with open(config_json_filename, "w") as fpWrite:
            fpWrite.write(updatedConfigSettings)
'''
p = Existusecases(DataFilePath=dataFile, DeployPath=DEPLOY_LOCATION, Status='Not Trained',
ConfigPath=config_json_filename, Version=Version, ModelName=ModelName,
TrainOuputLocation=outputfile)
p.save()
'''
p = Existusecases.objects.get(ModelName=ModelName,Version=Version)
p.DataFilePath = dataFile
p.DeployPath = DEPLOY_LOCATION
p.ConfigPath = config_json_filename
p.TrainOuputLocation = outputfile
p.save()
#from appbe import telemetry
#telemetry.telemetry_data('UseCaseCreated',modelname+'_'+str(Version),'UseCaseCreated')
request.session['modelid'] = p.id
# ---------------------------
from appbe.compute import selectedInfratructure
infra = selectedInfratructure()
if infra.lower() in ['aws','gcp']:
problemType = 'llmFineTuning'
else:
problemType = 'classification'
#print(problemType)
for key in configSettingsJson['basic']['analysisType']:
if configSettingsJson['basic']['analysisType'][key] == 'True':
problemType = key
break
scoringCreteria = 'NA'
if problemType in ['classification','regression','survivalAnalysis','timeSeriesForecasting']: #task 11997
for key in configSettingsJson['basic']['scoringCriteria'][problemType]:
if configSettingsJson['basic']['scoringCriteria'][problemType][key] == 'True':
scoringCreteria = key
break
selectAlgo = ""
if problemType in ['classification','regression','timeSeriesForecasting','timeSeriesAnomalyDetection',
'recommenderSystem','clustering','anomalyDetection','topicModelling','survivalAnalysis','videoForecasting','imageClassification','objectDetection','stateTransition','llmFineTuning']: #task 11997
for key in configSettingsJson['basic']['algorithms'][problemType]:
if configSettingsJson['basic']['algorithms'][problemType][key] == 'True':
if selectAlgo != "":
selectAlgo += ','
selectAlgo += key
modelSize = ''
if problemType == 'llmFineTuning':
for key in configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo].keys():
if configSettingsJson['basic']['modelSize']['llmFineTuning'][selectAlgo][key] == 'True':
modelSize = key
break
movenext = True
request.session['finalstate'] = 1
request.session['currentstate'] = 1
context = {'tab': 'tabconfigure','modelSize':modelSize,'tot_count':tot_count, 'temp': temp, 'configsettings': configSettingsJson, 'config': config,'numericFeature':numericFeature,'onlineLearning':onlineLearning,
'noOfRecords': records, 'selected_use_case': selected_use_case,'problemType':problemType,'scoringCreteria':scoringCreteria,'selectAlgo':selectAlgo,'ModelStatus': ModelStatus, 'ModelVersion': ModelVersion, 'movenext': movenext,
'currentstate': request.session['currentstate'], 'finalstate': request.session['finalstate'],
'selected': 'modeltraning','advance':True,'basic_help':ht.basic_help
        # Retraining settings changes
,'IsSameFeatures':IsSameFeatures,'IsReTrainingCase':IsReTrainingCase
# 10012:Decision Threshold related
,'DLCheckpoint':data_is_under_RAM_threshold}
return context
except UnicodeDecodeError as e:
print(e)
context = {'tab': 'tabconfigure','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'ModelStatus': ModelStatus,'selected': 'modeltraning','error': 'File Reading Error: '+str(e)}
return context
except Exception as e:
print(e)
import sys,os
exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno)) |
context = {'tab': 'tabconfigure','selected_use_case': selected_use_case,'ModelVersion': ModelVersion,'ModelStatus': ModelStatus,'selected': 'modeltraning','error': 'Config Error: '+str(e)}
        return context
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import numpy as np
import json
import os
def downloadtrainingfile(request,Existusecases):
usename = request.session['UseCaseName'].replace(" ", "_") + '_' + str(request.session['ModelVersion'])
    updatedConfigFile = request.session['config_json']
    with open(updatedConfigFile, "r", encoding="utf-8") as f:
        configSettingsData = f.read()
    configSettingsJson = json.loads(configSettingsData)
modelName = request.session['UseCaseName']
modelVersion = request.session['ModelVersion']
modelStatus = request.session['ModelStatus']
model = Existusecases.objects.get(ModelName=request.session['ModelName'],Version=request.session['ModelVersion'])
output_train_json_filename = str(model.TrainOuputLocation)
    with open(output_train_json_filename, "r") as f:
        training_output = f.read()
    training_output = json.loads(training_output)
    dfdashbord = pd.DataFrame({'Attribute': [], 'Value': []})
dfdashbord.loc[len(dfdashbord.index)] = ['UseCaseName',modelName]
dfdashbord.loc[len(dfdashbord.index)] = ['ProblemType',training_output['data']['ModelType']]
dfdashbord.loc[len(dfdashbord.index)] = ['Version',str(modelVersion)]
dfdashbord.loc[len(dfdashbord.index)] = ['Status',modelStatus]
if 'vmDetails' in training_output['data']:
dfdashbord.loc[len(dfdashbord.index)] = ['DeployLocation', training_output['data']['vmDetails']]
else:
dfdashbord.loc[len(dfdashbord.index)] = ['DeployLocation',training_output['data']['deployLocation']]
dfdashbord.loc[len(dfdashbord.index)] = ['BestModel',training_output['data']['BestModel']]
dfdashbord.loc[len(dfdashbord.index)] = ['BestScore',training_output['data']['BestScore']]
dfdashbord.loc[len(dfdashbord.index)] = ['ScoringParam',training_output['data']['ScoreType']]
if training_output['data']['ModelType'] != 'LLM Fine-Tuning':
dfdashbord.loc[len(dfdashbord.index)] = ['Test%',configSettingsJson['advance']['testPercentage']]
dfdashbord.loc[len(dfdashbord.index)] = ['FeaturesUsed',training_output['data']['featuresused']]
from io import BytesIO as IO
excel_file = IO()
edaFileName = usename + '_training.xlsx'
excel_writer = pd.ExcelWriter(excel_file, engine="xlsxwriter")
dfdashbord.to_excel(excel_writer, sheet_name='Dashboard',index=False)
if training_output['data']['ModelType'].lower() != 'multimodellearning' and training_output['data']['ModelType'].lower() != 'multilabelprediction':
EvaluatedModels = training_output['data']['EvaluatedModels']
EvaluatedModels = pd.DataFrame(EvaluatedModels)
EvaluatedModels.to_excel(excel_writer, sheet_name='EvaluatedModels',startrow=0 , startcol=0)
if training_output['data']['ModelType'].lower() == 'classification':
#print(training_output['data']['matrix'])
row1 = 10
row2 = 10
if 'ConfusionMatrix' in training_output['data']['matrix']:
confusionMatrix = training_output['data']['matrix']['ConfusionMatrix']
confusionMatrix = pd.DataFrame(confusionMatrix)
confusionMatrix.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0)
row1 =confusionMatrix.shape[0]+5
if 'ConfusionMatrix' in training_output['data']['trainmatrix']:
confusionMatrix = training_output['data']['trainmatrix']['ConfusionMatrix']
confusionMatrix = pd.DataFrame(confusionMatrix)
confusionMatrix.to_excel(excel_writer, sheet_name='Training Matrix',startrow=0 , startcol=0)
if 'ClassificationReport' in training_output['data']['matrix']:
confusionMatrix = training_output['data']['matrix']['ClassificationReport']
confusionMatrix = pd.DataFrame(confusionMatrix)
confusionMatrix.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=row1 , startcol=0)
if 'ClassificationReport' in training_output['data']['trainmatrix']:
confusionMatrix = training_output['data']['trainmatrix']['ClassificationReport']
confusionMatrix = pd.DataFrame(confusionMatrix)
confusionMatrix.to_excel(excel_writer, sheet_name='Training Matrix',startrow=row2 , startcol=0)
if training_output['data']['ModelType'].lower() == 'regression':
        metric_columns = {'Attribute': [], 'Value': []}
        testingDF = pd.DataFrame(metric_columns)
try:
testingDF.loc[len(testingDF.index)] = ['MAE',training_output['data']['matrix']['MAE']]
testingDF.loc[len(testingDF.index)] = ['R2Score',training_output['data']['matrix']['R2Score']]
testingDF.loc[len(testingDF.index)] = ['MSE',training_output['data']['matrix']['MSE']]
testingDF.loc[len(testingDF.index)] = ['MAPE',training_output['data']['matrix']['MAPE']]
testingDF.loc[len(testingDF.index)] = ['RMSE',training_output['data']['matrix']['RMSE']]
except:
pass
testingDF.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0)
        trainingDF = pd.DataFrame(metric_columns)
try:
trainingDF.loc[len(trainingDF.index)] = ['MAE',training_output['data']['trainmatrix']['MAE']]
trainingDF.loc[len(trainingDF.index)] = ['R2Score',training_output['data']['trainmatrix']['R2Score']]
trainingDF.loc[len(trainingDF.index)] = ['MSE',training_output['data']['trainmatrix']['MSE']]
trainingDF.loc[len(trainingDF.index)] = ['MAPE',training_output['data']['trainmatrix']['MAPE']]
trainingDF.loc[len(trainingDF.index)] = ['RMSE',training_output['data']['trainmatrix']['RMSE']]
except:
pass
trainingDF.to_excel(excel_writer, sheet_name='Training Matrix',startrow=0 , startcol=0)
if training_output['data']['ModelType'].lower() == 'clustering':
        trainingDF = pd.DataFrame({'Attribute': [], 'Value': []})
try:
trainingDF.loc[len(trainingDF.index)] = ['SilHouette_Avg',round(training_output['data']['trainmatrix']['SilHouette_Avg'],2)]
trainingDF.loc[len(trainingDF.index)] = ['DaviesBouldinScore',round(training_output['data']['trainmatrix']['DaviesBouldinScore'],2)]
trainingDF.loc[len(trainingDF.index)] = ['CalinskiHarabazScore',round(training_output['data']['trainmatrix']['CalinskiHarabazScore'],2)]
except:
pass
trainingDF.to_excel(excel_writer, sheet_name='Training Matrix',startrow=0 , startcol=0)
centroidpath = os.path.join(training_output['data']['deployLocation'],'centers.csv')
if(os.path.isfile(centroidpath)):
df_center = pd.read_csv(centroidpath)
df_center = df_center.rename(columns={"Unnamed: 0": "Cluster"})
df_center.to_excel(excel_writer, sheet_name='Centroid',startrow=0 , startcol=0)
if training_output['data']['ModelType'].lower() == 'timeseriesforecasting': #task 11997
if training_output['data']['BestModel'].lower() == 'var':
            trainingDF = pd.DataFrame({'Features': [], 'Attribute': [], 'Value': []})
FeaturesMatrix = training_output['data']['matrix']
for x in FeaturesMatrix:
try:
trainingDF.loc[len(trainingDF.index)] = [x['Features'],'MAE',x['MAE']]
trainingDF.loc[len(trainingDF.index)] = [x['Features'],'MSE',x['MSE']]
trainingDF.loc[len(trainingDF.index)] = [x['Features'],'MAPE',x['MAPE']]
trainingDF.loc[len(trainingDF.index)] = [x['Features'],'RMSE',x['RMSE']]
except:
pass
trainingDF.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0)
else:
            trainingDF = pd.DataFrame({'Attribute': [], 'Value': []})
try:
trainingDF.loc[len(trainingDF.index)] = ['MAE',training_output['data']['matrix']['MAE']]
trainingDF.loc[len(trainingDF.index)] = ['MSE',training_output['data']['matrix']['MSE']]
trainingDF.loc[len(trainingDF.index)] = ['MAPE',training_output['data']['matrix']['MAPE']]
trainingDF.loc[len(trainingDF.index)] = ['RMSE',training_output['data']['matrix']['RMSE']]
except:
pass
trainingDF.to_excel(excel_writer, sheet_name='Testing Matrix',startrow=0 , startcol=0)
    excel_writer.close()
excel_file.seek(0)
return edaFileName,excel_file
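# downloadtrainingfile returns (filename, BytesIO) with the workbook already rewound to
# offset 0, so a caller can presumably stream it straight into an HTTP attachment response.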
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pyodbc as pyodbc
import pandas as pd
import json
def simple_select(c, sql_query, bind_params=None, display_sql=False):
    """Run sql_query on cursor c and return all rows as a list of dicts keyed by
    column name. bind_params, when given, are passed through to execute()."""
    if display_sql:
        print(sql_query)
    if bind_params is None:
        c.execute(sql_query)
    else:
        # the original only executed bound queries when display_sql was set; run them always
        c.execute(sql_query, bind_params)
    headers = []
    if c.description is not None:
        # We have a SELECT statement
        for x in c.description:
            headers.append(x[0])
    data = []
    row = c.fetchone()
    while row:
        xrow = {}
        for i in range(len(row)):
            xrow[headers[i]] = row[i]
        data.append(xrow)
        row = c.fetchone()
    return data
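# Usage sketch (illustrative values):
#   cur = conn.cursor()
#   rows = simple_select(cur, "select table_name from iitables where table_owner=?",
#                        bind_params=('aion',))
#   # -> [{'table_name': '...'}, ...]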
def validatequery(request,query):
resultdata = []
try:
server_url = request.session['server_url']
username_actian = request.session['username']
password_actian = request.session['password']
database_actian = request.session['database']
conn = get_connection(server_url,username_actian,password_actian,database_actian)
        cur = conn.cursor()
        resultdata = simple_select(cur, query)
cur.close()
if len(resultdata) > 0:
return "Query executed successfully"
else:
return "No rows returned"
except Exception as e:
print(e)
return str(e)
def executequery(request,query):
resultdata = []
try:
server_url = request.session['server_url']
username_actian = request.session['username']
password_actian = request.session['password']
database_actian = request.session['database']
conn = get_connection(server_url,username_actian,password_actian,database_actian)
        cur = conn.cursor()
        resultdata = simple_select(cur, query)
cur.close()
return(resultdata)
except Exception as e:
print(e)
return(resultdata)
def list_tables_fields(request,table_list):
table_field_obj = {}
table_field_obj['data'] = []
try:
server_url = request.session['server_url']
username_actian = request.session['username']
password_actian = request.session['password']
database_actian = request.session['database']
table_list = json.loads(table_list)
conn = get_connection(server_url,username_actian,password_actian,database_actian)
for table in table_list:
tf_obj = {}
tf_obj['TableName'] = str(table).strip()
tf_obj['Fields']= []
field_list = []
sql_text = "SELECT column_name, false as is_select FROM iicolumns WHERE table_name='"+table+"'"
cur = conn.cursor()
field_list = simple_select(cur, sql_text)
cur.close()
print(field_list)
tf_obj['Fields'] = field_list
table_field_obj['data'].append(tf_obj)
print("----------------------")
print(table_field_obj)
print(json.dumps(table_field_obj))
print("----------------------")
return json.dumps(table_field_obj)
except Exception as e:
print("Something went wrong "+str(e))
return table_field_obj
def list_tables(request):
server_url = request.session['server_url']
username_actian = request.session['username']
password_actian = request.session['password']
database_actian = request.session['database']
dt_list = []
try:
conn = get_connection(server_url,username_actian,password_actian,database_actian)
sql_text = "select table_name from iitables where table_type='T' and table_owner='"+username_actian+"'"
cur = conn.cursor()
dt_list = simple_select(cur, sql_text)
cur.close()
return dt_list
    except Exception as e:
        print("Something went wrong " + str(e))
return dt_list
def get_connection(server_url,username_actian,password_actian,database_actian):
conn = pyodbc.connect(" |
driver=Ingres;servertype=ingres;server=@"+str(server_url)+",tcp_ip,VW;uid="+str(username_actian)+";pwd="+str(password_actian)+";database="+str(database_actian))
print("connected")
return conn
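# The DSN-less connection string above targets the Actian/Ingres ODBC driver; it assumes
# a driver named 'Ingres' is registered with the local ODBC manager and that the server
# is reachable as '@<host>,tcp_ip,VW'.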
def getDataFromActianAvalanche(request):
server_url = request.POST.get('server_url')
username_actian = request.POST.get('username')
password_actian = request.POST.get('password')
database_actian = request.POST.get('database')
table_actian = request.POST.get('table')
conn = get_connection(server_url,username_actian,password_actian,database_actian)
c = conn.cursor()
sql_text = "select * from "+str(table_actian)
data = simple_select(c, sql_text)
df = pd.DataFrame(data)
    return df
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os.path
from pathlib import Path
import time
import subprocess
import sys
import shutil
from appbe.aion_config import kafka_setting
from appbe.aion_config import running_setting
from appbe.publish import chech_publish_info
from llm.llm_tuning import update_sqllite_data
from appbe.data_io import sqlite_db
from appbe.dataPath import DATA_DIR
from appbe import installPackage
from appbe import compute
import json
import os
import signal
from os.path import expanduser
import platform
import pandas as pd
LOG_FILE_PATH = os.path.join(DATA_DIR,'logs')
GITHUB_FILE_PATH = os.path.join(DATA_DIR,'github')
PUBLISH_PATH = os.path.join(DATA_DIR,'target')
DEPLOY_DATABASE_PATH = os.path.join(DATA_DIR,'sqlite')
os.makedirs(LOG_FILE_PATH, exist_ok=True)
'''
def check_publish_info(usecase,version):
sqlite_dbObj = sqlite_db(DEPLOY_DATABASE_PATH,'deploy.db')
if sqlite_dbObj.table_exists('publish'):
publishState= 'Published'
'''
def get_instance(modelID):
from appbe.sqliteUtility import sqlite_db
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if sqlite_obj.table_exists("LLMTuning"):
data = sqlite_obj.get_data('LLMTuning','usecaseid',modelID)
if len(data) > 0:
return (data[3],data[2],data[5],data[6],data[4])
else:
return '','','','',''
else:
return '','','','',''
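# get_instance returns a 5-tuple taken from columns 3, 2, 5, 6 and 4 of the matching
# 'LLMTuning' row (or five empty strings when the table or row is missing); the column
# meanings are defined by the schema elsewhere in the codebase.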
def startServices(request,usecasedetails,Existusecases):
try:
models = Existusecases.objects.filter(publishStatus='Published')
print(models)
if len(models) > 0:
for model in models:
try:
portNo = model.portNo
ppid = model.publishPID
if ppid == 0:
continue
try:
os.kill(int(model.publishPID), signal.SIGTERM)
except Exception as e:
print(e)
scriptPath = os.path.join(PUBLISH_PATH,model.ModelName.usecaseid,'aion_publish_service.py')
if os.path.exists(scriptPath):
outputStr = subprocess.Popen([sys.executable, scriptPath,'-ip','0.0.0.0','-p',str(portNo)])
model.publishStatus = 'Published'
model.publishPID = outputStr.pid
model.portNo = portNo
model.save()
else:
print("Pass")
pass
except Exception as e:
print(e)
except Exception as e:
print(e)
def publishmodel(request,usecaseid,version,Existusecases,usecasedetails):
portNo=0
usecased = usecasedetails.objects.get(usecaseid=usecaseid)
models = Existusecases.objects.filter(ModelName=usecased,publishStatus='Published')
if len(models) > 0:
for model in models:
try:
portNo = model.portNo
try:
os.kill(int(model.publishPID), signal.SIGTERM)
except Exception as e:
print(e)
mod = Existusecases.objects.get(id=model.id)
mod.publishStatus = ''
mod.publishPID = 0
mod.portNo = 0
mod.save()
except Exception as e:
                print(e)
missingNumbers = []
if portNo == 0:
models = Existusecases.objects.filter(publishStatus='Published')
usedPortNo=[]
for model in models:
usedPortNo.append(model.portNo)
startPortNo = 8091
endPortNo = 8091+5
missingNumbers = [ i for i in range(startPortNo,endPortNo) if i not in usedPortNo]
if len(missingNumbers) > 0:
portNo = missingNumbers[0]
if portNo != 0:
scriptPath = os.path.join(PUBLISH_PATH,usecaseid,'aion_publish_service.py')
model = Existusecases.objects.get(ModelName=usecased,Version=version)
isExist = os.path.exists(scriptPath)
if isExist:
configfile = os.path.join(PUBLISH_PATH,usecaseid,'config.json')
configdata = {'version': str(version)}
            with open(configfile, "w") as outfile:
                json.dump(configdata, outfile)
outputStr = subprocess.Popen([sys.executable, scriptPath,'-ip','0.0.0.0','-p',str(portNo)])
model.publishStatus = 'Published'
model.publishPID = outputStr.pid
model.portNo = portNo
model.save()
Status = 'SUCCESS'
hosturl =request.get_host()
hosturl = hosturl.split(':')
url = 'http://'+hosturl[0]+':'+str(portNo)+'/AION/'+str(usecaseid)+'/predict'
Msg = 'Model Published Successfully'
else:
Status = 'Error'
Msg = 'Model Published Error'
url = ''
else:
Status = 'Error'
Msg = 'All ports are utilized'
url=''
return Status,Msg,url
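# publishmodel allocates the first free port in the fixed window 8091-8095
# (range(8091, 8091 + 5)), launches aion_publish_service.py on it, and returns
# (Status, Msg, url) where url has the form http://<host>:<port>/AION/<usecaseid>/predict.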
def get_published_models(instanceid):
from appbe.sqliteUtility import sqlite_db
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if sqlite_obj.table_exists("LLMTuning"):
condition = f'"instance"=="{instanceid}" AND "status"=="Published"'
datas = sqlite_obj.read_data('LLMTuning',condition)
if len(datas)>0:
return True,datas[0][0]
return False,''
def maac_command(request,Existusecases,usecasedetails):
command = request.POST.get('maacsubmit')
kafkaSetting = kafka_setting()
ruuningSetting = running_setting()
computeinfrastructure = compute.readComputeConfig()
modelID = request.POST.get('modelID')
Version = request.POST.get('Version')
p = Existusecases.objects.get(id=modelID,Version=Version)
usecasename = p.ModelName.usecaseid #bugid 13339
usecaseid = p.ModelName.id
# runningStatus,pid,ip,port = installPackage.checkModelServiceRunning(usecasename)
# installationStatus,modelName,modelVersion=installPackage.checkInstalledPackge(usecasename)
usecasedetail = usecasedetails.objects.get(id=p.ModelName.id)
usecase = usecasedetails.objects.all()
problemType = p.ProblemType
score = 0
scoreType = ''
deployedModel = ''
deployedModelVersion = p.Version
models = Existusecases.objects.filter(ModelName=usecasedetail,Status='SUCCESS')
for model in models:
model.scoringCreteria = 'NA'
model.score = 'NA'
model.deploymodel = 'NA'
if os.path.isdir(str(model.DeployPath)):
modelPath = os.path.join(str(model.DeployPath),'etc','output.json')
try:
with open(modelPath) as file:
outputconfig = json.load(file)
file.close()
if outputconfig['status'] == 'SUCCESS':
if deployedModelVersion == model.Version:
problemType = outputconfig['data']['ModelType']
scoreType = outputconfig['data']['ScoreType']
score = outputconfig['data']['BestScore']
deployedModel = outputconfig['data']['BestModel']
model.scoringCreteria = outputconfig['data']['ScoreType']
model.score = outputconfig['data']['BestScore']
model.deploymodel = outputconfig['data']['BestModel']
model.maacsupport = 'True'
model.flserversupport = 'False'
supportedmodels = ["Logistic Regression","Neural Network","Linear Regression"]
if model.deploymodel in supportedmodels:
model.flserversupport = 'True'
else:
model.flserversupport = 'False'
supportedmodels = ["Extreme Gradient Boosting (XGBoost)"]
if model.deploymodel in supportedmodels:
model.encryptionsupport = 'True'
else:
model.encryptionsupport = 'False'
except Exception as e:
print(e)
pass
MLaaC_output = ''
if command == 'generatemaac':
deployPath = str(p.DeployPath)
codeconfig = os.path.join(deployPath,'etc','code_config.json')
if os.path.isfile(codeconfig):
with open(codeconfig,'r') as f:
cconfig = json.load(f)
f.close()
dbserver = request.POST.get('productiondb')
db_config = {}
if dbserver.lower() == 'influxdb':
cconfig['prod_db_type'] = 'influx'
db_config['host'] = request.POST.get('influxdbhost')
db_config['port'] = request.POST.get('influxdbportno')
db_config['user'] = request.POST.get('influxdbuser')
db_config['password'] = request.POST.get('influxpassword')
db_config['database'] = 'production'
db_config['measurement'] = usecasename
tags = {}
db_config['tags']=tags
cconfig['db_config'] = db_config
else:
cconfig['prod_db_type'] = 'sqlite'
cconfig['db_config'] = db_config
dbserver = request.POST.get('mlflowserver')
mlflow_config = {}
if dbserver.lower() == 'local':
cconfig['mlflow_config'] = mlflow_config
else:
mlflow_config['tracking_uri_type'] = request.POST.get('mlflowserverurl')
mlflow_config['tracking_uri'] = request.POST.get('mlflowserverurl')
mlflow_config['registry_uri'] = request.POST.get('mlflowserverurl')
mlflow_config['artifacts_uri'] = request.POST.get('mlflowserverurl')
cconfig['mlflow_config'] = mlflow_config
with open(codeconfig,'w') as f:
json.dump(cconfig, f)
f.close()
from bin.aion_mlac import generate_mlac_code
outputStr = generate_mlac_code(codeconfig)
output = json.loads(outputStr)
from appbe.telemetry import UpdateTelemetry
UpdateTelemetry(request.session['usecaseid']+'-'+str(request.session['ModelVersion']),'MLaC','Yes')
if output['Status'] == 'SUCCESS':
Status = 'SUCCESS'
MLaaC_output = output['MLaC_Location'].replace('\\', '\\\\')  # escape backslashes for display
Msg = 'MLaC code successfully generated'
else:
Status = 'Failure'
Msg = output['msg']
else:
Status = 'Failure'
Msg = 'Code Config Not Present'
if command == 'buildContainer':
deployPath = str(p.DeployPath)
maac_path = os.path.join(deployPath,'publish','MLaC')
if os.path.isdir(maac_path):
config={'usecase':str(usecasename),'version':str(p.Version),'mlacPath':maac_path}
config = json.dumps(config)
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','aion.py'))
if platform.system() == 'Windows':
outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','buildMLaCContainerLocal' ,'-j',config],creationflags = subprocess.CREATE_NEW_CONSOLE)
else:
outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','buildMLaCContainerLocal' ,'-j',config])
#cmd = scriptPath+" "+str(usecasename)+" "+str(p.Version)+" "+str(maac_path)
#subprocess.Popen(cmd,shell=True)
Status = 'SUCCESS'
Msg = 'Build Container Started'
else:
Status = 'Failure'
Msg = 'Run Code Generator'
if command == 'runpipeline':
deployPath = str(p.DeployPath)
dockerlist = os.path.join(deployPath,'publish','MLaC','dockerlist.json')
if os.path.isfile(dockerlist):
persistancevolume = request.POST.get('persistancevolume')
datasetpath = request.POST.get('dataset')
filetimestamp = str(int(time.time()))
logfilepath = os.path.join(LOG_FILE_PATH,'AIONPipeline_'+str(filetimestamp)+'.log')
config={'usecase':str(usecasename),'version':str(p.Version),'persistancevolume':persistancevolume,'datasetpath':datasetpath,'dockerlist':str(dockerlist),
'logfilepath':logfilepath}
config = json.dumps(config)
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','aion.py'))
if platform.system() == 'Windows':
outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','runpipelinelocal','-j',config],creationflags = subprocess.CREATE_NEW_CONSOLE)
else:
# align with the Windows branch: pass the JSON config via -m/-j; the bare
# positional arguments previously passed here left `config` unused
outputStr = subprocess.Popen([sys.executable, scriptPath,'-m','runpipelinelocal','-j',config])
Status = 'SUCCESS'
Msg = 'Pipeline Started'
MLaaC_output = 'Check log file for pipeline execution status: ' + str(logfilepath)
else:
Status = 'Failure'
Msg = 'Container information not found'
if command == 'generateyaml':
deployPath = str(p.DeployPath)
maac_path = os.path.join(deployPath,'publish','MLaC')
if os.path.isdir(maac_path):
persistancevolume = request.POST.get('persistancevolume')
datasetpath = request.POST.get('dataset')
supported_urls_starts_with = ('gs://','https://','http://')
if datasetpath.startswith(supported_urls_starts_with):
datasetpath = request.POST.get('dataset')
else:
datasetpath = '/aion/'+request.POST.get('dataset')
serviceport = request.POST.get('serviceport')
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..','bin','run_generateyaml.py'))
outputStr = subprocess.check_output([sys.executable, scriptPath, str(usecasename),str(p.Version),persistancevolume,datasetpath,maac_path,serviceport])
outputStr = outputStr.decode('utf-8')
outputStr=outputStr.strip()
print(outputStr)
output = json.loads(outputStr)
if output['Status'] == 'SUCCESS':
Status = 'SUCCESS'
MLaaC_output = output['location']
Msg = 'MLaaC dockerfile successfully generated'
else:
Status = 'Failure'
Msg = output['msg']
else:
Status = 'Failure'
Msg = 'Execute generate code first'
if command == 'githubupload':
if shutil.which('git') is None:
Status = 'Failure'
Msg = 'Git is not installed. Please install Git first.'
else:
try:
deployPath = str(p.DeployPath)
maac_path = os.path.join(deployPath,'publish','MLaC')
if os.path.isdir(maac_path):
githuburl = request.POST.get('githuburl')
githubusername = request.POST.get('githubusername')
githubtoken = request.POST.get('githubtoken')
githubemail = request.POST.get('githubemail')
githubconfig = {"url_type":"https","url":githuburl,"username":githubusername,"email":githubemail,"token":githubtoken,"location":maac_path,"modelName":usecasename,"gitFolderLocation":GITHUB_FILE_PATH}
from mlops import git_upload
outputStr = git_upload.upload(githubconfig)
print(outputStr)
output = json.loads(outputStr)
if output['Status'] == 'SUCCESS':
Status = 'SUCCESS'
MLaaC_output = githuburl
Msg = 'Code Uploaded to GitHub Successfully'
else:
Status = 'Failure'
Msg = output['msg']
else:
Status = 'Failure'
Msg = 'MLaC code not found. Run the code generator first.'
except Exception as e:
print(e)
Status = 'Failure'
Msg = 'GitHub Upload failed'
if command == 'unpublishmodel':
try:
models = Existusecases.objects.filter(ModelName=usecasedetail,publishStatus='Published')
if len(models) > 0:
for model in models:
try:
if problemType.lower() == "llm fine-tuning":
cloudconfig = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'compute_conf.json'))
modelid = usecasename + '_' + str(Version)
usecasename = usecasename.replace(" ", "_")
hypervisor,instanceid,region,image,status = get_instance(usecasename + '_' + str(Version))
from llm.llm_inference import kill_inference_server
kill_inference_server(cloudconfig,instanceid,hypervisor,region,image)
update_sqllite_data(modelid,'status','Success')
else:
try:
    os.kill(int(model.publishPID), signal.SIGTERM)
except Exception as e:
    print(e)
# fetch the record before clearing it; the original assigned mod.publishPID
# before mod existed, raising a NameError that the except block swallowed
mod = Existusecases.objects.get(id=model.id)
mod.publishStatus = ''
mod.publishPID = 0
mod.portNo = 0
mod.save()
Status = 'SUCCESS'
Msg = 'Model Unpublished Successfully'
except Exception as e:
print(e)
Status = 'Error'
Msg = 'Model Unpublished Error'
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print(e)
pass
if command == 'publishmodel':
try:
portNo=0
models = Existusecases.objects.filter(ModelName=usecasedetail,publishStatus='Published')
if len(models) > 0:
for model in models:
try:
portNo = model.portNo
try:
os.kill(int(model.publishPID), signal.SIGTERM)
except Exception as e:
print(e)
mod = Existusecases.objects.get(id=model.id)
mod.publishStatus = ''
mod.publishPID = 0
mod.portNo = 0
mod.save()
except Exception as e:
print(e)
pass
missingNumbers = []
if problemType.lower() == "llm fine-tuning":
model = Existusecases.objects.get(ModelName=usecasedetail,Version=Version)
try:
usecasename = usecasename.replace(" ", "_")
hypervisor,instanceid,region,image,status = get_instance(usecasename + '_' + str(Version))
if status.lower() in ['published','success'] :
if status.lower() == 'published':
from llm.llm_inference import kill_inference_server
kill_inference_server('',instanceid, hypervisor, region, image)
update_sqllite_data(usecasename + '_' + str(Version), 'status', 'Success')
already_published,published_usecase = get_published_models(instanceid)
if already_published:
Status = 'Error'
Msg = f'{published_usecase} is already published on this instance. Please unpublish that model to proceed.'
else:
if not region:
region = ''
cloudconfig = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config', 'compute_conf.json'))
usecase = usecasename + '_' + str(Version)
#modelid = usecasename + '_' + str(Version)
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'aion.py'))
cmd = [sys.executable, scriptPath, '-m', 'llmpublish', '-cc', cloudconfig, '-i',instanceid,'-hv',hypervisor,'-md',deployedModel,'-uc',usecase,'-r',region,'-im',image ]
outputStr = subprocess.Popen(cmd)
model.publishStatus = 'Published'
model.publishPID = 0
model.portNo = 8000
model.save()
Status = 'SUCCESS'
from llm.llm_inference import get_ip
instanceip = get_ip(cloudconfig,instanceid,hypervisor,region,image)
print(instanceip)
url = 'http://' + instanceip + ':' + str(model.portNo) + '/generate'
Msg = 'Model Published Successfully. The server will take a few minutes to be ready for inferencing. URL: ' + url
update_sqllite_data(usecase,'status','Published')
else:
Status = 'Error'
Msg = 'Only trained models are available for publish.'
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
Status = 'Error'
Msg = 'Model Published Error'
else:
if portNo == 0:
models = Existusecases.objects.filter(publishStatus='Published')
usedPortNo=[]
for model in models:
usedPortNo.append(model.portNo)
startPortNo = 8091
endPortNo = 8091+5
missingNumbers = [ i for i in range(startPortNo,endPortNo) if i not in usedPortNo]
if len(missingNumbers) > 0:
portNo = missingNumbers[0]
if portNo != 0:
model = Existusecases.objects.get(ModelName=usecasedetail,Version=Version)
scriptPath = os.path.join(PUBLISH_PATH,usecasename,'aion_publish_service.py')
isExist = os.path.exists(scriptPath)
if isExist:
configfile = os.path.join(PUBLISH_PATH,usecasename,'config.json')
configdata = {'version': str(Version)}
with open(configfile, "w") as outfile:
json.dump(configdata, outfile)
outfile.close()
outputStr = subprocess.Popen([sys.executable, scriptPath,'-ip','0.0.0.0','-p',str(portNo)])
model.publishStatus = 'Published'
model.publishPID = outputStr.pid
model.portNo = portNo
model.save()
Status = 'SUCCESS'
hosturl =request.get_host()
hosturl = hosturl.split(':')
url = 'http://'+hosturl[0]+':'+str(portNo)+'/AION/'+str(usecasename)+'/predict'
Msg = 'Model Published Successfully URL: '+url
else:
Status = 'Error'
Msg = 'Model Published Error'
else:
Status = 'Error'
Msg = 'All ports are utilized'
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
print(e)
pass
if command == 'generatekubeflowyaml':
try:
if problemType.lower() == 'timeseriesforecasting': #task 11997
from appbe.aionpipelinets import aionpipelinets
else:
from appbe.aionpipeline import aionpipeline
deployPath = str(p.DeployPath)
codeconfig = os.path.join(deployPath,'etc','code_config.json')
featuresmapping = {'modelBased':'mlbased','statisticalBased':'statisticalBased'}
if os.path.isfile(codeconfig):
with open(codeconfig,'r') as f:
codeconfig = json.load(f)
f.close()
modelsarray=[]
for featureselection in codeconfig['feature_selector']:
for algo in codeconfig['algorithms'].keys():
if problemType.lower() == 'timeseriesforecasting': #task 11997
modelname = 'modeltraining_'+algo.lower()
else:
modelname = 'modeltraining_'+algo.lower()+'_'+featuresmapping[featureselection]
modelx = {'modelname':modelname}
modelsarray.append(modelx)
modelsjson = {'models':modelsarray}
kubeflowhost= request.POST.get('kubeflowhost')
containerregistry= request.POST.get('containerregistry')
containerlabel= request.POST.get('containerlabel')
containersecret= request.POST.get('containersecret')
if problemType.lower() == 'timeseriesforecasting': #task 11997
ap = aionpipelinets(modelsjson,containerregistry,containerlabel,containersecret)
else:
ap = aionpipeline(modelsjson,containerregistry,containerlabel,containersecret)
ap.aion_mlops()
ap.compilepl()
ap.executepl(kubeflowhost)
Status = 'SUCCESS'
MLaaC_output = ''
Msg = 'MLOps pipeline executed successfully'
except Exception as e:
print(e)
Status = 'Failure'
Msg = 'Error in pipeline execution'
from appbe.pages import get_usecase_page
if command in ['publishmodel','unpublishmodel']:
status,context,action = get_usecase_page(request,usecasedetails,Existusecases,usecaseid)
context['Status'] = Status
context['MLaaC_output'] = MLaaC_output
context['Msg'] = Msg
return(context,'usecasedetails.html')
else:
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
context['Status'] = Status
context['MLaaC_output'] = MLaaC_output
context['Msg'] = Msg
return(context,'usecases.html')
def getusercasestatus(request):
if 'UseCaseName' in request.session:
selected_use_case = request.session['UseCaseName']
else:
selected_use_case = 'Not Defined'
if 'ModelVersion' in request.session:
ModelVersion = request.session['ModelVersion']
else:
ModelVersion = 0
if 'ModelStatus' in request.session:
ModelStatus = request.session['ModelStatus']
else:
ModelStatus = 'Not Trained'
return selected_use_case, ModelVersion, ModelStatus
itask-qa-qg", model="valhalla/t5-base-qa-qg-hl")
for _text in docs:
res = nlp(_text)
print(res)
extracted_QnAList.extend(res)
for _record in extracted_QnAList:
extracted_QnA.append({'question': _record['question'], 'answer': _record['answer'].replace('<pad>', '')})
quesCount = len(extracted_QnA)
context = {'extracted_QnA':extracted_QnA, 'quesCount':quesCount}
filetimestamp = str(int(time.time()))
output_filepath = os.path.join(DATA_FILE_PATH,'AION_QnA' + filetimestamp+'.txt')
# Save the extracted QnA pairs as JSON (written to a .txt file)
with open(output_filepath, 'w') as output_file:
json.dump(extracted_QnA, output_file, indent=4)
print(f"T5 based QnAs have been saved to {output_filepath}.")
request.session['QnAfilepath'] = output_filepath
return context
except Exception as e:
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
errormsg = str(e)
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
context = {'error': 'Failed to generate QnA List using T5','LLM' : 'T5', 'selected':'DataOperations', 'errormessage':errormsg}
log.info('generateQA_Offline -- Error : Failed to generate QnA List using T5.. '+str(e))
log.info('Details : '+ str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
return context
def split_text_for_Offline(text, model_name):
lines = text.split('\n')
current_section = ''
sections = []
for line in lines:
    num_tokens = count_tokens_text_offline(''.join([current_section, line]), model_name)
    if num_tokens < set_tokens_limit_offline:
        current_section = ''.join([current_section, line])
    else:
        sections.append(current_section)
        current_section = line
# Always flush the remainder; the original _lastsection flag returned an
# empty list whenever the whole text fit inside a single section.
if current_section:
    sections.append(current_section)
return sections
def count_tokens_text_offline(text, model_name):
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
inputs = tokenizer(text, return_tensors="pt")
input_ids = inputs["input_ids"]
_token_count = len(input_ids[0])
return _token_count
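# Illustrative usage of the two helpers above (an assumption, not part of the
# original module): split a document so each section stays under the token
# budget, then report per-section counts. Note that count_tokens_text_offline
# reloads the tokenizer on every call, so a real caller may want to cache it.
def _example_split_and_count(doc, model_name="t5-base"):
    # "t5-base" is an assumed checkpoint id; any Hugging Face tokenizer works.
    sections = split_text_for_Offline(doc, model_name)
    return [count_tokens_text_offline(s, model_name) for s in sections]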
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
# def exploratorory_help():
#
#
#
# return (data_overview_tip, feature_importance_tip, correlation_analysis_tip, exploratory_analysis_tip, data_deep_drive_tip, drift_tip)
drift_tip = 'A data distribution represents a list of all of the possible values of each of the variables as provided in the data. Based on how the data values are distributed, it can be mapped to some well-known distribution curves so that the nature of the distribution can be shown.'
data_overview_tip = 'Data Overview gives users a quick understanding of the distribution of values across the features and provides summary statistics of the features. It helps to uncover several uncommon and common issues such as unexpected feature values, missing feature values and data skew.'
timeseries_analysis_tip = "Time Series Analysis provides information about the stationarity and seasonality of each of the features in the ingested data."
feature_importance_tip = 'Feature Importance lists the features and grades them on a scale of relative importance'
correlation_analysis_tip = 'Correlation Analysis provides the strength of relationships among various features. Values range from 0 (least correlation) to 1 (highest correlation). A high correlation means that two or more variables have a strong relationship with each other, while a weak correlation means that the variables are hardly related.'
exploratory_analysis_tip = 'This provides an unsupervised clustering view of the data and provides insights on how the data is distributed. It helps profile the attributes of different clusters and gives insight into underlying patterns of different clusters and find similarities in the data points.'
data_deep_drive_tip = 'Data Deep Dive provides an interactive interface for exploring the relationship between data points across all the different features of a dataset. Each individual item in the visualization represents a data point. Data can be grouped and binned in multiple dimensions based on their feature values.'
pair_graph_tip = 'It is used to present the correlations between two selected features.'
fair_metrics_tip = 'It provides an interface to detect bias in the data associated with a sensitive or protected attribute used for training.'
hopkins_tip =['Since the value is in between (0.0, 0.3), it indicates that the data has a high tendency to cluster.','Since the value is around 0.5, it indicates that the data distribution is random.','Since the value is in between (0.7, 0.99), it indicates that the data is regularly spaced.']
basic_help={'RowFiltering':'You can easily filter rows based on whether the column match a condition or not'}
advance_help = {'NumericFillMethod':'This is used to handle the null values present in the numerical dataset.','NumericFillMethod_Median':'Replace with middle value of the data set. Efficient and not affected by outliers.','NumericFillMethod_Mean':'Replace with average value of the columns. Affected by outliers.','NumericFillMethod_Max':'Replace all nulls with maximum value in the column.','NumericFillMethod_KNN':'This implements KNN algorithm to replace the null','NumericFillMethod_Zero':'Replace the null with 0 value','NumericFillMethod_Drop':'To remove all the null values in the dataset','NumericFillMethod_Min':'Replace all null with minimum value present in the column','CategoricalFillMethod':'This is used to handle the null values present in the categorical dataset.','CategoricalFillMethod_Mode':'Replace with most common values in the dataset. Suggested for categorical columns.','CategoricalFillMethod_Zero':'Replace the null with 0 value.','CategoricalFillMethod_KNN':'This implements KNN algorithm to replace the null','CategoricalFillMethod_Drop':'To remove all the null values in the dataset.','OutlierDetection':'An unusual data point that differs significantly from other data points.','OutlierDetection_IQR':'Identifying the outliers with interquatile range by dividing the data into quartiles.','OutlierDetection_Zscore':'If the z score of a data point is more than 3, it indicates that the data point is an outlier.','OutlierDetection_Isolation':'Randomly sub-sampled data is processed in a tree structure based on randomly selected features.','MissValueRatio':'Permitted Missing Value Ratio i.e., Number of missing values by total number of obervation. If the number of missing value in a columns is more than ratio than the columns will be assumped as empty column','NumericFeatureRatio':'In case column is mix of number and text value. If the number of numeric columns to number of rows ratio is greator than the value mentioned it is assumed as numeric columns and remaining rows which have text values will be removed','NormalStandard':'Standardize features by removing the mean and scaling to unit variance.','NormalMinMax':'This scales and translates each feature individually such that it is in the given range on the training set, e.g. between zero and one.','NormalLogNormal':'When a feature does not follow a linear distributio, that helps minimize skewness and map any distribution to a normal one as close as possible.','RemoveNoise':'Used to remove the noise present in the text data. Noise like special characters, unicode, emojis, hyperlinks,hashtags, html parameters etc.','ExpandContractions':'Contractions are words or combinations of words that are shortened by dropping letters and replacing them by an apostrophe.','Normalize':'Normalization is the process of converting a token into its base form. In the normalization process, the inflectional form of a word is removed so that the base form can be obtained.','Lemmatization':'It is a more effective option than stemming because it converts the word into its root word, rather than just stripping the suffices.','Stemming':'It refers to the removal of suffices, like ing,ly,s etc. by a simple rule-based approach.','NGrams':'The combination of multiple words used together.','PosTags':'The process of classifying words into their parts of speech and labeling them accordingly is known as part-of-speech tagging, or simply POS-tagging.','FeatureSelection':'Feature selection is for filtering irrelevant or redundant features from your dataset. 
The key difference between feature selection and extraction is that feature selection keeps a subset of the original features while feature extraction creates brand new ones.','FeatureEngineering':'Feature extraction is for creating a new, smaller set of features that stills captures most of the useful information. Again, feature selection keeps a subset of the original features while feature extraction creates new ones.','PCA':'Principle Component Analysis (PCA) is a common feature extraction method in data science. Technically, PCA finds the eigenvectors of a covariance matrix with the highest eigenvalues and then uses those to project the data into a new subspace of equal or less dimensions.','StatisticalBased':'Features are selected on the basis of statistics measures. This method does not depend on the learning algorithm and chooses the features as a pre-processing step. The filter method filters out the irrelevant feature and redundant columns from the model by using different metrics through ranking.','ModelBased':'Different tree-based methods of feature selection help us with feature importance to provide a way of selecting features. Here, feature importance specifies which feature has more importance in model building or has a great impact on the target variable.','CorrelationThreshold':'Correlation Threshold for Statistican Based Feature Selection. Correlation relation analysis done on input features vs target feature and features having correlation value grather then threshold picks for training','PValue':'P Value again for Statistical Based Feature Selection','Variance':'For Feature Selection, features should have higher variance from threshold.','Normalization':'The goal of normalization is to change the values of numeric columns in the dataset to use a common scale , without distoring differences in the ranges of values or losing information.','SVD':'The singular value decomposition (SVD) provides another way to factorize a matrix, into singular vectors and singular values. The SVD allows us to discover some of the same kind of information as the eigendecomposition.','ReplaceAcro':'Replace any abrivations into its full form Eg:{"DM":"DirectMessage"}',
'Factoranalysis':' This algorithm creates factors from the observed variables to represent the common variance i.e. variance due to correlation among the observed variables.','ICA':'ICA stands for Independent Components Analysis and it is a linear dimension reduction method, which transforms the dataset into columns of independent components.','optimizationmethod':'Optimization is the process where we train the model iteratively that results in a maximum and minimum function evaluation.','Random':'Random search is a method in which random combinations of hyperparameters are selected and used to train a model. The best random hyperparameter combinations are used. Random search bears some similarity to grid search.','Grid':'Grid search is essentially an optimization algorithm which lets to select the best parameters for your optimization problemfrom a list of parameter options that provided, hence automating the trial-and-error method.','Bays':'Bayesian optimisation in turn takes into account past evaluations when choosing the hyperparameter set to evaluate next. This approach typically requires less iterations to get to the optimal set of hyperparameter values.','Stopwords':'Stop words are commonly eliminated which are commonly used that they carry very little useful information. They are passed in a list ["Stopword1","Stopword2"]','Tokenization':'It is essentially splitting a phrase, sentence, paragraph, or an entire text document into smaller units, such as individual words or terms. Choose the library for tokenization','Lemma':'In lemmatization, the transformation uses a dictionary to map different variants of a word back to its root format.','Stopwords1':'Stop words are commonly eliminated which are commonly used that they carry very little useful information.Select from the below library to remove them',
'Genetic':'The genetic algorithm repeatedly modifies a population of individual solutions. At each step, the genetic algorithm selects individuals at random from the current population to be parents and uses them to produce the children for the next generation. Over successive generations, the population evolves toward an optimal solution.','CV':'Cross-validation is a resampling procedure used to evaluate machine learning models on a limited data sample. The procedure has a single parameter called k that refers to the number of groups that a given data sample is to be split into.','Ensemble':'Ensemble learning is a general meta approach to machine learning that seeks better predictive performance by combining the predictions from multiple models.','EnsembleStatus':'Enable or disable according to the preference','TargetEncoding':'Target encoding is the process of replacing a categorical value with the mean of the target variable','OneHotEndoding':'Encode categorical features as a one-hot numeric array.','LabelEncoding':'Encode target labels with value between 0 and n_classes-1.','SMCStrategy':'A most_frequent model - The default. In regression the prediction is equal to the mean value, in classification the prediction is equal to the most common value.\n A uniform model - In regression, selects a random value from the y range. In classification, selects one of the labels by random.\n A stratified model - Draws the prediction from the distribution of the labels in the train set.\n A tree model - Trains a simple decision tree with a given depth. The depth can be customized using the max_depth parameter.','SMCGain':'The gain is calculated as:\ngain = (model score - simple score)/(perfect score - simple score)','SMCTreeDepth':'the max depth of the tree (used only if simple model type is tree).','MIcondition':'Measure model average inference time (in seconds) per sample'}
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import sqlite3
from pathlib import Path
import json
import os
import rsa
import boto3 #usnish
import pandas as pd
import time
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
def table_exists(self, name):
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
return len(listOfTables) > 0
def read_data(self, table_name):
query = f"SELECT * FROM {table_name}"
row = self.cursor.execute(query).fetchall()
return list(row)
#return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT," |
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def delete_record(self,table_name,col_name, col_value):
try:
query = f"DELETE FROM {table_name} WHERE {col_name}='{col_value}'"
self.conn.execute(query)
self.conn.commit()
return 'success'
except Exception as e :
print(str(e))
print("Deletion Failed")
return 'error'
def get_data(self,table_name,col_name,col_value):
query = f"SELECT * FROM {table_name} WHERE {col_name}='{col_value}'"
row = self.cursor.execute(query).fetchone()
if row is None:
return []
return list(row)
def write_data(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def close(self):
self.conn.close()
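# A minimal usage sketch for the sqlite_db wrapper above (illustrative only;
# the path and table name are assumptions). Note that create_table accepts a
# dtypes argument but stores every column as TEXT, and the f-string queries
# assume trusted table/column names.
def _example_sqlite_usage():
    df = pd.DataFrame({'Name': ['demo'], 'Value': ['42']})
    db = sqlite_db(Path('/tmp'), 'example.db')  # opens /tmp/example.db
    db.write_data(df, 'settings')               # creates the table on first write
    rows = db.read_data('settings')             # -> [('demo', '42')]
    db.delete_record('settings', 'Name', 'demo')
    db.close()
    return rows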
def add_new_s3bucket(request):
try:
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if request.POST["aionreferencename"] =='' or request.POST["s3bucketname"] == '' or request.POST["awsaccesskey"] == '' :
return 'error'
pkeydata='''-----BEGIN RSA PUBLIC KEY-----
MIIBCgKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1AfnrMv
fVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw0m4e
wQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2PM4Re
n0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHyKxlq
i/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhxWrs/
lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQAB
-----END RSA PUBLIC KEY-----'''
pubkey = rsa.PublicKey.load_pkcs1(pkeydata)
awssecretaccesskey = rsa.encrypt(request.POST["awssecretaccesskey"].encode(), pubkey)
newdata = {}
newdata['Name'] = [request.POST["aionreferencename"]]
newdata['AWSAccessKeyID'] = [request.POST["awsaccesskey"]]
newdata['AWSSecretAccessKey'] = [str(awssecretaccesskey)]
newdata['S3BucketName'] = [request.POST["s3bucketname"]]
name = request.POST["aionreferencename"]
if sqlite_obj.table_exists("s3bucket"):
if(len(sqlite_obj.get_data("s3bucket","Name",name)) > 0):
return 'error1'
    sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'s3bucket')
    return 'success'  # previously fell through and returned None on success
except Exception as e:
    print(e)
    return 'error'
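# add_new_s3bucket stores the AWS secret as str(rsa.encrypt(...)) - the repr
# of a bytes object - and read_s3_bucket below recovers the bytes with eval()
# before decrypting. A sketch of that round trip with a throwaway key pair
# (the hard-coded application keys are not used here; this is illustrative):
def _example_secret_roundtrip(secret='dummy-secret'):
    pub, priv = rsa.newkeys(2048)
    stored = str(rsa.encrypt(secret.encode(), pub))        # e.g. "b'\\x93...'"
    recovered = rsa.decrypt(eval(stored), priv).decode('utf-8')
    return recovered == secret                             # True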
def get_s3_bucket():
try:
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
temp_data = sqlite_obj.read_data('s3bucket')
data = []
for x in temp_data:
data_dict = {}
data_dict['Name'] = x[0]
data_dict['AWSAccessKeyID'] = x[1]
data_dict['AWSSecretAccessKey'] = x[2]
data_dict['S3BucketName'] = x[3]
data.append(data_dict)
except Exception as e:
print(e)
data = []
return data
def remove_s3_bucket(name):
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
return sqlite_obj.delete_record('s3bucket','Name',name)
def read_s3_bucket(name,filename,DATA_FILE_PATH):
privkey = '''-----BEGIN RSA PRIVATE KEY-----
MIIEqQIBAAKCAQEAxIHM1FphEMMwViUrG0b2Bqf8tOxbhUWlnmjgFt5A25qbY1Af
nrMvfVx8+7iCcZ/3TY9Jv2I584SOc1tvsgESCke/t6+o/u2esPBsnDFzV62l3Zvw
0m4ewQeKlFC8EoOblyIXRbZdelSJinzlr9lOiKuid/xPvXHou6jxF1A2W7a89A2P
M4Ren0W9YkjB7dRGW1sSrpruHdVJvgHhGZFZ7sCTue0jVOnc5sT3Tq5saLfEDqHy
Kxlqi/mcThmcTfisRIYFH5pyt/Ysr4VVP924QlcoqPOyg3RMCS3G0VjstSoVwNhx
Wrs/lujDuCnpxvWzNpq21OWmF66GXxwiq+6W0wIDAQABAoIBAC/VbNfQPEqJSO3f
VFPqfR73q2MbGdgiMQOTgeDvLxiF1QdizJ+j/I5mgiIAMviXuOpPU+NbdMHbZZWd
D15kNlD8UCXVg6yyiOuHStjmjK4uHe8I86E1nxTb0hbyZCWZlbk/WizlDHInu+dT
KdIZcq2AIidU6tAxtwA0ingHaRSoXDlSGwOTEigNqmWOKnDTVg0SMscoHOD7siXF
DHm1/lkvD3uvcZk6c7fGxC8SgNX2dj6n/Nbuy0Em+bJ0Ya5wq4HFdLJn3EHZYORF
ODUDYoGaSxeXqYsGg/KHJBc8J7xW9FdN9fGbHfw1YplrmiGL3daATtArjMmAh0EQ
H8Sj7+ECgYkA3oWMCHi+4t8txRPkg1Fwt8dcqYhGtqpAus3NESVurAdi0ZPqEJcQ
4cUbflwQPhX0TOaBlkgzdP8DMdcW/4RalxHsAh5N8ezx/97PQMb3Bht0WsQUBeYJ
xLV7T2astjTRWactGCG7dwTaUYRtU3FqL6//3CysmA12B5EMX0udNBOTKwmaYKww
AwJ5AOISS7f12Q0fgTEVY0H8Zu5hHXNOA7DN92BUzf99iPx+H+codLet4Ut4Eh0C
cFmjA3TC78oirp5mOOQmYxwaFaxlZ7Rs60dlPFrhz0rsHYPK1yUOWRr3RcXWSR13
r+kn+f+8k7nItfGi7shdcQW+adm/EqPfwTHM8QKBiQCIPEMrvKFBzVn8Wt2A+I+G
NOyqbuC8XSgcNnvij4RelncN0P1xAsw3LbJTfpIDMPXNTyLvm2zFqIuQLBvMfH/q
FfLkqSEXiPXwrb0975K1joGCQKHxqpE4edPxHO+I7nVt6khVifF4QORZHDbC66ET
aTHA3ykcPsGQiGGGxoiMpZ9orgxyO3l5Anh92jmU26RNjfBZ5tIu9dhHdID0o8Wi
M8c3NX7IcJZGGeCgywDPEFmPrfRHeggZnopaAfuDx/L182pQeJ5MEqlmI72rz8bb
JByJa5P+3ZtAtzc2RdqNDIMnM7fYU7z2S279U3nZv0aqkk3j9UDqNaqvsZMq73GZ
y8ECgYgoeJDi+YyVtqgzXyDTLv6MNWKna9LQZlbkRLcpg6ELRnb5F/dL/eB/D0Sx
QpUFi8ZqBWL+A/TvgrCrTSIrfk71CKv6h1CGAS02dXorYro86KBLbJ0yp1T/WJUj
rHrGHczglvoB+5stY/EpquNpyca03GcutgIi9P2IsTIuFdnUgjc7t96WEQwL
-----END RSA PRIVATE KEY-----'''
try:
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
data = sqlite_obj.get_data("s3bucket",'Name',name)
except Exception:
data = []
awssecretaccesskey = ''
found = False
if len(data)!=0:
aws_access_key_id = data[1]
awssecretaccesskey = data[2]
bucketName = data[3]
found = True
if found:
privkey = rsa.PrivateKey.load_pkcs1(privkey,'PEM')
awssecretaccesskey = eval(awssecretaccesskey)
awssecretaccesskey = rsa.decrypt(awssecretaccesskey, privkey)
awssecretaccesskey = awssecretaccesskey.decode('utf-8')
client_s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=str(awssecretaccesskey))
#print(bucketName,filename)
try:
response = client_s3.get_object(Bucket=bucketName, Key=filename)
df = pd.read_csv(response['Body'])
except Exception as e:
print(str(e))#usnish
return 'Error',str(e), pd.DataFrame()
#return 'Error', pd.DataFrame()
return 'Success','',df
return 'Error',"Please check bucket configuration", pd.DataFrame()<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import math
import sys,os
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import numpy as np
import scipy.stats as st
from sklearn.preprocessing import StandardScaler
from dython.nominal import associations
class ux_eda:
    def __init__(self, dataPath=pd.DataFrame(), delimiter=',', textqualifier='"', optimize=None):
aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.dataFrame = pd.DataFrame()
if isinstance(dataPath, pd.DataFrame):
self.dataFrame = dataPath
if optimize == 1:
self.dataFrame = self.dataFrame.sample(n=1000, random_state=1)
else:
if optimize == 1:
self.dataFrame = pd.read_csv(dataPath,nrows=1000,encoding='utf-8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
else:
self.dataFrame = pd.read_csv(dataPath, encoding='utf-8',sep=delimiter,quotechar=textqualifier,skipinitialspace = True,na_values=['-','?'],encoding_errors= 'replace')
self.dataFrame.rename(columns=lambda x: x.strip(), inplace=True)
self.features = self.dataFrame.columns.tolist()
self.indexFeature = []
self.dateFeature = []
self.categoricalFeature = []
self.constantFeature = []
self.textFeature = []
self.numericFeature = []
self.numericAndCatFeature = []
for feature, featureType in zip(self.features, self.dataFrame.dtypes):
if self.__check_seq_feature(self.dataFrame[feature]):
self.indexFeature.append(feature)
elif self.__match_date_format(self.dataFrame[feature]):
self.dateFeature.append(feature)
elif self.__check_constant_features(self.dataFrame[feature]):
self.constantFeature.append(feature)
elif self.__check_category_features(self.dataFrame[feature]):
self.categoricalFeature.append(feature)
elif featureType == 'object':
'''
numOfRows = self.dataFrame.shape[0]
distinctCount = len(self.dataFrame[feature].unique())
tempDff = self.dataFrame[feature]
self.dataFrame[feature]=self.dataFrame[feature].apply(lambda x: self.testNum(x))
tempDf = self.dataFrame[feature]
tempDf = tempDf.dropna()
numberOfNonNullVals = tempDf.count()
numericRatio = 0.8
if(numberOfNonNullVals > int(numOfRows * numericRatio)):
self.numericFeature.append(feature)
else:
self.dataFrame[feature] = tempDff
'''
self.textFeature.append(feature)
elif featureType in aionNumericDtypes:
self.numericFeature.append(feature)
# self.dataFrame[self.categoricalFeature] = self.dataFrame[self.categoricalFeature].apply(lambda x: x.cat.codes)
self.numericAndCatFeature = self.numericFeature + self.categoricalFeature
# EDA Performance change
# ----------------------------
def subsampleData(self, subsampleData):
self.dataFrame = self.dataFrame.sample(n=subsampleData, random_state=1)
def get_features_datatype(self,v,num_list,cat_list,text_list):
""" To get exact datatype of the feature in Data Overview."""
if v in cat_list:
return 'Categorical'
elif v in num_list:
return 'Numerical'
elif v in text_list:
return 'Text'
def getCorrelationMatrix(self):
try:
if len(self.dataFrame.columns) > 25:
    df3 = self.dataFrame[self.dataFrame.columns[0:24]]  # cap at the first 24 columns
else:
    df3 = self.dataFrame.copy()
# compute on the capped frame; the original referenced an undefined `df`
# and then ignored df3, so wide frames always fell into the except branch
cor_mat = associations(df3, compute_only=True)
cor_mat=cor_mat['corr']
cor_mat = cor_mat.astype(float).round(2)
cor_mat.replace(np.nan, 0, inplace=True)
cor_mat.fillna('None',inplace=True)
return cor_mat
except Exception as e:
print(e)
correlationgraph = pd.DataFrame()
return (correlationgraph)
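    # getCorrelationMatrix above delegates to dython's associations(), which
    # builds a mixed-type association matrix (Pearson for numeric pairs,
    # correlation ratio / Cramer's V where categoricals are involved). A
    # minimal sketch, assuming a dython version that supports compute_only:
    @staticmethod
    def _example_associations():
        df = pd.DataFrame({'num': [1, 2, 3, 4], 'cat': ['a', 'a', 'b', 'b']})
        return associations(df, compute_only=True)['corr'].round(2)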
def dataDistribution(self):
df_eda_actual = self.dataFrame.copy()
des1 = df_eda_actual.describe(include='all').T
des1['missing count %'] = df_eda_actual.isnull().mean() * 100
des1['zero count %'] = df_eda_actual.isin([0]).mean() * 100
dataColumns = list(self.dataFrame.columns.values)
des1.insert(0, 'Features', dataColumns)
actual_df_numerical_features = df_eda_actual.select_dtypes(exclude='object')
actual_df_categorical_features = df_eda_actual.select_dtypes(include='object')
#For text features
textFeature_df = df_eda_actual.filter(self.textFeature)
actual_df_categorical_features = actual_df_categorical_features.drop(self.textFeature, axis=1)
num_cols = actual_df_numerical_features.columns.to_list()
cat_cols = actual_df_categorical_features.columns.to_list()
text_cols = self.textFeature
des1['Features Type'] = des1['Features'].apply(lambda x: self.get_features_datatype(x, num_cols, cat_cols, text_cols))
curr_columns = des1.columns.to_list()
curr_columns.remove('Features Type')
insert_i = curr_columns.index('Features')+1
curr_columns.insert(insert_i,'Features Type')
des1 = des1[curr_columns]
return des1
# ----------------------------
def subsetFeatures(self, edaFeatures):
self.dataFrame = self.dataFrame[edaFeatures]
self.features = edaFeatures
self.indexFeature = []
self.dateFeature = []
self.categoricalFeature = []
self.constantFeature = []
self.textFeature = []
self.numericFeature = []
self.numericAndCatFeature = []
aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
for feature, featureType in zip(self.features, self.dataFrame.dtypes):
if self.__check_seq_feature(self.dataFrame[feature]):
self.indexFeature.append(feature)
elif self.__match_date_format(self.dataFrame[feature]):
self.dateFeature.append(feature)
elif self.__check_constant_features(self.dataFrame[feature]):
self.constantFeature.append(feature)
elif self.__check_category_features(self.dataFrame[feature]):
self.categoricalFeature.append(feature)
elif featureType == 'object':
'''
numOfRows = self.dataFrame.shape[0]
distinctCount = len(self.dataFrame[feature].unique())
tempDff = self.dataFrame[feature]
self.dataFrame[feature]=self.dataFrame[feature].apply(lambda x: self.testNum(x))
tempDf = self.dataFrame[feature]
tempDf = tempDf.dropna()
numberOfNonNullVals = tempDf.count()
numericRatio = 0.8
if(numberOfNonNullVals > int(numOfRows * numericRatio)):
self.numericFeature.append(feature)
else:
self.dataFrame[feature] = tempDff
'''
self.textFeature.append(feature)
elif featureType in aionNumericDtypes:
self.numericFeature.append(feature)
self.numericAndCatFeature = self.numericFeature + self.categoricalFeature
# ----------------------------
def testNum(self,value):
try:
x=eval(value)
return x
except:
return np.nan
def getFeatures(self):
leastRatioFeature = self.__LeastfeatureRatio()
return (self.features, self.dateFeature, self.indexFeature, self.constantFeature, self.textFeature, leastRatioFeature,self.numericAndCatFeature,self.numericFeature,self.categoricalFeature)
def getNumericFeatureCount(self):
return(len(self.numericAndCatFeature))
def calculateNumberofCluster(self):
df = self.dataFrame[self.numericFeature]
return self.__NumberofCluster(df)
def getTopTextFeatures(self,topn):
df_text = pd.DataFrame()
if (len(self.textFeature) > 1):
df_text['combined'] = self.dataFrame[self.textFeature].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
features = ['combined']
else:
df_text[['combined']] = self.dataFrame[self.textFeature]
features = ['combined']
df_text[features[0]] = df_text[features[0]].fillna("NA")
textCorpus = df_text[features[0]]
from text import eda
texteda_obj = eda.ExploreTextData()
df = texteda_obj.MostCommonWords(textCorpus,topn)
return df
def __NumberofCluster(self, featureData):
Sum_of_squared_distances = []
K = range(1, 15)
for k in K:
km = KMeans(n_clusters=k)
km = km.fit(featureData)
Sum_of_squared_distances.append(km.inertia_)
x1, y1 = 1, Sum_of_squared_distances[0]
x2, y2 = 15, Sum_of_squared_distances[len(Sum_of_squared_distances) - 1]
distances = []
for inertia in range(len(Sum_of_squared_distances)):
x0 = inertia + 2
y0 = Sum_of_squared_distances[inertia]
numerator = abs((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1)
denominator = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
distances.append(numerator / denominator)
n_clusters = distances.index(max(distances)) + 2
return (n_clusters)
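    # __NumberofCluster picks k with a "knee" heuristic: the perpendicular
    # distance from each (k, inertia) point to the line joining the first and
    # last points, taking the k with the largest distance. A cleaned-up sketch
    # of that point-to-line formula on an illustrative inertia curve (textbook
    # indexing; the method above offsets x by 2):
    @staticmethod
    def _example_knee(inertias=(100.0, 40.0, 20.0, 15.0, 13.0)):
        x1, y1 = 1, inertias[0]
        x2, y2 = len(inertias), inertias[-1]
        denom = math.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
        dists = [abs((y2 - y1) * (i + 1) - (x2 - x1) * y + x2 * y1 - y2 * x1) / denom
                 for i, y in enumerate(inertias)]
        return dists.index(max(dists)) + 1  # -> 2 for these values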
#13841 : TrustedAI: hopkins stat
def getHopkinsVal(self,df):
try:
from appbe.hopkinsStat import hopkins
from sklearn.preprocessing import StandardScaler,OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
numeric_transformer = Pipeline(
steps=[("imputer", SimpleImputer(missing_values=np.nan,strategy="mean")),
("standard_scaler", StandardScaler())]
)
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(missing_values=np.nan,strategy="most_frequent")),
("encoder", OneHotEncoder(handle_unknown="ignore"))
]
)
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, self.numericFeature),
("cat", categorical_transformer, self.categoricalFeature)
]
)
pipe = Pipeline([('scaler',preprocessor)])
scaled_df = pipe.fit_transform(df)
if type(scaled_df) != np.ndarray:
scaled_df = scaled_df.toarray()
score = round(hopkins(scaled_df,scaled_df.shape[0]),2)
return str(score)
except Exception as e:
print(e)
return ''
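    # getHopkinsVal defers to appbe.hopkinsStat.hopkins, which is not shown
    # here. For reference, a common formulation of the Hopkins statistic is
    # sketched below (an assumption about that helper, not a copy of it):
    # compare nearest-neighbour distances of m synthetic uniform points (u)
    # against m sampled real points (w). Conventions differ on whether values
    # near 0 or near 1 indicate clusterable data, as the tooltip text in this
    # codebase reflects.
    @staticmethod
    def _example_hopkins(X, m=None):
        # X: 2-D numpy array of scaled features
        from sklearn.neighbors import NearestNeighbors
        rng = np.random.default_rng(0)
        m = m or max(1, int(0.1 * len(X)))
        uniform = rng.uniform(X.min(axis=0), X.max(axis=0), (m, X.shape[1]))
        u = NearestNeighbors(n_neighbors=1).fit(X).kneighbors(uniform)[0].sum()
        idx = rng.choice(len(X), m, replace=False)
        # n_neighbors=2 so column 1 skips each point's zero self-distance
        w = NearestNeighbors(n_neighbors=2).fit(X).kneighbors(X[idx])[0][:, 1].sum()
        return u / (u + w)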
def getClusterDetails(self):
aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
df_clus = pd.get_dummies(self.dataFrame[self.numericAndCatFeature], prefix_sep='####')
for i in df_clus.columns:
dataType = df_clus[i].dtypes
if dataType not in aionNumericDtypes:
df_clus[i] = df_clus[i].fillna(df_clus[i].mode()[0])
else:
df_clus[i] = df_clus[i].fillna(df_clus[i].mean())
n = self.__NumberofCluster(df_clus)
n = n - 1
kmeans = KMeans(n_clusters=n, init='k-means++', max_iter=10, n_init=10, random_state=0)
# Fit and predict
y_means = kmeans.fit_predict(df_clus)
centroids = kmeans.cluster_centers_.squeeze()
labels = kmeans.labels_
features = df_clus.columns
cluster_details = []
for j in range(len(features)):
cluster = {}
feature = features[j]
perflag = 0
if '####' in feature:
x = features[j].split('####')
feature = x[0] + ' ' + x[1] + '(%)'
perflag = 1
else:
feature = feature + '(AVG)'
cluster['label'] = feature
total_sum = 0
if perflag == 1:
for i in range(n):
centroid = centroids[i]
value = round(centroid[j], 2)
total_sum = total_sum + value
for i in range(n):
centroid = centroids[i]
value = round(centroid[j], 2)
if perflag == 1:
value = (value / total_sum) * 100
value = round(value, 2)
cluster['Cluster ' + str(i + 1)] = value
cluster_details.append(cluster)
hopkins_val = self.getHopkinsVal(self.dataFrame)
return cluster_details,hopkins_val
def getHighlyCorrelatedFeatures(self,noOfTop):
df_corr = abs(self.dataFrame[self.numericAndCatFeature].corr()).stack().reset_index()
df_corr.columns = ['FEATURE_1', 'FEATURE_2', 'CORRELATION']
mask_dups = (df_corr[['FEATURE_1', 'FEATURE_2']].apply(frozenset, axis=1).duplicated()) | (
df_corr['FEATURE_1'] == df_corr['FEATURE_2'])
df_corr = df_corr[~mask_dups]
df_corr = df_corr.sort_values(by='CORRELATION', ascending=False)
df_top = df_corr.head(n=noOfTop)
return(df_top)
# ---------------------- 12686:Data Distribution related Changes S T A R T ----------------------
def word_token_for_feature(self, selectedFeature, dataframe):
comment_words = ""
try:
df_text = pd.DataFrame()
df_text[[selectedFeature]] = dataframe
features = [selectedFeature]
df_text[features[0]] = df_text[features[0]].fillna("NA")
textCorpus = df_text[features[0]]
from text import TextProcessing
tp = TextProcessing.TextProcessing()
preprocessed_text = tp.transform(textCorpus)
df_text[selectedFeature] = preprocessed_text
df_text_list = df_text.values.tolist()
for val in df_text_list:
val = str(val)
tokens = val.split()
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
comment_words += " ".join(tokens) + " "
except Exception:
    comment_words = ""
return comment_words
# -------------------------------------------- E N D --------------------------------------------
def word_token(self):
df_text = pd.DataFrame()
if (len(self.textFeature) > 1):
df_text['combined'] = self.dataFrame[self.textFeature].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
features = ['combined']
else:
df_text[['combined']] = self.dataFrame[self.textFeature]
features = ['combined']
df_text[features[0]] = df_text[features[0]].fillna("NA")
textCorpus = df_text[features[0]]
from text import TextProcessing
tp = TextProcessing.TextProcessing()
preprocessed_text = tp.transform(textCorpus)
df_text['combined'] = preprocessed_text
df_text_list = df_text.values.tolist()
comment_words = ""
for val in df_text_list:
val = str(val)
tokens = val.split()
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
comment_words += " ".join(tokens) + " "
if comment_words == "":
comment_words = 'Not found any token'
return comment_words
def getdata(self):
return self.dataFrame
def getPCATop10Features(self):
aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
df = self.dataFrame[self.numericAndCatFeature].copy()  # copy to avoid mutating the source frame
for feature in self.numericAndCatFeature:
if feature in self.categoricalFeature:
df[feature] = pd.Categorical(df[feature])
df[feature] = df[feature].cat.codes
df[feature] = df[feature].fillna(df[feature].mode()[0])
else:
df[feature] = df[feature].fillna(df[feature].mean())
pca = PCA(n_components=2).fit(StandardScaler().fit_transform(df))
mapping = pd.DataFrame(pca.components_, columns=self.numericAndCatFeature)
mapping = mapping.diff(axis=0).abs()
mapping = mapping.iloc[1]
mapping = mapping.sort_values(ascending=False).head(10)
return mapping
def getTopRows(self, rows=5):
return self.dataFrame.head(rows)
def __check_seq_feature(self, data):
if data.dtypes in ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']:
total_record = data.count()
count = (data - data.shift() == 1).sum()
if ((total_record - count) == 1):
return True
return False
def __match_date_format(self, data):
try:
## Using regex lib, we are check if any col contains datetime format like yyyy-mm-dd or yyyy/mm/dd format. if it finds return true.
import re
u_data = data.to_string()
date_find = (re.findall(r"[0-9]{1,4}[\_|\-|\/|\|][0-9]{1,2}[\_|\-|\/|\|][0-9]{1,4}", u_data) or re.findall(r'\d{,2}\-[A-Za-z]{,9}\-\d{,4}', u_data) or re.findall(r"[0-9]{1,4}[\_|\-|\/|\|][0-9]{1,2}[\_|\-|\/|\|][0-9]{1,4}.\d", u_data) or re.findall(r"[0-9]{1,4}[\_|\-|\/|\|][A-Za-z]{,9}[\_|\-|\/|\|][0-9]{1,4}", u_data))
if (date_find):
try:
data = pd.to_datetime(data, utc=True)
return True
except Exception as e:
##If not a datetime col, just pass to return false statement.
pass
except Exception as e:
data = data.astype(str)
beforecheckcount = data.count()
#####YYYY-MM-DD HH:MM:SS####
check1 = data[data.str.match(
    r'(^\d\d\d\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01]) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9]):([0-9]|[0-5][0-9])$)') == True]
aftercheckcount = check1.count()
if (beforecheckcount == aftercheckcount):
    return True
#####MM/DD/YYYY HH:MM####
check2 = data[data.str.match(
    r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\d\d\d\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True]
aftercheckcount = check2.count()
if (beforecheckcount == aftercheckcount):
    return True
#####DD-MM-YYYY HH:MM####
check2 = data[data.str.match(
    r'(^(0?[1-9]|[12][0-9]|3[01])-(0?[1-9]|1[0-2])-(\d\d\d\d) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9])$)') == True]
aftercheckcount = check2.count()
if (beforecheckcount == aftercheckcount):
    return True
#####YYYY/MM/DD####
check2 = data[data.str.match(r'(^\d\d\d\d/(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])$)') == True]
aftercheckcount = check2.count()
if (beforecheckcount == aftercheckcount):
    return True
#####MM/DD/YYYY####
check2 = data[data.str.match(r'(^(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])/(\d\d\d\d)$)') == True]
aftercheckcount = check2.count()
if (beforecheckcount == aftercheckcount):
    return True
#####YYYY-MM-DD HH:MM:SS.fff####
check11 = data[data.str.match(
    r'(^\d\d\d\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01]) (00|0?[0-9]|1[0-9]|2[0-4]):([0-9]|[0-5][0-9]):([0-9]|[0-5][0-9])\.(\d{3})$)') == True]
aftercheckcount = check11.count()
if (beforecheckcount == aftercheckcount):
    return True
return False
def __check_category_features(self, modelFeatures):
aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
dataType = modelFeatures.dtypes
numOfRows = len(modelFeatures)
if dataType not in aionNumericDtypes:
if dataType != 'bool':
nUnique = len(modelFeatures.unique().tolist())
if nUnique <= 30:
return True
return False
def __check_constant_features(self, modelFeatures):
return len(modelFeatures.unique().tolist()) == 1
def __featureRatio(self, modelFeatures):
if len(modelFeatures):
return len(modelFeatures.unique().tolist()) / len(modelFeatures)
return 0
def __LeastfeatureRatio(self):
ratio = 1
feat = ""
for feature in (self.numericAndCatFeature + self.textFeature):
r = self.__featureRatio(self.dataFrame[feature])
if r < ratio:
ratio = r
feat = feature
return feat
def getDistribution(self):
aionNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
df = self.dataFrame[self.numericAndCatFeature].copy()  # copy to avoid mutating the source frame
dist={}
for feature in self.numericAndCatFeature:
if feature in self.categoricalFeature:
df[feature] = pd.Categorical(df[feature])
df[feature] = df[feature].cat.codes
df[feature] = df[feature].fillna(df[feature].mode()[0])
else:
df[feature] = df[feature].fillna(df[feature].mean())
distributionname,sse = self.DistributionFinder(df[feature])
if distributionname == '':
dist[feature] = 'Unknown'
else:
dist[feature] = distributionname
return dist
def DistributionFinder(self,data):
try:
distributionName = ""
sse = 0.0
KStestStatic = 0.0
dataType = ""
if (data.dtype == "float64"):
dataType = "Continuous"
elif (data.dtype == "int"):
dataType = "Discrete"
elif (data.dtype == "int64"):
dataType = "Discrete"
if (dataType == "Discrete"):
distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson]
index, counts = np.unique(data.astype(int), return_counts=True)
if (len(index) >= 2):
best_sse = np.inf
y1 = []
total = sum(counts)
mean = float(sum(index * counts)) / total
variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1)
dispersion = mean / float(variance)
theta = 1 / float(dispersion)
r = mean * (float(theta) / (1 - theta))  # fix precedence: theta/(1-theta), not (theta/1)-theta
datamin = data.min()
datamax = data.max()
for j in counts:
y1.append(float(j) / total)
pmf1 = st.bernoulli.pmf(index, mean)
pmf2 = st.binom.pmf(index, len(index), p=mean / len(index))
pmf3 = st.geom.pmf(index, 1 / float(1 + mean))
pmf4 = st.nbinom.pmf(index, mean, r)
pmf5 = st.poisson.pmf(index, mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1 - pmf5, 2.0))
sselist = [sse1, sse2, sse3, sse4, sse5]
best_distribution = 'NA'
for i in range(0, len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName = best_distribution
sse = best_sse
elif (dataType == "Continuous"):
distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t,
st.gamma, st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin = data.min()
datamax = data.max()
nrange = datamax - datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
params = distribution.fit(data.astype(float))
arg = params[:-2]
loc = params[-2]
scale = params[-1]
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if (best_sse > sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName = best_distribution
sse = best_sse
        except Exception:
            response = str(sys.exc_info()[0])
            message = 'Job has Failed: ' + response
            print(message)
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
return distributionName, sse
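# --- Illustrative sketch (not part of the original module): the same
# SSE-against-histogram selection used by the continuous branch of
# DistributionFinder above, shown standalone. The candidate list and the
# sample size here are arbitrary choices for the demo.
def _demo_best_continuous_fit():
    import numpy as np
    import scipy.stats as st
    data = st.norm.rvs(loc=5.0, scale=2.0, size=1000, random_state=0)
    y, x = np.histogram(data, bins='auto', density=True)
    x = (x + np.roll(x, -1))[:-1] / 2.0  # bin centers
    best_name, best_sse = st.norm.name, np.inf
    for dist in (st.uniform, st.expon, st.norm, st.gamma):
        params = dist.fit(data)
        pdf = dist.pdf(x, *params[:-2], loc=params[-2], scale=params[-1])
        sse = np.sum(np.power(y - pdf, 2.0))
        if 0 < sse < best_sse:
            best_name, best_sse = dist.name, sse
    return best_name, best_sse  # expected: ('norm', <small sse>)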
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import sqlite3
from pathlib import Path
import json
import os
import rsa
import boto3
import pandas as pd
import time
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
def table_exists(self, name):
        # parameterized query avoids interpolating the table name unescaped
        query = "SELECT name FROM sqlite_master WHERE type='table' AND name=?;"
        listOfTables = self.cursor.execute(query, (name,)).fetchall()
return len(listOfTables) > 0
def read_data(self, table_name):
query = f"SELECT * FROM {table_name}"
row = self.cursor.execute(query).fetchall()
return list(row)
#return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
        self.conn.execute(query)
return True
def delete_record(self,table_name,col_name, col_value):
try:
            query = f"DELETE FROM {table_name} WHERE {col_name}=?"
            self.conn.execute(query, (col_value,))
self.conn.commit()
return 'success'
except Exception as e :
print(str(e))
print("Deletion Failed")
return 'error'
def get_data(self,table_name,col_name,col_value):
        query = f"SELECT * FROM {table_name} WHERE {col_name}=?"
        row = self.cursor.execute(query, (col_value,)).fetchone()
        if row is None:
return []
return list(row)
def write_data(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def close(self):
self.conn.close()
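# --- Illustrative usage sketch (not part of the original module): a round
# trip through the sqlite_db helper above, using a throwaway temp directory.
def _demo_sqlite_db():
    import tempfile
    tmp = Path(tempfile.mkdtemp())
    db = sqlite_db(tmp, 'demo.db')
    db.write_data(pd.DataFrame({'azurename': ['acct1'],
                                'azureaccountkey': ['key1'],
                                'containername': ['data']}), 'azurebucket')
    row = db.get_data('azurebucket', 'azurename', 'acct1')
    db.close()
    return row  # expected: ['acct1', 'key1', 'data']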
def add_new_azureStorage(request):
try:
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
if request.POST["azurename"] =='' or request.POST["azureaccountkey"] == '' or request.POST["containername"] == '' :
return 'error'
newdata = {}
newdata['azurename'] = [request.POST["azurename"]]
newdata['azureaccountkey'] = [request.POST["azureaccountkey"]]
newdata['containername'] = [request.POST["containername"]]
name = request.POST["azurename"]
if sqlite_obj.table_exists("azurebucket"):
if(len(sqlite_obj.get_data('azurebucket','azurename',name))>0):
return 'error1'
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'azurebucket')
except:
return 'error'
def get_azureStorage():
try:
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
temp_data = sqlite_obj.read_data('azurebucket')
data = []
for x in temp_data:
data_dict = {}
data_dict['azurename'] = x[0]
data_dict['azureaccountkey'] = x[1]
data_dict['containername'] = x[2]
data.append(data_dict)
except Exception as e:
print(e)
data = []
return data
def read_azureStorage(name,directoryname,DATA_FILE_PATH):
try:
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
data = sqlite_obj.get_data('azurebucket','azurename',name)
except:
data = []
found = False
if len(data)!=0:
storage_account_name = str(data[0])
storage_account_key = str(data[1])
azure_container_name = data[2]
found = True
try:
if found:
root_dir = str(directoryname)
from azure.storage.filedatalake import DataLakeServiceClient
import io
import pandavro as pdx
from detect_delimiter import detect
try:
service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format("https", storage_account_name), credential=storage_account_key)
print(azure_container_name)
file_system_client = service_client.get_file_system_client(azure_container_name)
print(root_dir)
file_paths = file_system_client.get_paths(path=root_dir)
main_df = pd.DataFrame()
for path in file_paths:
if not path.is_directory:
file_client = file_system_client.get_file_client(path.name)
file_ext = os.path.basename(path.name).split('.', 1)[1]
                        if file_ext in ["csv", "tsv"]:
                            # csv_local/avro_local were undefined in the original;
                            # download into the DATA_FILE_PATH directory passed to this function
                            csv_local = os.path.join(DATA_FILE_PATH, os.path.basename(path.name))
                            with open(csv_local, "wb") as my_file:
                                download = file_client.download_file()
                                download.readinto(my_file)
                            with open(csv_local, 'r') as file:
                                data = file.read()
                            row_delimiter = detect(text=data, default=None, whitelist=[',', ';', ':', '|', '\t'])
                            processed_df = pd.read_csv(csv_local, sep=row_delimiter)
                        if file_ext == "parquet":
                            download = file_client.download_file()
                            stream = io.BytesIO()
                            download.readinto(stream)
                            processed_df = pd.read_parquet(stream, engine='pyarrow')
                        if file_ext == "avro":
                            avro_local = os.path.join(DATA_FILE_PATH, os.path.basename(path.name))
                            with open(avro_local, "wb") as my_file:
                                download = file_client.download_file()
                                download.readinto(my_file)
                            processed_df = pdx.read_avro(avro_local)
                        if not main_df.empty:
                            main_df = pd.concat([main_df, processed_df], ignore_index=True)
                        else:
                            main_df = pd.DataFrame(processed_df)
except Exception as e:
msg = str(e).split(".")[0]
print(msg)
return 'Error',str(msg), pd.DataFrame()
return "Success","",main_df
except:
return 'Error',"Please check bucket configuration", pd.DataFrame()
def remove_azure_bucket(name):
from appbe.dataPath import DATA_DIR
file_path = os.path.join(DATA_DIR,'sqlite')
sqlite_obj = sqlite_db(file_path,'config.db')
    return sqlite_obj.delete_record('azurebucket','azurename',name)
from typing import Union
import numpy as np
import pandas as pd
from sklearn.neighbors import BallTree
def hopkins(data_frame: Union[np.ndarray, pd.DataFrame], sampling_size: int) -> float:
    if isinstance(data_frame, np.ndarray):
data_frame = pd.DataFrame(data_frame)
data_frame_sample = sample_observation_from_dataset(data_frame, sampling_size)
sample_distances_to_nearest_neighbours = get_distance_sample_to_nearest_neighbours(
data_frame, data_frame_sample
)
uniformly_selected_observations_df = simulate_df_with_same_variation(
data_frame, sampling_size
)
df_distances_to_nearest_neighbours = get_nearest_sample(
data_frame, uniformly_selected_observations_df
)
x = sum(sample_distances_to_nearest_neighbours)
y = sum(df_distances_to_nearest_neighbours)
    if x + y == 0:
        raise Exception("The denominator of the Hopkins statistic is null")
    return float(x / (x + y))
def get_nearest_sample(df: pd.DataFrame, uniformly_selected_observations: pd.DataFrame):
tree = BallTree(df, leaf_size=2)
dist, _ = tree.query(uniformly_selected_observations, k=1)
uniformly_df_distances_to_nearest_neighbours = dist
return uniformly_df_distances_to_nearest_neighbours
def simulate_df_with_same_variation(
df: pd.DataFrame, sampling_size: int
) -> pd.DataFrame:
max_data_frame = df.max()
min_data_frame = df.min()
uniformly_selected_values_0 = np.random.uniform(
min_data_frame[0], max_data_frame[0], sampling_size
)
uniformly_selected_values_1 = np.random.uniform(
min_data_frame[1], max_data_frame[1], sampling_size
)
uniformly_selected_observations = np.column_stack(
(uniformly_selected_values_0, uniformly_selected_values_1)
)
if len(max_data_frame) >= 2:
for i in range(2, len(max_data_frame)):
uniformly_selected_values_i = np.random.uniform(
min_data_frame[i], max_data_frame[i], sampling_size
)
to_stack = (uniformly_selected_observations, uniformly_selected_values_i)
uniformly_selected_observations = np.column_stack(to_stack)
uniformly_selected_observations_df = pd.DataFrame(uniformly_selected_observations)
return uniformly_selected_observations_df
def get_distance_sample_to_nearest_neighbours(df: pd.DataFrame, data_frame_sample):
tree = BallTree(df, leaf_size=2)
dist, _ = tree.query(data_frame_sample, k=2)
data_frame_sample_distances_to_nearest_neighbours = dist[:, 1]
return data_frame_sample_distances_to_nearest_neighbours
def sample_observation_from_dataset(df, sampling_size: int):
if sampling_size > df.shape[0]:
        raise Exception("The sampling size is bigger than the number of observations in the dataset")
data_frame_sample = df.sample(n=sampling_size)
return data_frame_sample
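# --- Illustrative usage sketch (not part of the original module): on
# uniformly random data the Hopkins statistic should land near 0.5, while
# clustered data pushes it towards 1.
def _demo_hopkins():
    rng = np.random.default_rng(0)
    df = pd.DataFrame(rng.uniform(size=(200, 3)))
    return hopkins(df, sampling_size=50)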
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pyodbc
import pandas as pd
import json
import sqlalchemy as db
import urllib
def get_connection(request):
dbType = request.session['dbType']
connection_string = ""
if dbType.lower()=="sqlite":
filepath = request.session['filepath']
#table = request.session["tablenamesql"]
connection_string = "sqlite:///"+str(filepath)
elif dbType.lower() in ["postgresql","mysql","mssql"]:
db_name = request.session['dbname']
password = request.session['password']
user = request.session['username']
port = request.session['port']
host = request.session['host']
password=urllib.parse.quote_plus(password)
if dbType.lower()=="postgresql":
connection_string = "postgresql+psycopg2://" + user + ":" + password + "@" + host + ":" + port + "/" + db_name
if dbType.lower()=="mysql":
connection_string = "mysql+pyodbc://" + user + ":" + password + "@" + host + ":" + port + "/" + db_name
if dbType.lower()=="mssql":
driver=request.session['driver']
params = urllib.parse.quote_plus(
'Driver=%s;' % driver +
'Server=tcp:%s,' % host +
'%s;' % port +
'Database=%s;' % db_name +
'Uid=%s;' % user +
'Pwd={%s};' % password +
'Encrypt=yes;' +
'TrustServerCertificate=no;' +
'Connection Timeout=30;')
connection_string = 'mssql+pyodbc:///?odbc_connect=' + params
return connection_string
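# --- Illustrative sketch (not part of the original module, placeholder
# credentials): how the mssql branch above URL-encodes raw ODBC parameters
# into a single SQLAlchemy URL via odbc_connect.
def _demo_mssql_url():
    params = urllib.parse.quote_plus(
        'Driver=ODBC Driver 17 for SQL Server;'
        'Server=tcp:localhost,1433;'
        'Database=demo;Uid=user;Pwd={p@ss};'
        'Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;')
    return 'mssql+pyodbc:///?odbc_connect=' + params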
def list_tables(request):
connection_string = get_connection(request)
engine = db.create_engine(connection_string)
connection = engine.connect()
metadata = db.MetaData()
metadata.reflect(engine)
dt_list = []
try:
dt_list= list(metadata.tables.keys())
print(dt_list)
return dt_list
except:
print("Something went wrong")
return dt_list
def list_tables_fields(request,table_list):
connection_string = get_connection(request)
engine = db.create_engine(connection_string)
connection = engine.connect()
metadata = db.MetaData()
metadata.reflect(engine)
table_field_obj = {}
table_field_obj['data'] = []
try:
# filepath = request.session['filepath']
#table = request.session["tablenamesql"]
table_list = json.loads(table_list)
for table in table_list:
tf_obj = {}
tf_obj['TableName'] = str(table).strip()
tf_obj['Fields']= []
table = db.Table(table, metadata, autoload=True, autoload_with=engine)
col = table.columns.keys()
tempdata = []
for x in col:
my_list = {"column_name": x,"is_select":"false"}
tempdata.append(my_list)
tf_obj['Fields'] = tempdata
table_field_obj['data'].append(tf_obj)
return json.dumps(table_field_obj)
    except Exception as e:
print("Something went wrong "+str(e))
return table_field_obj
def get_data(connection_string,table):
engine = db.create_engine(connection_string)
connection = engine.connect()
metadata = db.MetaData()
metadata.reflect(engine)
table = db.Table(table,metadata, autoload=True, autoload_with=engine)
query = db.select([table])
ResultProxy = connection.execute(query)
ResultSet = ResultProxy.fetchall()
col = table.columns.keys()
return pd.DataFrame(ResultSet, columns=col)
def getDataFromSingleTable(request):
dbType = request.session['dbType']
if dbType.lower() == "sqlite":
table = request.session["tablenamesql"]
else:
table = request.session["tablename"]
connection_string = get_connection(request)
df = get_data(connection_string,table)
return df
def validatequery(request,table_details,join_details,where_details):
resultdata = []
try:
table_details = json.loads(table_details)
join_details = json.loads(join_details)
where_details = json.loads(where_details)
connection_string = get_connection(request)
engine = db.create_engine(connection_string)
connection = engine.connect()
metadata = db.MetaData()
metadata.reflect(engine)
sel_col = []
for item in table_details:
table = item["TableName"]
table = db.Table(table, metadata, autoload=True, autoload_with=engine)
for ele in item["Fields"]:
if str(ele["is_select"]).lower() == 'true':
sel_col.append(table.columns[ele["column_name"]])
join_condition = []
where_clause = ""
for item in join_details:
table1 = item["Table1Name"]
table1 = db.Table(table1, metadata, autoload=True, autoload_with=engine)
left_join = table1.columns[item["Table1Field"]]
table2 = item["Table2Name"]
table2 = db.Table(table2, metadata, autoload=True, autoload_with=engine)
right_join = table2.columns[item["Table2Field"]]
join_condition = "{left_join} {Condition}= {right_join}".format(left_join=left_join,
Condition=item["Condition"],right_join= right_join)
'''dbType = request.session['dbType']
if dbType.lower()=="sqlite":
for item in where_details:
where_clause = "{table}.'{column}'{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"])
if dbType.lower()=="postgresql":
for item in where_details:
where_clause = "{table}.{column}{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"])
'''
if len(join_details)!=0:
try:
for item in where_details:
where_clause = "{table}.'{column}'{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"])
                query = db.select(sel_col).\
                    select_from(table1.join(table2, db.text(join_condition))). \
                    where(db.and_(db.text(where_clause)))
ResultProxy = connection.execute(query)
ResultSet = ResultProxy.fetchall()
except:
for item in where_details:
where_clause = "{table}.{column}{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"])
                query = db.select(sel_col).\
                    select_from(table1.join(table2, db.text(join_condition))). \
                    where(db.and_(db.text(where_clause)))
ResultProxy = connection.execute(query)
ResultSet = ResultProxy.fetchall()
else:
table = table_details[0]["TableName"]
table = db.Table(table, metadata, autoload=True, autoload_with=engine)
try:
for item in where_details:
where_clause = "{table}.'{column}'{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"])
                query = db.select(sel_col). \
                    select_from(table). \
                    where(db.and_(db.text(where_clause)))
ResultProxy = connection.execute(query)
ResultSet = ResultProxy.fetchall()
except:
for item in where_details:
where_clause = "{table}.{column}{condition}{value}".format(table=item["TableName"],column=str(item["FieldName"]),condition=item["Condition"],value=item["CompareValue"])
                query = db.select(sel_col). \
                    select_from(table). \
                    where(db.and_(db.text(where_clause)))
ResultProxy = connection.execute(query)
ResultSet = ResultProxy.fetchall()
if len(ResultSet) > 0:
data = pd.DataFrame(ResultSet)
data.columns = ResultSet[0].keys()
print(data)
            return data,"query executed successfully"
else:
return pd.DataFrame(),"No rows returned"
# conn = get_connection(server_url,username_actian,password_actian,database_actian)
# sql_text = query
# cur = conn.cursor()
# resultdata = simple_select(cur, query)
# cur.close()
#df = pd.DataFrame(resultdata)
#print(df)
except Exception as e:
print(e)
        return pd.DataFrame(), str(e)
import json
import os
import pandas as pd
import urllib, base64
def check_deepCheckPlots(deployedLocation):
deepCheck = 'False'
boostOverfit = 'False'
boostOverfitCond = 'False'
mi='False'
miCond='False'
smc = 'False'
smsCond = 'False'
boostOverfitFile= os.path.join(deployedLocation,'log','boosting_overfit.html')
boostOverfitCondFile= os.path.join(deployedLocation,'log','boosting_overfit_condition.html')
smcFile= os.path.join(deployedLocation,'log','smc.html')
smcCondFile= os.path.join(deployedLocation,'log','smc_condition.html')
miFile= os.path.join(deployedLocation,'log','mi.html')
miConFile= os.path.join(deployedLocation,'log','mi_con.html')
file_exists = os.path.exists(boostOverfitFile)
if file_exists:
deepCheck = 'True'
boostOverfit = 'True'
file_exists = os.path.exists(boostOverfitCondFile)
if file_exists:
deepCheck = 'True'
boostOverfitCond = 'True'
file_exists = os.path.exists(miFile)
if file_exists:
deepCheck = 'True'
mi = 'True'
file_exists = os.path.exists(miConFile)
if file_exists:
deepCheck = 'True'
miCond = 'True'
file_exists = os.path.exists(smcFile)
if file_exists:
deepCheck = 'True'
smc = 'True'
file_exists = os.path.exists(smcCondFile)
if file_exists:
deepCheck = 'True'
smsCond = 'True'
output = {'deepCheck':deepCheck,'boostOverfit':boostOverfit,'boostOverfitCond':boostOverfitCond,'mi':mi,'miCond':miCond,'smc':smc,'smsCond':smsCond}
return output
def FeaturesUsedForTraining(output_json):
resultJsonObj = json.loads(output_json)
result = {}
result['Status'] = resultJsonObj['status']
result['ModelType'] = resultJsonObj['data']['ModelType']
result['ScoreType'] = resultJsonObj['data']['ScoreType']
result['FeaturesUsed'] = resultJsonObj['data']['featuresused']
result['BestModel'] = resultJsonObj['data']['BestModel']
return result
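# --- Illustrative sketch (made-up payload, not part of the original module):
# the minimal training-output JSON that FeaturesUsedForTraining above expects.
def _demo_features_used():
    payload = json.dumps({'status': 'SUCCESS',
                          'data': {'ModelType': 'classification',
                                   'ScoreType': 'Accuracy',
                                   'featuresused': "['f1', 'f2']",
                                   'BestModel': 'Logistic Regression'}})
    return FeaturesUsedForTraining(payload)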
def ParseResults(output_json):
msg1 = 'Results...'
resultJsonObj = json.loads(output_json)
result = {}
    survival_images = []
result['Status'] = resultJsonObj['status']
result['ModelType'] = resultJsonObj['data']['ModelType']
if 'vmDetails' in resultJsonObj['data']:
result['DeployLocation'] = resultJsonObj['data']['vmDetails']
else:
result['DeployLocation'] = resultJsonObj['data']['deployLocation']
result['BestModel'] = resultJsonObj['data']['BestModel']
if str(resultJsonObj['data']['BestScore']) == "NA":
result['BestScore'] = 'NA'
else:
result['BestScore'] = round(float(resultJsonObj['data']['BestScore']), 2)
result['ScoreType'] = resultJsonObj['data']['ScoreType']
result['FeaturesUsed'] = resultJsonObj['data']['featuresused']
##### Training Confusion Matrix
result['problem_type'] = result['ModelType']
if result['ModelType'].lower() == 'timeseriesanomalydetection':
result['problem_type'] = 'TimeSeriesAnomalydetection'
if result['ModelType'] == 'classification' or result['ModelType'].lower() == 'distributed classification' or (result['ModelType'] == 'anomalydetection' and (result['BestScore']) != 0) or result['ModelType'] == 'ImageClassification':
bestmodel = resultJsonObj['data']['BestModel']
if bestmodel.lower() == 'nas':
modelSummary= os.path.join(result['DeployLocation'],'summary.txt')
            with open(modelSummary, 'r') as f:
                file_content = f.read()
#print(file_content)
result['modelSummary'] = file_content
#task 11997
if result['ModelType'].lower() == 'classification':
result['problem_type'] = 'Classification'
elif result['ModelType'].lower() == 'anomalydetection':
result['problem_type'] = 'AnomalyDetection'
elif result['ModelType'].lower() == 'imageclassification':
result['problem_type'] = 'ImageClassification'
elif result['ModelType'].lower() == 'distributed classification':
result['problem_type'] = 'Distributed Classification'
try:
result['deepCheck'] = check_deepCheckPlots(result['DeployLocation'])
except Exception as e:
print(e)
if 'ConfusionMatrix' in resultJsonObj['data']['trainmatrix']:
TrainConfusionMatrix = resultJsonObj['data']['trainmatrix']['ConfusionMatrix']
numLabels = len(TrainConfusionMatrix)
TrainConfusionMatrixList = []
for act_key, value in TrainConfusionMatrix.items():
temp = {}
temp['Label'] = act_key
for pred_key, pred_value in value.items():
temp[pred_key] = pred_value
TrainConfusionMatrixList.append(temp)
result['TrainConfusionMatrix'] = TrainConfusionMatrixList
TrainClassificationReport = resultJsonObj['data']['trainmatrix']['ClassificationReport']
numRows = len(TrainClassificationReport)
TrainClassificationReportList = []
metrics_keys_list = []
for key, value in TrainClassificationReport.items():
temp = {}
temp['Label'] = key
if isinstance( value, dict):
for metricsKey, metricsValue in value.items():
temp[metricsKey] = round(metricsValue, 4)
if metricsKey not in metrics_keys_list:
metrics_keys_list.append( metricsKey)
else:
if metrics_keys_list:
for key in metrics_keys_list:
temp[key] = round(value, 4)
TrainClassificationReportList.append(temp)
result['TrainClassificationReport'] = TrainClassificationReportList
result['Train_ROC_AUC_SCORE'] = round(float(resultJsonObj['data']['trainmatrix']['ROC_AUC_SCORE']), 4)
else:
result['TrainClassificationReport'] = ''
result['Train_ROC_AUC_SCORE']=''
##### Testing Confusion Matix
if 'ConfusionMatrix' in resultJsonObj['data']['matrix']:
ConfusionMatrix = resultJsonObj['data']['matrix']['ConfusionMatrix']
numLabels = len(ConfusionMatrix)
ConfusionMatrixList = []
for act_key, value in ConfusionMatrix.items():
temp = {}
temp['Label'] = act_key
for pred_key, pred_value in value.items():
temp[pred_key] = pred_value
ConfusionMatrixList.append(temp)
result['ConfusionMatrix'] = ConfusionMatrixList
ClassificationReport = resultJsonObj['data']['matrix']['ClassificationReport']
numRows = len(ClassificationReport)
ClassificationReportList = []
metrics_keys_list = []
for key, value in ClassificationReport.items():
temp = {}
temp['Label'] = key
if isinstance( value, dict):
for metricsKey, metricsValue in value.items():
temp[metricsKey] = round(metricsValue, 4)
if metricsKey not in metrics_keys_list:
metrics_keys_list.append( metricsKey)
else:
if metrics_keys_list:
for key in metrics_keys_list:
temp[key] = round(value, 4)
ClassificationReportList.append(temp)
result['ClassificationReport'] = ClassificationReportList
result['ROC_AUC_SCORE'] = round(float(resultJsonObj['data']['matrix']['ROC_AUC_SCORE']), 4)
elif result['ModelType'] == 'similarityIdentification':
result['problem_type'] = 'similarityIdentification'
elif result['ModelType'] == 'contextualSearch':
result['problem_type'] = 'contextualSearch'
elif result['ModelType'] == 'MultiLabelPrediction':
result['problem_type'] = 'MultiLabelPrediction'
matrix = resultJsonObj['data']['matrix']
training_matrix = []
for x in matrix:
fmatrix = {}
            fmatrix['feature'] = x
performance = {}
for y in matrix[x]:
performance[y] = matrix[x][y]
fmatrix['performance'] = performance
training_matrix.append(fmatrix)
testmatrix = resultJsonObj['data']['testmatrix']
testing_matrix = []
for x in testmatrix:
fmatrix = {}
fmatrix['feature'] = x
performance = {}
for y in testmatrix[x]:
performance[y] = testmatrix[x][y]
fmatrix['performance'] = performance
testing_matrix.append(fmatrix)
result['testing_matrix'] = testing_matrix
result['training_matrix'] = training_matrix
elif result['ModelType'] == 'regression' or result['ModelType'].lower() == 'distributed regression':
try:
result['deepCheck'] = check_deepCheckPlots(result['DeployLocation'])
except Exception as e:
print(e)
try:
result['problem_type'] = 'Regression'
testing_matrix = {}
if 'MAE' in resultJsonObj['data']['matrix']:
testing_matrix['MAE'] = float(resultJsonObj['data']['matrix'].get('MAE','0'))
testing_matrix['R2Score'] = float(resultJsonObj['data']['matrix'].get('R2Score','0'))
testing_matrix['MSE'] = float(resultJsonObj['data']['matrix'].get('MSE','0'))
testing_matrix['MAPE'] = float(resultJsonObj['data']['matrix'].get('MAPE','0'))
testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix'].get('RMSE','0'))
testing_matrix['NormalisedRMSEPercentage'] = float(resultJsonObj['data']['matrix'].get('Normalised RMSE(%)','0'))
result['testing_matrix'] = testing_matrix
training_matrix = {}
training_matrix['MAE'] = float(resultJsonObj['data']['trainmatrix'].get('MAE','0'))
training_matrix['R2Score'] = float(resultJsonObj['data']['trainmatrix'].get('R2Score','0'))
training_matrix['MSE'] = float(resultJsonObj['data']['trainmatrix'].get('MSE','0'))
training_matrix['MAPE'] = float(resultJsonObj['data']['trainmatrix'].get('MAPE','0'))
training_matrix['RMSE'] = float(resultJsonObj['data']['trainmatrix'].get('RMSE','0'))
training_matrix['NormalisedRMSEPercentage'] = float(resultJsonObj['data']['trainmatrix'].get('Normalised RMSE(%)','0'))
result['training_matrix'] = training_matrix
except Exception as e:
print(e)
elif result['ModelType'] == 'Text Similarity':
result['problem_type'] = 'textsimilarity'
testing_matrix = {}
testing_matrix['Accuracy'] = float(resultJsonObj['data']['matrix']['Accuracy'])
testing_matrix['ROC_AUC'] = float(resultJsonObj['data']['matrix']['ROC AUC'])
result['testing_matrix'] = testing_matrix
training_matrix = {}
training_matrix['Accuracy'] = float(resultJsonObj['data']['trainmatrix']['Accuracy'])
training_matrix['ROC_AUC'] = float(resultJsonObj['data']['trainmatrix']['ROC AUC'])
result['training_matrix'] = training_matrix
elif result['ModelType'] == 'RecommenderSystem': #taskid 11190
result['problem_type'] = 'Recommender'
testing_matrix = {}
testing_matrix['RMSE'] = 'NA'
result['testing_matrix'] = testing_matrix
training_matrix = {}
training_matrix['RMSE'] = 'NA'
result['training_matrix'] = training_matrix
elif result['ModelType'] == 'SurvivalAnalysis':
result['problem_type'] = 'SurvivalAnalysis'
survivalProbabilityjson = resultJsonObj['data']['survivalProbability']
performanceimages = resultJsonObj['data']['imageLocation']
start = '['
end = ']'
performanceimages = performanceimages[performanceimages.find(start) + len(start):performanceimages.rfind(end)]
performanceimages = performanceimages.split(',')
for imagefile in performanceimages:
imagefile = imagefile.replace("'", "")
string = base64.b64encode(open(imagefile, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
            survival_images.append(image_64)
result['survivalProbability'] = survivalProbabilityjson
elif result['ModelType'] == 'StateTransition':
result['problem_type'] = 'StateTransition'
stateprobabilityfile = os.path.join(result['DeployLocation'],'stateTransitionProbability.csv')
clusterfile = os.path.join(result['DeployLocation'],'stateClustering.csv')
if(os.path.isfile(stateprobabilityfile)):
df_prob = pd.read_csv(stateprobabilityfile)
df_prob = df_prob[['State','NextState','Probability']]
result['probability'] = df_prob
if(os.path.isfile(clusterfile)):
df_clus = pd.read_csv(clusterfile)
df_clus = df_clus[['clusterid','clusterlist']]
result['cluster'] = df_clus
elif result['ModelType'].lower() == 'timeseriesforecasting': #task 11997
result['problem_type'] = 'TimeSeriesForecasting'
if result['BestModel'] == 'FBPROPHET':
imagefile = os.path.join(result['DeployLocation'],'log','img','prophet_fig.png')
string = base64.b64encode(open(imagefile, "rb").read())
image_64 = 'data:image/png;base64,' + urllib.parse.quote(string)
            survival_images.append(image_64)
testing_matrix = {}
testing_matrix['MAE'] = float(resultJsonObj['data']['matrix']['MAE'])
testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE'])
testing_matrix['R2'] = float(resultJsonObj['data']['matrix']['R2'])
testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE'])
result['testing_matrix'] = testing_matrix
forecastjson = resultJsonObj['data']['forecasts']
result['forecast'] = forecastjson
if result['BestModel'] == 'VAR':
'''
FeaturesMatrix = resultJsonObj['data']['matrix']['FeaturesMatrix']
mae = ''
mse = ''
        R2 = ''
rmse = ''
for x in FeaturesMatrix:
if mae != '':
mae += ','
if mse != '':
mse += ','
if R2 != '':
R2 += ','
if rmse != '':
rmse += ','
featurename = x['Features']
mae = mae + featurename + '=' + x['MAE']
mse = mse + featurename + '=' + x['MSE']
R2 = R2 + featurename + '=' + x['R2']
rmse = rmse + featurename + '=' + x['RMSE']
testing_matrix = {}
testing_matrix['MAE'] = mae
testing_matrix['MSE'] = mse
testing_matrix['R2'] = R2
testing_matrix['RMSE'] = rmse
result['testing_matrix'] = testing_matrix
forecastjson = resultJsonObj['data']['forecasts']
result['forecast'] = forecastjson
'''
testing_matrix = {}
testing_matrix['MAE'] = float(resultJsonObj['data']['matrix']['MAE'])
testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE'])
testing_matrix['R2'] = float(resultJsonObj['data']['matrix']['R2'])
testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE'])
result['testing_matrix'] = testing_matrix
forecastjson = resultJsonObj['data']['forecasts']
result['forecast'] = forecastjson
elif result['BestModel'] == 'LSTM' or result['BestModel'] == 'MLP':
testing_matrix = {}
testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE'])
testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE'])
result['testing_matrix'] = testing_matrix
forecastjson = resultJsonObj['data']['forecasts']
result['forecast'] = forecastjson
else:
testing_matrix = {}
testing_matrix['MAE'] = float(resultJsonObj['data']['matrix']['MAE'])
testing_matrix['MSE'] = float(resultJsonObj['data']['matrix']['MSE'])
testing_matrix['R2'] = float(resultJsonObj['data']['matrix']['R2'])
testing_matrix['RMSE'] = float(resultJsonObj['data']['matrix']['RMSE'])
result['testing_matrix'] = testing_matrix
forecastjson = resultJsonObj['data']['forecasts']
result['forecast'] = forecastjson
elif result['ModelType'] == 'topicmodelling':
result['problem_type'] = 'TopicModelling'
topics = resultJsonObj['topics']
df_topic = []
dataDict = {}
for x in topics:
dataDict = {}
words = topics[x]
print(words)
word = ''
for key in words:
print(key)
if word != '':
word = word+', '
word = word+key+'('+str(round(words[key],2))+')'
dataDict["ID"] = x
dataDict["Words"] = word
df_topic.append(dataDict)
result['topics'] = df_topic
elif result['ModelType'].lower() == 'association rule':
result['problem_type'] = 'AssociationRules'
deploy_location = result['DeployLocation']
freq_item_file = os.path.join(result['DeployLocation'],'frequentItems.csv')
if(os.path.isfile(freq_item_file)):
rules_file = os.path.join(result['DeployLocation'],'associationRules.csv')
if(os.path.isfile(rules_file)):
df_rules = pd.read_csv(rules_file)
df_rules = df_rules[['antecedents','consequents','support','confidence','lift']]
#df_rules['antecedents'] = df_rules['antecedents']
result['rules'] = df_rules
else:
result['error'] = 'There are no association found in frequent items above that threshold (minThreshold)'
else:
result['error'] = 'There are no frequent items above that threshold (minSupport), try by reducing the minSupport value'
elif result['ModelType'] == 'clustering':
result['problem_type'] = 'Clustering'
testing_matrix = {}
if 'SilHouette_Avg' in resultJsonObj['data']['matrix']:
testing_matrix['SilHouette_Avg'] = round(float(resultJsonObj['data']['matrix']['SilHouette_Avg']),2)
else:
testing_matrix['SilHouette_Avg'] = 'NA'
if 'DaviesBouldinScore' in resultJsonObj['data']['matrix']:
testing_matrix['DaviesBouldinScore'] = round(float(resultJsonObj['data']['matrix']['DaviesBouldinScore']),2)
else:
testing_matrix['DaviesBouldinScore'] = 'NA'
if 'CalinskiHarabazScore' in resultJsonObj['data']['matrix']:
testing_matrix['CalinskiHarabazScore'] = round(float(resultJsonObj['data']['matrix']['CalinskiHarabazScore']),2)
else:
testing_matrix['CalinskiHarabazScore'] = 'NA'
centroidpath = os.path.join(result['DeployLocation'],'centers.csv')
if(os.path.isfile(centroidpath)):
df_center = pd.read_csv(centroidpath)
df_center = df_center.rename(columns={"Unnamed: 0": "Cluster"})
            result['centerpoints'] = df_center.round(2)
result['testing_matrix'] = testing_matrix
training_matrix = {}
if 'SilHouette_Avg' in resultJsonObj['data']['matrix']:
training_matrix['SilHouette_Avg'] = round(float(resultJsonObj['data']['matrix']['SilHouette_Avg']),2)
training_matrix['DaviesBouldinScore'] = round(float(resultJsonObj['data']['matrix']['DaviesBouldinScore']),2)
training_matrix['CalinskiHarabazScore'] = round(float(resultJsonObj['data']['matrix']['CalinskiHarabazScore']),2)
else:
training_matrix['SilHouette_Avg'] = 'NA'
training_matrix['DaviesBouldinScore'] = 'NA'
training_matrix['CalinskiHarabazScore'] = 'NA'
result['training_matrix'] = training_matrix
#print(result)
evaluatedModelsList = resultJsonObj['data']['EvaluatedModels']
#print(evaluatedModelsList)
for index in range(len(evaluatedModelsList)):
        if evaluatedModelsList[index]['Score'] != 'NA':
            evaluatedModelsList[index]['Score'] = round(float(evaluatedModelsList[index]['Score']), 4)
if result['ModelType'] == 'classification':
evaluatedModelsList = sorted(evaluatedModelsList, key=lambda k: k['Score'],reverse=True)
else:
evaluatedModelsList = sorted(evaluatedModelsList, key=lambda k: k['Score'])
result['EvaluatedModels'] = evaluatedModelsList
result['LogFile'] = resultJsonObj['data']['LogFile']
    return result, survival_images
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import requests
import json
import os
from datetime import datetime
import socket
import getmac
from appbe.sqliteUtility import sqlite_db
import pandas as pd
from appbe.dataPath import DATA_DIR
def TelemetryCreateSyncState(state):
try:
newdata = {}
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'telemetry.db')
now = datetime.now()
SyncingTime = int(datetime.timestamp(now))
newdata.update({'ID':['1'],'state':[state],'syncingTime':[SyncingTime]})
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'syncState')
except Exception as e:
print(e)
pass
def TelemetryUpdateSyncState(state):
try:
newdata = {}
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'telemetry.db')
        now = datetime.now()
        SyncingTime = int(datetime.timestamp(now))
updated_data = '"state"="'+state+'","syncingTime"="'+str(SyncingTime)+'"'
sqlite_obj.update_data(updated_data,'ID="1"','syncState')
except Exception as e:
print(e)
pass
def checkTelemtry():
import subprocess
import sys
scriptPath = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','aion.py'))
if os.path.exists(scriptPath):
outputStr = subprocess.Popen([sys.executable,scriptPath,'-m','pushtelemetry'])
def SyncTelemetry():
try:
newdata = {}
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'telemetry.db')
if sqlite_obj.table_exists('syncState'):
data = sqlite_obj.read_data('syncState')[0]
param_keys = ['ID','state','syncingTime']
sync_data = dict((x,y) for x,y in zip(param_keys,data))
#print(sync_data['state'],sync_data['syncingTime'])
if sync_data['state'].lower() != 'syncing':
sync_time = sync_data['syncingTime']
now = datetime.now()
currTime = datetime.timestamp(now)
diffTime = int(float(currTime)) - int(float(sync_time))
#print(diffTime)
if int(diffTime) > 86400:
TelemetryUpdateSyncState('Syncing')
SendTelemetryUpdate(sync_time)
TelemetryUpdateSyncState('Done')
else:
TelemetryCreateSyncState('Initialize')
except Exception as e:
print(e)
pass
def UseCaseCreated(Usecase):
try:
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'telemetry.db')
newdata = {}
now = datetime.now()
ID = datetime.timestamp(now)
record_date = int(datetime.timestamp(now))
computername = socket.getfqdn()
macaddress = getmac.get_mac_address()
try:
user = os.getlogin()
except:
user = 'NA'
newdata.update({'ID':[str(int(ID))],'RecordDate': [record_date],'Usecase': [Usecase],'Operation':['Created'],'User':[str(user)],'HostName' :[computername],'MACAddress':[macaddress],'ProblemType':[''],'Algorithms':[''],'EDA':['No'],'Prediction':['No'],'MLaC':['No'],'Drift':['No'],'TrustedAI':['No']})
sqlite_obj.write_data(pd.DataFrame.from_dict(newdata),'logs')
except Exception as e:
print(e)
pass
def UpdateTelemetry(Usecase,operation,value):
try:
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'telemetry.db')
data = sqlite_obj.read_data('logs','Usecase="'+Usecase+'"')
#print(data)
if sqlite_obj.table_exists('logs'):
updated_data = operation+'="'+value+'"'
now = datetime.now()
ID = datetime.timestamp(now)
record_date = int(datetime.timestamp(now))
updated_data += ',"RecordDate"="'+str(record_date)+'"'
sqlite_obj.update_data(updated_data,'Usecase="'+Usecase+'"','logs')
except Exception as e:
print(e)
pass
def SendTelemetryUpdate(sync_time):
file_path = os.path.join(DATA_DIR, 'sqlite')
sqlite_obj = sqlite_db(file_path, 'telemetry.db')
if sqlite_obj.table_exists('logs'):
ddata = sqlite_obj.read_data("logs","RecordDate >= '"+str(sync_time)+"'")
#print(ddata)
keys = sqlite_obj.column_names('logs')
for data in ddata:
now = datetime.now()
ID = datetime.timestamp(now)
item = {}
item['ID'] = str(int(ID))
item['RecordID'] = data[ keys.index('ID')]
item['RecordDate'] = data[ keys.index('RecordDate')]
item['Usecase'] = data[ keys.index('Usecase')]
item['Operation'] = data[ keys.index('Operation')]
item['User'] = data[ keys.index('User')]
item['HostName'] = data[ keys.index('HostName')]
item['MACAddress'] = data[ keys.index('MACAddress')]
item['Algorithms'] = data[ keys.index('Algorithms')]
item['ProblemType'] = data[ keys.index('ProblemType')]
item['EDA'] = data[ keys.index('EDA')]
item['Prediction'] = data[ keys.index('Prediction')]
item['MLaC'] = data[ keys.index('MLaC')]
item['Drift'] = data[ keys.index('Drift')]
item['TrustedAI'] = data[ keys.index('TrustedAI')]
url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
record = {}
record['TableName'] = 'AION_LOGS'
record['Item'] = item
record = json.dumps(record)
#print(record)
try:
response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
except Exception as e:
print(e)
def telemetry_data(operation,Usecase,data):
now = datetime.now()
ID = datetime.timestamp(now)
record_date = now.strftime("%y-%m-%d %H:%M:%S")
computername = socket.getfqdn()
macaddress = getmac.get_mac_address()
try:
user = os.getlogin()
except:
user = 'NA'
item = {}
item['ID'] = str(int(ID))
item['record_date'] = record_date
item['UseCase'] = Usecase
item['operation'] = operation
item['remarks'] = data
item['user'] = str(user)
item['hostname'] = computername
item['macaddress'] = macaddress
url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
record = {}
record['TableName'] = 'AION_OPERATION'
record['Item'] = item
record = json.dumps(record)
try:
response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
check_telemetry_file()
except Exception as inst:
        filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'telemetry.txt')
        with open(filename, "a+") as f:
            f.write(record + '\n')
def check_telemetry_file():
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'telemetry.txt')
if(os.path.isfile(file_path)):
        with open(file_path, 'r') as f:
            file_content = f.read()
        url = 'https://l5m119j6v9.execute-api.ap-south-1.amazonaws.com/default/aion_telemetry'
        matched_lines = file_content.split('\n')
write_lines = []
for record in matched_lines:
try:
response = requests.post(url, data=record,headers={"x-api-key":"Obzt8ijfOT3dgBYma9JCt1tE3W6tzHaV8rVuQdMK","Content-Type":"application/json",})
except:
write_lines.append(record)
        # rewrite the file with only the records that still failed to send
        with open(file_path, "w") as f:
            for record in write_lines:
                f.write(record + '\n')
else:
        return True
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from typing import Tuple, Union, List
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from flwr.common.logger import log
from logging import INFO
TRUE_FALSE_MAPPING = {'True':'False','true':'false',True:False,'y':'n','Y':'N','Yes':'No','yes':'no','YES':'NO'}
XY = Tuple[np.ndarray, np.ndarray]
Dataset = Tuple[XY, XY]
LogRegParams = Union[XY, Tuple[np.ndarray]]
XYList = List[XY]
modelUsed=None
modelname=None
def setmodelName(modelselected):
    try:
        global modelUsed, modelname  # update the module-level references
        modelname = str(modelselected)
        print("setmodelName, given modelname:\n", modelname)
if (modelname.lower() == 'logisticregression'):
modelUsed=LogisticRegression()
return True
elif (modelname.lower() == "naivebayes"):
modelUsed = GaussianNB()
return True
elif (modelname.lower() == "sgdclassifier"):
#from sklearn.linear_model import SGDClassifier
modelUsed=SGDClassifier()
return True
elif (modelname.lower() == "knn"):
modelUsed = KNeighborsClassifier()
return True
elif (modelname.lower() == "decisiontreeclassifier"):
modelUsed = DecisionTreeClassifier()
return True
else:
return False
except Exception as e:
        log(INFO, "set fl model name fn issue: %s", e)
def get_model_parameters(model) -> LogRegParams:
    """Returns the parameters of a sklearn LogisticRegression model."""
model_name=model.__class__.__name__
if model.fit_intercept:
params = (model.coef_, model.intercept_)
else:
params = (model.coef_,)
return params
def set_model_params(model, params: LogRegParams):
    """Sets the parameters of a sklearn LogisticRegression model."""
model.coef_ = params[0]
model_name=model.__class__.__name__
try:
if model.fit_intercept:
model.intercept_ = params[1]
except Exception as e:
        log(INFO, "set_model_params fn issue: %s", e)
pass
return model
def set_initial_params(model,no_classes,no_features):
"""Sets initial parameters as zeros Required since model params are
uninitialized until model.fit is called.
But server asks for initial parameters from clients at launch. Refer
to sklearn.linear_model.LogisticRegression documentation for more
information.
"""
n_classes = no_classes
n_features = no_features
model.classes_ = np.array([i for i in range(n_classes)])
model.coef_ = np.zeros((n_classes, n_features))
model_name=model.__class__.__name__
try:
if model.fit_intercept:
model.intercept_ = np.zeros((n_classes,))
except Exception as e:
        log(INFO, "set_initial_params fn issue: %s", e)
pass
def shuffle(X: np.ndarray, y: np.ndarray) -> XY:
"""Shuffle X and y."""
rng = np.random.default_rng()
idx = rng.permutation(len(X))
return X[idx], y[idx]
def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList:
"""Split X and y into a number of partitions."""
return list(
zip(np.array_split(X, num_partitions), np.array_split(y, num_partitions))
)
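# --- Illustrative sketch (not part of the original module): shuffling a toy
# dataset and splitting it into two client partitions, as a federated-learning
# setup would do before handing data to clients.
def _demo_partition():
    X = np.arange(20).reshape(10, 2)
    y = np.arange(10)
    X, y = shuffle(X, y)
    parts = partition(X, y, num_partitions=2)
    return [p[0].shape for p in parts]  # expected: [(5, 2), (5, 2)]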
def get_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if v in TRUE_FALSE_MAPPING.keys():
return k
return default_value
def get_true_options( d):
options = []
if isinstance(d, dict):
for k,v in d.items():
if v in TRUE_FALSE_MAPPING.keys():
options.append(k)
return options
def set_true_option(d, key=None, value='True'):
if key in d.keys():
if value in TRUE_FALSE_MAPPING.keys():
for k in d.keys():
d[ k] = TRUE_FALSE_MAPPING[ value]
d[key] = value
return d
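# --- Illustrative sketch (not part of the original module): the helpers above
# treat a dict of flag -> 'True'/'False' strings as a one-hot selector.
def _demo_true_option():
    opts = {'classification': 'True', 'regression': 'False'}
    first = get_true_option(opts)                   # 'classification'
    opts = set_true_option(opts, key='regression')  # flips the selection
    return first, get_true_option(opts)             # ('classification', 'regression')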
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
def analysis_images(folder_path):
from AIX import image_eda
qualityscore = image_eda.img_MeasureImageQuality(folder_path)
eda_result = image_eda.img_EDA(folder_path)
#Image Duplicate Finder
duplicate_img = image_eda.img_duplicatefinder(folder_path)
color_plt = image_eda.img_plot_colour_hist(folder_path)
    return qualityscore,eda_result,duplicate_img,color_plt
'''
*
* =============================================================================
* COPYRIGHT NOTICE
 * =============================================================================
 * @ Copyright HCL Technologies Ltd. 2021, 2022,2023
 * Proprietary and confidential. All information contained herein is, and
 * remains the property of HCL Technologies Limited. Copying or reproducing the
 * contents of this file, via any medium is strictly prohibited unless prior
 * written permission is obtained from HCL Technologies Limited.
 *
'''
    number_samples = dictionary.get("number_samples")
number_numerical_features = dictionary.get("number_numerical_features")
number_categorical_features = dictionary.get("number_categorical_features")
missing_proportion = dictionary.get("missing_proportion")
number_informative = dictionary.get("number_informative")
number_target = dictionary.get("number_target")
bias = dictionary.get("bias")
noise = dictionary.get("noise")
value_range_dict = dictionary.get("value_range_dict")
gen_data_series(univariate=is_univariate,
number_samples=number_samples,
number_numerical_features=number_numerical_features,
file_name=data_path,
number_categorical_features=number_categorical_features,
# number_text_features=2,
missing_proportion=missing_proportion,
number_informative=number_informative,
number_target=number_target, bias=bias,
noise=noise,
value_range_dict=value_range_dict)
if __name__ == "__main__":
data_generated_csv()
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
#Standard Library modules
import sqlite3
import pandas as pd
from pathlib import Path
class sqlite_writer():
def __init__(self, target_path):
self.target_path = Path(target_path)
database_file = self.target_path.stem + '.db'
self.db = sqlite_db(self.target_path, database_file)
def file_exists(self, file):
if file:
return self.db.table_exists(file)
else:
return False
def read(self, file):
return self.db.read(file)
def write(self, data, file):
self.db.write(data, file)
def close(self):
self.db.close()
class sqlite_db():
def __init__(self, location, database_file=None):
if not isinstance(location, Path):
location = Path(location)
if database_file:
self.database_name = database_file
else:
self.database_name = location.stem + '.db'
db_file = str(location/self.database_name)
self.conn = sqlite3.connect(db_file)
self.cursor = self.conn.cursor()
self.tables = []
def table_exists(self, name):
if name in self.tables:
return True
elif name:
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name='{name}';"
listOfTables = self.cursor.execute(query).fetchall()
if len(listOfTables) > 0 :
self.tables.append(name)
return True
return False
def read(self, table_name,condition=''):
if condition == '':
return pd.read_sql_query(f"SELECT * FROM {table_name}", self.conn)
else:
return pd.read_sql_query(f"SELECT * FROM {table_name} WHERE {condition}", self.conn)
def create_table(self,name, columns, dtypes):
query = f'CREATE TABLE IF NOT EXISTS {name} ('
for column, data_type in zip(columns, dtypes):
query += f"'{column}' TEXT,"
query = query[:-1]
query += ');'
self.conn.execute(query)
return True
def update(self,table_name,updates,condition):
update_query = f'UPDATE {table_name} SET {updates} WHERE {condition}'
self.cursor.execute(update_query)
self.conn.commit()
return True
def write(self,data, table_name):
if not self.table_exists(table_name):
self.create_table(table_name, data.columns, data.dtypes)
tuple_data = list(data.itertuples(index=False, name=None))
insert_query = f'INSERT INTO {table_name} VALUES('
for i in range(len(data.columns)):
insert_query += '?,'
insert_query = insert_query[:-1] + ')'
self.cursor.executemany(insert_query, tuple_data)
self.conn.commit()
return True
def delete(self, name):
pass
def close(self):
self.conn.close()
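# --- Illustrative usage sketch (not part of the original module): the
# sqlite_writer facade above derives the database name from the stem of the
# target directory passed to it.
def _demo_sqlite_writer():
    import tempfile
    target_dir = Path(tempfile.mkdtemp())
    writer = sqlite_writer(target_dir)
    writer.write(pd.DataFrame({'ID': ['1'], 'state': ['Done']}), 'syncState')
    df = writer.read('syncState')
    writer.close()
    return df  # one-row DataFrame with columns ID, state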
import json
import os
import sys
import re
import numpy as np
try:
    from hyperopt import hp  # needed only when optimizationMethod == 'bayesopt'
except ImportError:
    hp = None
def check_unsupported_col(config): #bugId14444
unsupported_chars = '[]<>#{}@&'
try:
featureList = config['basic']['featureList']
return any([x in y for x in unsupported_chars for y in featureList])
except Exception as e:
print(str(e))
return False
def check_granularity(configSettingsJson,datapath=None):
try:
from AION.appbe.utils import get_true_option
import pandas as pd
from pathlib import Path
seconds_per_unit = {'second':1,'minute':60,'hour':60 * 60,'day':24 * 60 * 60,'week':7 * 24 * 60 * 60,'month':30 * 24 * 60 * 60,'year':365 * 24 * 60 * 60}
if not get_true_option(configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['type']):
return ''
if isinstance( configSettingsJson['basic']['dateTimeFeature'], list):
datetime_feature = configSettingsJson['basic']['dateTimeFeature'][0]
else:
datetime_feature = configSettingsJson['basic']['dateTimeFeature']
if get_true_option(configSettingsJson['basic']['analysisType']) == 'timeSeriesForecasting' and datetime_feature:
if not datapath:
datapath = configSettingsJson['basic']['dataLocation']
if Path( datapath).exists():
df = pd.read_csv(datapath, nrows=2)
datetime = pd.to_datetime(df[ datetime_feature])
if len(datetime) > 1:
source_time_delta = (datetime[1] - datetime[0]).total_seconds()
granularity_unit = get_true_option(configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['granularity']['unit'])
size = int(configSettingsJson['basic']['preprocessing']['timeSeriesForecasting']['aggregation']['granularity']['size'])
target_time_delta = size * seconds_per_unit[granularity_unit]
amplify = int(source_time_delta / target_time_delta)
if amplify > 20:
return f'Current Granularity setting will amplify the data approx {amplify} times. Depending on your system configuration, this may cause Memory error'
return ''
except Exception as e:
return ''
def getStatusCount(matched_lines,total_steps):
stepsdone = 0
leaner = True
#print(matched_lines)
for line in matched_lines:
if 'AION feature transformation completed' in line:
stepsdone = stepsdone + 1
elif 'AION feature engineering completed' in line:
stepsdone = stepsdone + 1
elif 'AION Association Rule completed' in line:
stepsdone = stepsdone + 1
elif 'AION Image Classification completed' in line:
stepsdone = stepsdone + 1
elif 'AION State Transition completed' in line:
stepsdone = stepsdone + 1
elif 'AION SurvivalAnalysis completed' in line:
stepsdone = stepsdone + 1
elif 'AION Recommender completed' in line:
stepsdone = stepsdone + 1
elif 'AION Gluon Stop' in line:
stepsdone = stepsdone + 1
elif 'AION Evaluation Stop' in line:
stepsdone = stepsdone + 1
elif 'AION Object Detection completed' in line:
stepsdone = stepsdone + 1
elif ('training completed' in line) and leaner:
stepsdone = stepsdone + 1
leaner = False
elif 'Prediction Service completed' in line:
stepsdone = stepsdone + 1
elif 'AION TimeSeries Forecasting started' in line: #task 11997
stepsdone = stepsdone + 1
elif 'Distributed Learning Completed' in line:
stepsdone = stepsdone + 4
elif 'AION Batch Deployment completed' in line:
stepsdone = stepsdone + 2
match_lines = []
for line in matched_lines:
count = len(line)-len(line.lstrip())
uline = line.split('...')
uline = uline[1]
if count == 0:
uline = '|... <span style="border: 1px solid black; line-height:2; padding: 2px">'+uline+'</span>'
elif count == 8 or count == 1:
uline = ' |... <span style="border: 1px dashed darkblue; line-height:2; padding: 2px">'+uline+'</span>'
elif count == 16 or count == 2:
uline = ' |... <span style="border: 1px dotted darkgray; line-height:2; padding: 2px">'+uline+'</span>'
elif count == 32 or count == 3:
uline = ' |... <span style="border: 1px dotted lightgray ; line-height:2; padding: 2px">'+uline+'</span>'
else:
uline = line
match_lines.append(uline)
stepline = '<b>Stage: ' + str(stepsdone) + '/' + str(total_steps) + ' Complete</b>'
match_lines.insert(0, stepline)
#print(match_lines)
    output = "\n".join([status_text for status_text in match_lines])
output = "<pre>{}</pre>".format(output)
#print(output)
return(output)
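# --- Illustrative sketch (made-up log lines, not part of the original module):
# how getStatusCount counts completed stages and renders the HTML summary.
def _demo_getStatusCount():
    lines = ['Status:... AION feature transformation completed',
             '    Status:... training completed']
    return getStatusCount(lines, total_steps=4)  # starts with "<pre><b>Stage: 2/4 ..."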
def calculate_total_interations(config):
try:
noOfIterations = 0
problemtypes = config['basic']['analysisType']
problem_type = ""
for key in problemtypes:
if config['basic']['analysisType'][key] == 'True':
problem_type = key
break
if problem_type.lower() in ['classification','regression']:
algorithms = config['basic']['algorithms'][problem_type]
for key in algorithms:
if config['basic']['algorithms'][problem_type][key] == 'True':
if key not in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)','Deep Q Network','Dueling Deep Q Network']:
if problem_type.lower() == 'classification':
configparam = config['advance']['mllearner_config']['modelParams']['classifierModelParams'][key]
else:
configparam = config['advance']['mllearner_config']['modelParams']['regressorModelParams'][key]
param = paramDefine(configparam,config['advance']['mllearner_config']['optimizationMethod'])
interationsum = 1
for x in param.values():
interationsum = interationsum*len(x)
if config['advance']['mllearner_config']['optimizationMethod'].lower() == 'random':
if interationsum > int(config['advance']['mllearner_config']['optimizationHyperParameter']['iterations']):
interationsum = int(config['advance']['mllearner_config']['optimizationHyperParameter']['iterations'])
noOfIterations = noOfIterations+interationsum
else:
if key in ['Neural Network','Convolutional Neural Network (1D)','Recurrent Neural Network','Recurrent Neural Network (GRU)','Recurrent Neural Network (LSTM)']:
if problem_type.lower() == 'classification':
configparam = config['advance']['dllearner_config']['modelParams']['classifierModelParams'][key]
else:
configparam = config['advance']['dllearner_config']['modelParams']['regressorModelParams'][key]
interationsum = 1
for j in list(configparam.keys()):
if isinstance(configparam[j],(list,dict,tuple,str)):
x = configparam[j].split(',')
interationsum = interationsum*len(x)
noOfIterations = noOfIterations+interationsum
elif key in ['Deep Q Network','Dueling Deep Q Network']:
if problem_type.lower() == 'classification':
configparam = config['advance']['rllearner_config']['modelParams']['classifierModelParams'][key]
interationsum = 1
for j in list(configparam.keys()):
if isinstance(configparam[j],(list,dict,tuple,str)):
x = configparam[j].split(',')
interationsum = interationsum*len(x)
noOfIterations = noOfIterations+interationsum
elif problem_type.lower() in ['llmfinetuning']:
algorithms = config['basic']['algorithms'][problem_type]
for key in algorithms:
if config['basic']['algorithms'][problem_type][key] == 'True':
                    noOfIterations = config['advance']['llmFineTuning']['modelParams'][key]['epochs']
break
else:
noOfIterations= 'NA'
except Exception as e:
print(e)
noOfIterations = 'NA'
pass
return(noOfIterations)
def paramDefine(paramSpace, method):
paramDict = {}
for j in list(paramSpace.keys()):
inp = paramSpace[j]
try:
isLog = False
isLin = False
isRan = False
isList = False
isString = False
try:
# check if functions are given as input and reassign paramspace
v = paramSpace[j]
if 'logspace' in paramSpace[j]:
paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isLog = True
elif 'linspace' in paramSpace[j]:
paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isLin = True
elif 'range' in paramSpace[j]:
                paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isRan = True
elif 'list' in paramSpace[j]:
paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isList = True
            elif '[' in paramSpace[j] and ']' in paramSpace[j]:
paramSpace[j] = v.split('[')[1].split(']')[0].replace(" ", "")
isList = True
x = paramSpace[j].split(',')
except:
x = paramSpace[j]
str_arg = paramSpace[j]
# check if arguments are string
try:
test = eval(x[0])
except:
isString = True
if isString:
paramDict.update({j: hp.choice(j, x)} if method == 'bayesopt' else {j: x})
else:
res = eval(str_arg)
if isLin:
y = eval('np.linspace' + str(res))
paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y})
elif isLog:
y = eval('np.logspace' + str(res))
paramDict.update(
{j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))} if method == 'bayesopt' else {j: y})
elif isRan:
y = eval('np.arange' + str(res))
paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y})
# check datatype of argument
elif isinstance(eval(x[0]), bool):
y = list(map(lambda i: eval(i), x))
paramDict.update({j: hp.choice(j, eval(str(y)))} if method == 'bayesopt' else {j: y})
elif isinstance(eval(x[0]), float):
res = eval(str_arg)
if len(str_arg.split(',')) == 3 and not isList:
y = eval('np.linspace' + str(res))
#print(y)
paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y})
else:
y = list(res) if isinstance(res, tuple) else [res]
paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y})
else:
res = eval(str_arg)
if len(str_arg.split(',')) == 3 and not isList:
y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res))
else:
y = list(res) if isinstance(res, tuple) else [res]
paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y})
except Exception as inst:
print(inst)
return paramDict
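# Illustrative usage of paramDefine (not part of the original code), shown with
# the 'grid' optimization method so the hyperopt objects are not involved. The
# parameter strings are hypothetical but follow the formats parsed above:
# comma-separated lists, bracketed lists and 'range(...)' expressions.
if __name__ == '__main__':
    demo_space = {
        'criterion': 'gini,entropy',    # plain strings -> kept as a choice list
        'max_depth': 'range(2,10,2)',   # -> np.arange(2, 10, 2) = [2 4 6 8]
        'C': '[0.1,1.0,10.0]',          # bracketed list -> [0.1, 1.0, 10.0]
    }
    print(paramDefine(demo_space, 'grid'))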
def calculate_total_activities(config):
req_step = 0
problemtypes = config['basic']['analysisType']
problem_type = ""
for key in problemtypes:
if config['basic']['analysisType'][key] == 'True':
problem_type = key
break
Modelproblem = problem_type
if Modelproblem.lower() in ['classification','regression','clustering','anomalydetection','topicmodelling']:
req_step = req_step+4
if Modelproblem.lower() in ['timeseriesforecasting','imageclassification','objectdetection','multilabelprediction','similarityidentification','contextualsearch']: #task 11997
req_step = req_step+2
if Modelproblem.lower() in ['survivalanalysis']:
req_step = req_step+3
if Modelproblem.lower() in ['recommendersystem']:
if config['basic']['algorithms']['recommenderSystem']['ItemRating'] == 'True':
req_step = req_step+3
if config['basic']['algorithms']['recommenderSystem']['AssociationRules-Apriori'] == 'True':
req_step = req_step+1
if Modelproblem.lower() in ['statetransition']:
req_step = req_step+1
return (req_step)
def getModelStatus(Existusecases,modelid):
model = Existusecases.objects.get(id=modelid)
return(model.Status)
def changeModelStatus(Existusecases,modelid,status,problemType,deployPath):
model = Existusecases.objects.get(id=modelid)
model.Status = status
model.ProblemType = problemType
model.DeployPath = deployPath
model.save()
def checkversionrunningstatus(modelid,usecasedetails,Existusecases):
modelx = Existusecases.objects.get(id=modelid)
ConfigPath = str(modelx.ConfigPath)
status = 'Running'
try:
if os.path.exists(ConfigPath):
with open(ConfigPath, 'r') as json_file:
data = json.load(json_file)
json_file.close()
deployPath = str(data['basic']['deployLocation'])
modelName = data['basic']['modelName']
modelVersion = data['basic']['modelVersion']
modelName = modelName.replace(" ", "_")
logfile = os.path.join(deployPath,modelName,str(modelVersion),'log','model_training_logs.log')
print(logfile)
if os.path.exists(logfile):
with open(logfile) as f:
contents = f.read()
f.close()
contents = re.search(r'aion_learner_status:(.*)', str(contents), re.IGNORECASE).group(1)
contents = contents.strip()
print(contents)
if contents != '':
resultJsonObj = json.loads(contents)
odataFile = str(modelx.TrainOuputLocation)
with open(odataFile, 'w') as json_file:
json.dump(resultJsonObj, json_file)
json_file.close()
modelx.Status = resultJsonObj['status']
status = modelx.Status
if resultJsonObj['status'] == 'SUCCESS':
modelx.DeployPath = str(resultJsonObj['data']['deployLocation'])
if resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection']:
modelx.ProblemType = 'unsupervised'
else:
modelx.ProblemType = 'supervised'
modelx.save()
except Exception as e:
pass
return status
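# Illustrative sketch (not part of the original code): the training status is
# recovered by extracting the JSON payload that follows the
# 'aion_learner_status:' marker in the log file. The log line below is made up.
if __name__ == '__main__':
    import json
    import re
    demo_line = 'aion_learner_status:{"status": "SUCCESS", "data": {"deployLocation": "/tmp/model"}}'
    payload = re.search(r'aion_learner_status:(.*)', demo_line, re.IGNORECASE).group(1)
    print(json.loads(payload.strip())['status'])   # SUCCESS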
def updateLLM_Model_training_logs(deployPath,modelName,modelVersion,model,configPath):
from appbe.prediction import get_instance
hypervisor,instanceid,region,image = get_instance(modelName+'_'+str(modelVersion))
from llm.llm_tuning import llm_logs
cloudconfig = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..','config','compute_conf.json'))
llm_logs(configPath,cloudconfig,instanceid,hypervisor,model)
def checkModelUnderTraining(request,usecasedetails,Existusecases):
try:
models = Existusecases.objects.filter(Status='Running')
for model in models:
ConfigPath = str(model.ConfigPath)
try:
if os.path.exists(ConfigPath):
with open(ConfigPath, 'r') as json_file:
data = json.load(json_file)
json_file.close()
deployPath = str(data['basic']['deployLocation'])
modelName = data['basic']['modelName']
modelVersion = data['basic']['modelVersion']
modelName = modelName.replace(" ", "_")
if data['basic']['analysisType']['llmFineTuning'] == 'True':
mlmodels =''
algorihtms = data['basic']['algorithms']['llmFineTuning']
for k in algorihtms.keys():
if data['basic']['algorithms']['llmFineTuning'][k] == 'True':
if mlmodels != '':
mlmodels += ', '
mlmodels += k
updateLLM_Model_training_logs(deployPath,modelName,modelVersion,mlmodels,ConfigPath)
logfile = os.path.join(deployPath,modelName,str(modelVersion),'log','model_training_logs.log')
if os.path.exists(logfile):
with open(logfile,encoding="utf-8") as f:
contents = f.read()
f.close()
contents = re.search(r'aion_learner_status:(.*)', str(contents), re.IGNORECASE).group(1)
contents = contents.strip()
if contents != '':
resultJsonObj = json.loads(contents)
odataFile = str(model.TrainOuputLocation)
with open(odataFile, 'w') as json_file:
json.dump(resultJsonObj, json_file)
json_file.close()
modelx = Existusecases.objects.get(id=model.id)
modelx.Status = resultJsonObj['status']
if resultJsonObj['status'] == 'SUCCESS':
modelx.DeployPath = str(resultJsonObj['data']['deployLocation'])
if resultJsonObj['data']['ModelType'] in ['clustering','anomalydetection']:
modelx.ProblemType = 'unsupervised'
else:
modelx.ProblemType = 'supervised'
modelx.save()
except Exception as e:
print(ConfigPath)
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
pass
except Exception as e:
print(e)
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import shutil
import subprocess
import sys
import time
import glob
import re
from appbe.pages import get_usecase_page
import json
from django.http import FileResponse
def startIncrementallearning(request,usecasedetails,Existusecases,DATA_FILE_PATH):
try:
modelid = request.POST.get('modelid')
#incfilepath = request.POST.get('incfilepath')
Datapath = request.FILES['incfilepath']
filetimestamp = str(int(time.time()))
dataFile = os.path.join(DATA_FILE_PATH,'AION_' + filetimestamp+'.csv')
with open(dataFile, 'wb+') as destination:
for chunk in Datapath.chunks():
destination.write(chunk)
# destination.close()#bugfix 11656
incfilepath = dataFile
p = Existusecases.objects.get(id=modelid)
deployPath = str(p.DeployPath)
scriptPath = os.path.abspath(os.path.join(deployPath,'aion_inclearning.py'))
request.session['IsRetraining'] = 'No'
if not os.path.exists(scriptPath):
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
            context['Msg'] = 'Incremental/Online learning is not supported for this model. For online training, select Online Training on the basic configuration page and train the model with it enabled.'
else:
outputStr = subprocess.check_output([sys.executable, scriptPath, incfilepath])
outputStr = outputStr.decode('utf-8')
outputStr = re.search(r'aion_learner_status:(.*)', str(outputStr), re.IGNORECASE).group(1)
outputStr = outputStr.strip()
decoded_data = json.loads(outputStr)
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
if decoded_data['status'] == 'SUCCESS':
msg = decoded_data['Msg']
context['Status'] = 'SUCCESS'
context['Msg'] = msg
            else:
                msg = decoded_data['Msg']
                context['Status'] = 'Error'
                context['Msg'] = msg
except Exception as e:
print(e)
try:
status,context,action = get_usecase_page(request,usecasedetails,Existusecases)
except Exception as msg:
context['errorMsg'] = msg
return action,context
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import VGG16
from tensorflow.keras.callbacks import EarlyStopping
import logging
from sklearn.preprocessing import LabelEncoder
from statistics import mean
import sys
from learner.machinelearning import machinelearning
from learner.aion_matrix import aion_matrix
from profiler.imageAug import ImageAugmentation
from pathlib import Path
class ImageLearning:
def __init__(self,dataFrame,input_directory,outputdir,modelname,hyperParam, AugEnabled,keepAugImages,operations,augConf):
self.image_list = dataFrame
self.input_directory = input_directory
self.outputdir = outputdir
self.modelname = modelname
self.hyperParam = hyperParam
self.labelMapping={}
self.log = logging.getLogger('eion')
self.AIONNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
self.AugEnabled = AugEnabled
self.keepAugImages = keepAugImages
self.operations = operations
self.augConf = augConf
def TrainCAST(self,predicted_data_file):
datatype = self.image_list['Label'].dtypes
if datatype not in self.AIONNumericDtypes:
labelEncode= LabelEncoder()
self.image_list['Label'] = self.image_list['Label'].apply(str)
self.image_list['Label'] = labelEncode.fit_transform(self.image_list['Label'])
self.labelMapping = dict(zip(labelEncode.classes_, labelEncode.transform(labelEncode.classes_)))
self.log.info('\\n-------> First Ten Rows of Input Data After Encoding: ')
self.log.info(self.image_list.head(10))
self.log.info('Status:- |... Target Feature Encoding Done')
if not os.path.exists(self.outputdir):
os.makedirs(self.outputdir)
train_df, test_df = train_test_split(self.image_list, random_state=42, test_size=self.hyperParam['test_split_ratio'])
if self.AugEnabled:
csv_file = "tempTrainDf.csv"
train_df.to_csv(csv_file, index=False)
ia = ImageAugmentation(self.input_directory, csv_file)
csv_file = ia.augment("imageclassification", self.operations,None,self.augConf)
train_df = pd.read_csv(csv_file)
Path(csv_file).unlink()
train_image = []
train_df.reset_index(drop=True, inplace=True)
for i in range(train_df.shape[0]):
#print(os.path.join(self.input_directory,str(self.image_list['File'][i])))
img = image.load_img(os.path.join(self.input_directory,str(train_df['File'][i])), target_size=(self.hyperParam['img_width'],self.hyperParam['img_height'],self.hyperParam['img_channel']), grayscale=False)
img = image.img_to_array(img)
img = img/255
train_image.append(img)
test_image = []
test_df.reset_index(drop=True, inplace=True)
for i in range(test_df.shape[0]):
#print(os.path.join(self.input_directory,str(self.image_list['File'][i])))
img = image.load_img(os.path.join(self.input_directory,str(test_df['File'][i])), target_size=(self.hyperParam['img_width'],self.hyperParam['img_height'],self.hyperParam['img_channel']), grayscale=False)
img = image.img_to_array(img)
img = img/255
test_image.append(img)
self.log.info('Status:- |... Image Loading Done')
X_train = np.array(train_image)
y_train = train_df['Label']
X_test = np.array(test_image)
y_test = test_df['Label']
ytrain = y_train.values
ytrain = to_categorical(ytrain)
ytest = y_test.values
ytest = to_categorical(ytest)
#print(y)
self.log.info("Loading Imagenet Weights...")
if self.modelname == "densenet":
self.log.info('Loading Densenet model')
baseModel = tensorflow.keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.hyperParam['img_width'], self.hyperParam['img_height'], self.hyperParam['img_channel']))) #98
elif self.modelname == "inception":
self.log.info('Loading Inception model')
baseModel = tensorflow.keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(self.hyperParam['img_width'], self.hyperParam['img_height'], self.hyperParam['img_channel']))) #97
headModel = baseModel.output
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(1024, activation='relu')(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation='sigmoid')(headModel)
model = Model(inputs=baseModel.input, outputs=headModel)
self.log.info("[INFO] compiling model...")
opt = Adam(lr=self.hyperParam['lr'])
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
#early_stop = EarlyStopping(monitor='val_loss',patience=2)
#history = model.fit(X_train, y_train, epochs=hyperparam_config['epochs'], validation_data=(X_test, y_test), callbacks=[early_stop])
history = model.fit(X_train, ytrain, epochs=self.hyperParam['epochs'], validation_data=(X_test, ytest))
self.log.info('Status:- |... Image Classification Algorithm applied:'+str(self.modelname))
#Saving trained model weights
model.save_weights(os.path.join(self.outputdir, self.modelname))
saved_model = self.modelname
modelname = self.modelname
prediction = model.predict(X_train)
predictedData = np.argmax(prediction,axis=1)
mlobj = machinelearning()
self.log.info('\\n--------- Performance Matrix with Train Data ---------')
trainingperformancematrix = mlobj.getClassificationPerformaceMatrix(y_train, predictedData,self.labelMapping)
prediction = model.predict(X_test)
predictedData = np.argmax(prediction,axis=1)
self.log.info('\\n--------- Performance Matrix with Test Data ---------')
performancematrix = mlobj.getClassificationPerformaceMatrix(y_test, predictedData,self.labelMapping)
df_test = pd.DataFrame()
df_test['actual'] = y_test
df_test['predict'] = predictedData
df_test.to_csv(predicted_data_file)
objClf = aion_matrix()
scoring_param = 'Accuracy'
score = objClf.get_score(scoring_param,y_test,predictedData)
#score = mean(history.history['accuracy'])
if self.AugEnabled and not self.keepAugImages:
ia.removeAugmentedImages(train_df)
scoredetails = '{"Model":"'+modelname+'","Score":'+str(round(score,2))+'}'
self.log.info('Status:- |... Score Accuracy: '+str(round(score,2)))
return saved_model,modelname,'ImageClassification',scoring_param,score,scoredetails,self.labelMapping,trainingperformancematrix,performancematrix
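# Illustrative usage (comments only; the paths, hyperparameters and augmentation
# arguments below are hypothetical):
#   df = pd.DataFrame({'File': ['img1.png', 'img2.png'], 'Label': ['ok', 'defect']})
#   hyper = {'test_split_ratio': 0.2, 'img_width': 128, 'img_height': 128,
#            'img_channel': 3, 'lr': 1e-4, 'epochs': 5}
#   learner = ImageLearning(df, '/data/images', '/data/output', 'densenet', hyper,
#                           False, False, None, None)
#   results = learner.TrainCAST('/data/output/predicted.csv')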
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
from keras.applications import VGG16
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import classification_report,confusion_matrix,precision_recall_curve
import seaborn as sns
def PredictCAST(test_csv, test_dataset_directory, load_model_dir, model_name, hparams_config_file):
hyperparam_config = hparams_config_file['img_classifier']
print("[Info] loading imagenet weights...")
#baseModel = keras.applications.ResNet101(weights="imagenet", include_top=False, input_tensor=Input(shape=(128, 128, 3)))
if model_name == "densenet":
print('Loading Densenet model')
baseModel = keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #98
elif model_name == "inception":
print('Loading Inception model')
baseModel = keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #97
headModel = baseModel.output
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(1024, activation='relu')(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation='sigmoid')(headModel)
model = Model(inputs=baseModel.input, outputs=headModel)
print("[INFO] compiling model...")
opt = Adam(lr=hyperparam_config['lr'])
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
model.load_weights(os.path.join(load_model_dir, model_name))
#model.load_weights(load_model_dir)
test_image = []
for i in range(test_csv.shape[0]):
img = image.load_img(test_dataset_directory + '/' + str(test_csv['file_name'][i]), target_size=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']), grayscale=False)
img = image.img_to_array(img)
img = img/255
test_image.append(img)
test_images = np.array(test_image)
test_labels = test_csv['class'].values
test_labels = to_categorical(test_labels)
# making predictions
prediction = model.predict(test_images)
prediction = np.argmax(prediction,axis=1)
print('Classification Report : ')
print(classification_report(test_csv['class'],prediction))
sns.heatmap(confusion_matrix(test_csv['class'],prediction),annot=True)
plt.show()
print('Confusion matrix : ')
print(confusion_matrix(test_csv['class'],prediction))
print("[INFO] Evaluating model accuracy and loss...Take some moment...")
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\\nTest accuracy:', test_acc)
print('\\nTest loss:', test_loss)
print("Prediction Completed...")
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
warnings.simplefilter("ignore")
import os
import numpy as np
from numpy import asarray
import cv2
import sys
import random
import glob as glob
import math as m
# gamma function (imported below as tgamma)
from scipy.special import gamma as tgamma
import matplotlib.image as mpimg
import skimage
from libsvm import svmutil,svm
#import svmutil
from svmutil import *
from svm import *
from PIL import Image
from collections import Counter
from imutils import paths
import matplotlib.pyplot as plt
import json
###################################################################################
#Input - MSCN image or pair-wise product image (input to the AGGD fit model)
#Output - best-fit values of the image parameters
#Definition - used as an internal method by compute_features
###################################################################################
def AGGDfit(structdis):
# variables to count positive pixels / negative pixels and their squared sum
poscount = 0
negcount = 0
possqsum = 0
negsqsum = 0
abssum = 0
poscount = len(structdis[structdis > 0]) # number of positive pixels
negcount = len(structdis[structdis < 0]) # number of negative pixels
# calculate squared sum of positive pixels and negative pixels
possqsum = np.sum(np.power(structdis[structdis > 0], 2))
negsqsum = np.sum(np.power(structdis[structdis < 0], 2))
# absolute squared sum
    abssum = np.sum(structdis[structdis > 0]) + np.sum(-1 * structdis[structdis < 0])
# calculate left sigma variance and right sigma variance
lsigma_best = np.sqrt((negsqsum/negcount))
rsigma_best = np.sqrt((possqsum/poscount))
gammahat = lsigma_best/rsigma_best
# total number of pixels - totalcount
totalcount = structdis.shape[1] * structdis.shape[0]
rhat = m.pow(abssum/totalcount, 2)/((negsqsum + possqsum)/totalcount)
rhatnorm = rhat * (m.pow(gammahat, 3) + 1) * (gammahat + 1)/(m.pow(m.pow(gammahat, 2) + 1, 2))
prevgamma = 0
prevdiff = 1e10
sampling = 0.001
gam = 0.2
# vectorized function call for best fitting parameters
    vectfunc = np.vectorize(func, otypes=[float], cache=False)
# calculate best fit params
gamma_best = vectfunc(gam, prevgamma, prevdiff, sampling, rhatnorm)
return [lsigma_best, rsigma_best, gamma_best]
def func(gam, prevgamma, prevdiff, sampling, rhatnorm):
while(gam < 10):
r_gam = tgamma(2/gam) * tgamma(2/gam) / (tgamma(1/gam) * tgamma(3/gam))
diff = abs(r_gam - rhatnorm)
if(diff > prevdiff): break
prevdiff = diff
prevgamma = gam
gam += sampling
gamma_best = prevgamma
return gamma_best
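# Illustrative check (not part of the original code): func sweeps gamma over
# (0.2, 10) and keeps the value whose generalized-Gaussian ratio r(gamma) is
# closest to the measured rhatnorm. A Gaussian has
# r(2) = gamma(1)^2 / (gamma(0.5) * gamma(1.5)) ~= 0.6366, so feeding that in
# should recover gamma ~= 2.
if __name__ == '__main__':
    from scipy.special import gamma as demo_gamma
    rhat_gaussian = demo_gamma(1.0) ** 2 / (demo_gamma(0.5) * demo_gamma(1.5))
    print(func(0.2, 0, 1e10, 0.001, rhat_gaussian))   # ~2.0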
def compute_features(img):
scalenum = 2
feat = []
# make a copy of the image
im_original = img.copy()
# scale the images twice
for itr_scale in range(scalenum):
im = im_original.copy()
# normalize the image
im = im / 255.0
# calculating MSCN coefficients
mu = cv2.GaussianBlur(im, (7, 7), 1.166)
mu_sq = mu * mu
sigma = cv2.GaussianBlur(im*im, (7, 7), 1.166)
sigma = (sigma - mu_sq)**0.5
# structdis is the MSCN image
structdis = im - mu
structdis /= (sigma + 1.0/255)
# calculate best fitted parameters from MSCN image
best_fit_params = AGGDfit(structdis)
# unwrap the best fit parameters
lsigma_best = best_fit_params[0]
rsigma_best = best_fit_params[1]
gamma_best = best_fit_params[2]
# append the best fit parameters for MSCN image
feat.append(gamma_best)
feat.append((lsigma_best*lsigma_best + rsigma_best*rsigma_best)/2)
# shifting indices for creating pair-wise products
shifts = [[0,1], [1,0], [1,1], [-1,1]] # H V D1 D2
for itr_shift in range(1, len(shifts) + 1):
OrigArr = structdis
reqshift = shifts[itr_shift-1] # shifting index
# create transformation matrix for warpAffine function
M = np.float32([[1, 0, reqshift[1]], [0, 1, reqshift[0]]])
ShiftArr = cv2.warpAffine(OrigArr, M, (structdis.shape[1], structdis.shape[0]))
Shifted_new_structdis = ShiftArr
Shifted_new_structdis = Shifted_new_structdis * structdis
# shifted_new_structdis is the pairwise product
# best fit the pairwise product
best_fit_params = AGGDfit(Shifted_new_structdis)
lsigma_best = best_fit_params[0]
rsigma_best = best_fit_params[1]
gamma_best = best_fit_params[2]
constant = m.pow(tgamma(1/gamma_best), 0.5)/m.pow(tgamma(3/gamma_best), 0.5)
meanparam = (rsigma_best - lsigma_best) * (tgamma(2/gamma_best)/tgamma(1/gamma_best)) * constant
# append the best fit calculated parameters
feat.append(gamma_best) # gamma best
feat.append(meanparam) # mean shape
feat.append(m.pow(lsigma_best, 2)) # left variance square
feat.append(m.pow(rsigma_best, 2)) # right variance square
# resize the image on next iteration
im_original = cv2.resize(im_original, (0,0), fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
return feat
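# Illustrative sanity check (not part of the original code): compute_features
# returns 18 AGGD statistics per scale over 2 scales, i.e. a 36-element BRISQUE
# feature vector. The image below is synthetic random noise.
if __name__ == '__main__':
    demo_img = (np.random.rand(64, 64) * 255).astype(np.float64)
    print(len(compute_features(demo_img)))   # 36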
def img_MeasureImageQuality(dataset_directory):
"""
####################################################################################
    #Input - dataset_directory containing the images to score
    #Output - quality index of each input image
    #Definition - calculates the BRISQUE quality score in the range 0 to 100 [0:good;100:bad]
####################################################################################
"""
imgfile_dict = {}
for file in os.listdir(dataset_directory):
if (file.endswith(".jfif") or file.endswith(".png") or file.endswith(".jpg") or file.endswith(".jpeg")):
filename = os.path.join(dataset_directory , file)
if os.path.isfile(filename)==False:
sys.exit()
file_extension = os.path.splitext(filename)[1]
if file_extension==".jfif":
extension=".jfif"
if file_extension==".png":
extension=".png"
if file_extension==".jpg":
extension=".jpg"
if file_extension==".jpeg":
extension=".jpeg"
if (extension not in [".jpg",".jpeg",".jfif",".png"]):
sys.exit()
try:
# read image from given path
dis = cv2.imread(filename, 1)
if(dis is None):
sys.exit(0)
# convert to gray scale
dis = cv2.cvtColor(dis, cv2.COLOR_BGR2GRAY)
# compute feature vectors of the image
features = compute_features(dis)
# rescale the brisqueFeatures vector from -1 to 1
x = [0]
# pre loaded lists from C++ Module to rescale brisquefeatures vector to [-1, 1]
min_= [0.336999 ,0.019667 ,0.230000 ,-0.125959 ,0.000167 ,0.000616 ,0.231000 ,-0.125873 ,0.000165 ,0.000600 ,0.241000 ,-0.128814 ,0.000179 ,0.000386 ,0.243000 ,- 0.133080 ,0.000182 ,0.000421 ,0.436998 ,0.016929 ,0.247000 ,-0.200231 ,0.000104 ,0.000834 ,0.257000 ,-0.200017 ,0.000112 ,0.000876 ,0.257000 ,-0.155072 , 0.000112 ,0.000356 ,0.258000 ,-0.154374 ,0.000117 ,0.000351]
max_= [9.999411, 0.807472, 1.644021, 0.202917, 0.712384, 0.468672, 1.644021, 0.169548, 0.713132, 0.467896, 1.553016, 0.101368, 0.687324, 0.533087, 1.554016, 0.101000 , 0.689177, 0.533133, 3.639918, 0.800955, 1.096995, 0.175286, 0.755547, 0.399270, 1.095995, 0.155928, 0.751488, 0.402398, 1.041992, 0.093209, 0.623516, 0.532925, 1.042992, 0.093714, 0.621958, 0.534484]
# append the rescaled vector to x
for i in range(0, 36):
min = min_[i]
max = max_[i]
x.append(-1 + (2.0/(max - min) * (features[i] - min)))
modelPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'allmodel.txt')
# load model
model = svmutil.svm_load_model(modelPath)
# create svm node array from python list
x, idx = gen_svm_nodearray(x[1:], isKernel=(model.param.kernel_type == PRECOMPUTED))
x[36].index = -1 # set last index to -1 to indicate the end.
# get important parameters from model
svm_type = model.get_svm_type()
is_prob_model = model.is_probability_model()
nr_class = model.get_nr_class()
if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):
# here svm_type is EPSILON_SVR as it's regression problem
nr_classifier = 1
dec_values = (c_double * nr_classifier)()
# calculate the quality score of the image using the model and svm_node_array
qualityscore = svmutil.libsvm.svm_predict_probability(model, x, dec_values)
imgfile_dict[file] = round(qualityscore,2)
#print ("Quality Score of the given image is: ", qualityscore, "[0:Good;100:Bad]")
except:
pass
finally:
warnings.simplefilter("ignore")
#print(imgfile_dict)
return imgfile_dict
# calculate mode
def mode(arr):
if arr==[]:
return None
else:
return max(set(arr), key=arr.count)
def img_EDA(dataset_directory):
"""
####################################################################################
    #Input - dataset_directory with all types of images
    #Output - mean, median and mode image size, channel types, extensions, size recommendations, etc.
    #Definition - img_EDA scans all the images and returns the EDA results
####################################################################################
"""
imgeda_dict = {}
# check input directory
if os.path.isdir(dataset_directory)==False:
print("folder does not exist")
sys.exit()
width_list=[]
height_list=[]
k=[]
c=[]
cnum=[]
v=[]
ext=[]
cnt=0
for item in os.listdir(dataset_directory):
if (item.endswith(".jfif") or item.endswith(".png") or item.endswith(".jpg") or item.endswith(".jpeg")):
if os.path.isfile(os.path.join(dataset_directory , item)):
im = Image.open(os.path.join(dataset_directory , item))
c.append(im.mode)
cnum.append(len(im.mode))
width_list.append(im.width)
height_list.append(im.height)
k.append(im.size)
v.append(im.width*im.height)
f, e = os.path.splitext(os.path.join(dataset_directory , item))
ext.append(e)
cnt=cnt+1
# calculate biggest and smallest image
img_dict={}
for key, val in zip(k, v):
img_dict[key] = val
max_key = max(img_dict, key=img_dict.get)
#max_key
min_key = min(img_dict, key=img_dict.get)
#min_key
imgeda_dict['Channels'] = set(c)
imgeda_dict['Extensions'] = set(ext)
imgeda_dict['Total_Images'] = cnt
imgeda_dict['Smallest_Image'] = min_key
imgeda_dict['Largest_Image'] = max_key
imgeda_dict['Mean_Width'] = int(np.mean(width_list))
imgeda_dict['Mean_Height'] = int(np.mean(height_list))
imgeda_dict['Median_Width'] = int(np.median(width_list))
imgeda_dict['Median_Height'] = int(np.median(height_list))
imgeda_dict['Mode_Width'] = int(mode(width_list))
imgeda_dict['Mode_Height'] = int(mode(height_list))
imgeda_dict['Recomended_Mean_Width_Height'] = (int(np.mean(width_list)),int(np.mean(height_list)))
imgeda_dict['Recomended_Median_Width_Height'] = (int(np.median(width_list)),int(np.median(height_list)))
imgeda_dict['Recomended_Mode_Width_Height'] = (int(mode(width_list)),int(mode(height_list)))
imgeda_dict['Size_Distribution'] = dict(Counter(k).items())
imgeda_dict['Channel_Mean'] = np.mean(cnum)
imgeda_dict['Channel_Standard_Deviation'] = np.std(cnum)
'''
print('*-----------------------<<< RESULTS >>>-------------------------*')
print()
print('%-30s | ' % 'Channels', set(c))
print('%-30s | ' % 'Extensions', set(ext))
print('*---------------------------------------------------------------*')
print('%-30s | ' % 'Total Images', cnt)
    print('%-30s | ' % 'Smallest Image', min_key)
print('%-30s | ' % 'Largest Image', max_key)
print('*---------------------------------------------------------------*')
print('%-30s | ' % 'Mean Width', int(np.mean(width_list)))
print('%-30s | ' % 'Mean Height', int(np.mean(height_list)))
print('*---------------------------------------------------------------*')
print('%-30s | ' % 'Median Width', int(np.median(width_list)))
print('%-30s | ' % 'Median Height', int(np.median(height_list)))
print('*---------------------------------------------------------------*')
print('%-30s | ' % 'Mode Width', int(mode(width_list)))
print('%-30s | ' % 'Mode Height', int(mode(height_list)))
print('*---------------------------------------------------------------*')
print('%-30s | ' % 'recommended size by mean(w,h)',(int(np.mean(width_list)),int(np.mean(height_list))))
print('*---------------------------------------------------------------*')
print('%-30s | ' % 'recommended size by median(w,h)',(int(np.median(width_list)),int(np.median(height_list))))
print('*---------------------------------------------------------------*')
print('%-30s | ' % 'recommended size by mode(w,h)',(int(mode(width_list)),int(mode(height_list))))
print('*---------------------------------------------------------------*')
print('%-30s | ' % 'distribution of sizes',dict(Counter(k).items()) )
print('*---------------------------------------------------------------*')
print('%-30s | ' % 'channel mean',np.mean(cnum))
print('%-30s | ' % 'channel standard deviation',np.std(cnum))
'''
#print(imgeda_dict)
return imgeda_dict
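# Illustrative usage (comments only; the directory is hypothetical):
#   stats = img_EDA('/data/images')
#   stats['Total_Images']                      # e.g. 500
#   stats['Recomended_Mean_Width_Height']      # e.g. (128, 128)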
def dhash(image, hashSize=8):
# convert the image to grayscale and resize the grayscale image,
# adding a single column (width) so we can compute the horizontal
# gradient
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
resized = cv2.resize(gray, (hashSize + 1, hashSize))
# compute the (relative) horizontal gradient between adjacent
# column pixels
diff = resized[:, 1:] > resized[:, :-1]
# convert the difference image to a hash and return it
return sum([2 ** i for (i, v) in enumerate(diff.flatten()) if v])
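# Illustrative check (not part of the original code): identical images hash to
# the same 64-bit value, which is how img_duplicatefinder below groups
# duplicates. The images are synthetic.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    img_a = rng.integers(0, 256, size=(32, 32, 3), dtype=np.uint8)
    img_b = img_a.copy()
    print(dhash(img_a) == dhash(img_b))   # True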
def img_duplicatefinder(dataset_directory):
# grab the paths to all images in our input dataset directory and
# then initialize our hashes dictionary
print("[INFO] computing image hashes...")
imagePaths = list(paths.list_images(dataset_directory))
hashes = {}
duplimg_list = []
remove_file = 0
# loop over our image paths
for imagePath in imagePaths:
# load the input image and compute the hash
image = cv2.imread(imagePath)
h = dhash(image)
# grab all image paths with that hash, add the current image
# path to it, and store the list back in the hashes dictionary
p = hashes.get(h, [])
p.append(imagePath)
hashes[h] = p
# loop over the image hashes
for (h, hashedPaths) in hashes.items():
# check to see if there is more than one image with the same hash
if len(hashedPaths) > 1:
#print(hashedPaths)
duplimg_list.append(hashedPaths)
return duplimg_list
def img_plot_colour_hist(dataset_directory):
import io, base64, urllib
red_values = []; green_values = []; blue_values = []; all_channels = []
imagePaths = list(paths.list_images(dataset_directory))
for imagePath in imagePaths:
img = np.array(Image.open(imagePath))
red_values.append(np.mean(img[:, :, 0]))
green_values.append(np.mean(img[:, :, 1]))
blue_values.append(np.mean(img[:, :, 2]))
all_channels.append(np.mean(img))
_, axes = plt.subplots(ncols=4, nrows=1, constrained_layout=True, figsize=(16, 3), sharey=True)
for ax, column, vals, c in zip(
axes,
['red', 'green', 'blue', 'all colours'],
[red_values, green_values, blue_values, all_channels],
'rgbk'
):
ax.hist(vals, bins=100, color=c)
ax.set_title(f'{column} hist')
plt.suptitle("Image Dataset Colour Distribution")
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
string = base64.b64encode(buf.read())
uri = 'data:image/png;base64,' + urllib.parse.quote(string)
    return uri
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
from keras.applications import VGG16
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import classification_report,confusion_matrix,precision_recall_curve
import seaborn as sns
import cv2
def PredictCAST(test_image, load_model_dir, model_name, hparams_config_file):
hyperparam_config = hparams_config_file['img_classifier']
print("[Info] loading imagenet weights...")
#baseModel = keras.applications.ResNet101(weights="imagenet", include_top=False, input_tensor=Input(shape=(128, 128, 3)))
if model_name == "densenet":
print('Loading Densenet model')
baseModel = keras.applications.DenseNet121(weights="imagenet", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #98
elif model_name == "inception":
print('Loading Inception model')
baseModel = keras.applications.InceptionV3(weights="imagenet", include_top=False, input_tensor=Input(shape=(hyperparam_config['img_width'],hyperparam_config['img_height'],hyperparam_config['img_channel']))) #97
headModel = baseModel.output
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(1024, activation='relu')(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation='sigmoid')(headModel)
model = Model(inputs=baseModel.input, outputs=headModel)
print("[INFO] compiling model...")
opt = Adam(lr=hyperparam_config['lr'])
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
model.load_weights(os.path.join(load_model_dir, model_name))
img = cv2.imread(test_image)
img = cv2.resize(img, (hyperparam_config['img_width'],hyperparam_config['img_height']))
orig = img.copy()
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = img/255
print("[Info] predicting output")
#prediction = model.predict_classes(img)
prediction = model.predict(img)
prediction = np.argmax(prediction,axis=1)
print(prediction)
if (prediction<0.5):
print("def_front")
cv2.putText(orig, "def_front", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
else:
print("ok_front")
cv2.putText(orig, "ok_front", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
plt.imshow(orig)
plt.axis('off')
plt.show()
print("Prediction Completed...")
import logging
import numpy as np
# from learner.classificationModel import ClassifierModel
from learner.aion_matrix import aion_matrix
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.metrics import mean_squared_error
class defaultParams():
def __init__(self, modelName, paramDict, scoreParam, MakeFP0, MakeFN0,paramSize):
self.modelName = modelName
self.paramDict = paramDict
self.scoreParam = scoreParam
self.MakeFP0 = MakeFP0
self.MakeFN0 = MakeFN0
        self.dictsize = paramSize
        self.log = logging.getLogger('eion')
def paramDictConvertion(self):
if self.dictsize != 0:
for keys in self.paramDict.keys():
self.paramDict[keys] = self.paramDict[keys][0]
def startTrainingClassification(self, trainX, trainY, testX, testY):
threshold = -1
precisionscore = -1
recallscore = -1
objClf = aion_matrix()
self.paramDictConvertion()
if self.modelName == 'LogisticRegression':
from sklearn import linear_model
estimator = linear_model.LogisticRegression()
if self.modelName == 'GaussianNB':
from sklearn.naive_bayes import GaussianNB
estimator = GaussianNB()
if self.modelName == 'SVC':
from sklearn import svm
estimator = svm.SVC()
if self.modelName == 'KNeighborsClassifier':
from sklearn.neighbors import KNeighborsClassifier
estimator = KNeighborsClassifier()
if self.modelName == 'DecisionTreeClassifier':
from sklearn.tree import DecisionTreeClassifier
estimator = DecisionTreeClassifier()
if self.modelName == 'RandomForestClassifier':
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier()
if self.modelName == 'GradientBoostingClassifier':
from sklearn.ensemble import GradientBoostingClassifier
estimator = GradientBoostingClassifier()
if self.modelName == 'XGBClassifier':
import xgboost as xgb
estimator = xgb.XGBClassifier()
if self.modelName == 'CatBoostClassifier':
from catboost import CatBoostClassifier
estimator = CatBoostClassifier()
if self.modelName == 'LGBMClassifier':
from lightgbm import LGBMClassifier
estimator = LGBMClassifier()
if self.dictsize != 0:
estimator.set_params(**self.paramDict)
estimator.fit(trainX, trainY)
if not testX.empty:
predictedData = estimator.predict(testX)
score = objClf.get_score(self.scoreParam, testY, predictedData)
if self.MakeFP0:
self.log.info('-------- Calculate Threshold for FP Start-------')
startRange = 0.0
endRange = 1.0
stepsize = 0.01
threshold_range = np.arange(startRange, endRange, stepsize)
threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY,
threshold_range, 'FP', self.modelName)
self.log.info('-------- Calculate Threshold for FP End-------')
if self.MakeFN0:
self.log.info('-------- Calculate Threshold for FN Start-------')
startRange = 1.0
endRange = 0.0
stepsize = -0.01
threshold_range = np.arange(startRange, endRange, stepsize)
threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY,
threshold_range, 'FN', self.modelName)
self.log.info('-------- Calculate Threshold for FN End-------')
else:
predictedData = estimator.predict(trainX)
score = objClf.get_score(self.scoreParam, trainY, predictedData)
if self.MakeFP0:
self.log.info('-------- Calculate Threshold for FP Start-------')
startRange = 0.0
endRange = 1.0
stepsize = 0.01
threshold_range = np.arange(startRange, endRange, stepsize)
threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY,
threshold_range, 'FP', self.modelName)
self.log.info('-------- Calculate Threshold for FP End-------')
if self.MakeFN0:
self.log.info('-------- Calculate Threshold for FN Start-------')
startRange = 1.0
endRange = 0.0
stepsize = -0.01
threshold_range = np.arange(startRange, endRange, stepsize)
threshold, precisionscore, recallscore = objClf.check_threshold(estimator, trainX, trainY,
threshold_range, 'FN', self.modelName)
self.log.info('-------- Calculate Threshold for FN End-------')
# status, bscore, bthres, brscore, bpscore = objClf.getBestModel(self.MakeFP0,self.MakeFN0, threshold,
# bestthreshold, recallscore, bestrecallscore,
# precisionscore, bestprecisionscore, score,
# bestScore)
return estimator, estimator.get_params(), self.modelName, score, threshold, precisionscore, recallscore
    def startTrainingRegression(self, trainX, trainY, testX, testY):
#objClf = aion_matrix()
try:
score = 0
self.paramDictConvertion()
if self.modelName=="LinearRegression":
from sklearn import linear_model
estimator = linear_model.LinearRegression()
if self.modelName=="Lasso":
from sklearn import linear_model
estimator = linear_model.Lasso()
if self.modelName=="Ridge":
from sklearn import linear_model
estimator = linear_model.Ridge()
if self.modelName=="DecisionTreeRegressor":
from sklearn.tree import DecisionTreeRegressor
estimator = DecisionTreeRegressor()
if self.modelName=="RandomForestRegressor":
from sklearn.ensemble import RandomForestRegressor
estimator = RandomForestRegressor()
if self.modelName== "XGBRegressor":
import xgboost as xgb
estimator = xgb.XGBRegressor()
if self.modelName == 'CatBoostRegressor':
from catboost import CatBoostRegressor
estimator = CatBoostRegressor()
if self.modelName == 'LGBMRegressor':
from lightgbm import LGBMRegressor
estimator = LGBMRegressor()
if self.dictsize != 0:
estimator.set_params(**self.paramDict)
estimator.fit(trainX, trainY)
except Exception as e:
print(e)
if not testX.empty:
predictedData = estimator.predict(testX)
if 'neg_mean_squared_error' in self.scoreParam:
meanssquatederror = mean_squared_error(testY, predictedData)
score = meanssquatederror
elif 'neg_root_mean_squared_error' in self.scoreParam:
rootmeanssquatederror = mean_squared_error(testY, predictedData, squared=False)
score = rootmeanssquatederror
elif 'mae' in self.scoreParam:
meanabsoluteerror = mean_absolute_error(testY, predictedData)
score = meanabsoluteerror
elif 'r2' in self.scoreParam:
r2score = r2_score(testY, predictedData)
score = r2score
else:
predictedData = estimator.predict(trainX)
if 'neg_mean_squared_error' in self.scoreParam:
meanssquatederror = mean_squared_error(trainY, predictedData)
score = meanssquatederror
elif 'neg_root_mean_squared_error' in self.scoreParam:
rootmeanssquatederror = mean_squared_error(trainY, predictedData, squared=False)
score = rootmeanssquatederror
elif 'mae' in self.scoreParam:
meanabsoluteerror = mean_absolute_error(trainY, predictedData)
score = meanabsoluteerror
elif 'r2' in self.scoreParam:
r2score = r2_score(trainY, predictedData)
score = r2score
return estimator, estimator.get_params(), self.modelName, score
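# Illustrative usage (not part of the original code): trains a default
# LogisticRegression on toy data. Assumes sklearn is available; 'Accuracy'
# follows the scoring name used elsewhere in this codebase, and paramSize=0
# means the estimator defaults are used with no threshold tuning.
if __name__ == '__main__':
    import pandas as pd
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=5, random_state=0)
    trainX, testX = pd.DataFrame(X[:150]), pd.DataFrame(X[150:])
    trainY, testY = y[:150], y[150:]
    trainer = defaultParams('LogisticRegression', {}, 'Accuracy', False, False, 0)
    estimator, params, name, score, thr, prec, rec = trainer.startTrainingClassification(
        trainX, trainY, testX, testY)
    print(name, score)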
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import boto3
import json
import time
import requests
import datetime
import uuid
import shutil
from websocket import create_connection
from botocore.exceptions import ClientError
import tarfile
from pathlib import Path, PurePosixPath
from stat import S_ISDIR
from fabric import Connection
import select
import logging
class awsGPUTraining():
def __init__(self, config):
local_config = {"location":{"data":"aion/data/od", "code":"", "pretrainedModel":"aion/pretrainedModels"},
"jupyter":{"header":{"Authorization":"Token f3af05d5348301997fb014f245569e872d27bb9018fd70d2"}, "portNo":"8888",
"notebook_path":"aion/code/AWS_GPU_OD_Training.ipynb"}}
self.serverConfig = config["server"]
self.sshConfig = config["ssh"]
self.log = logging.getLogger('eion')
self.codeLocation = local_config["location"]["code"]
self.dataLocation = local_config["location"]["data"]
self.pretrainedModelLocation = local_config["location"]["pretrainedModel"]
self.jupyterConfig = local_config["jupyter"]
self.serverIP = ""
if self.serverConfig["awsAccessKeyId"] == "" or self.serverConfig["awsSecretAccessKey"] == "":
raise ValueError("Cloud server configuration is not available.")
if len(self.serverConfig["InstanceIds"]) == 0 and self.serverConfig["amiId"] == "":
raise ValueError("Please provide either InstanceIds or amiId in server config")
self.instanceId = []
self.separate_instance = False
if self.serverConfig["amiId"] != "":
self.separate_instance = True
else:
if len(self.serverConfig["InstanceIds"]):
if isinstance(self.serverConfig["InstanceIds"], list):
self.instanceId = self.serverConfig["InstanceIds"]
elif isinstance(self.serverConfig["InstanceIds"], str):
self.instanceId = [self.serverConfig["InstanceIds"]]
self.ec2_client = boto3.client(self.serverConfig["serverName"], region_name=self.serverConfig["regionName"], aws_access_key_id=self.serverConfig["awsAccessKeyId"], aws_secret_access_key=self.serverConfig["awsSecretAccessKey"])
def __sftp_exists(self, sftp, path):
try:
sftp.stat(path)
return True
except:# IOError, e:
#if e.errno == errno.ENOENT:
return False
def __rmtree(self, sftp, remotepath, level=0):
for f in sftp.listdir_attr(remotepath):
rpath = str(PurePosixPath(remotepath)/f.filename)
if S_ISDIR(f.st_mode):
self.__rmtree(sftp, rpath, level=(level + 1))
sftp.rmdir(rpath)
else:
rpath = str(PurePosixPath(remotepath)/f.filename)
sftp.remove(rpath)
def copy_files_to_server(self, location):
try:
client = Connection(
host=self.serverIP,
user=self.sshConfig["userName"],
connect_kwargs={
"key_filename": self.sshConfig["keyFilePath"],
},
)
client.sudo('rm -rf {}/*'.format(self.dataLocation))
tarFile = str((PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix(".tar.gz"))
client.put(location+'/test.tfrecord', self.dataLocation+'/test.tfrecord')
client.put(location+'/train.tfrecord', self.dataLocation+'/train.tfrecord')
client.put(location+'/pipeline.config', self.dataLocation+'/pipeline.config')
client.put(location+'/label_map.pbtxt', self.dataLocation+'/label_map.pbtxt')
client.put(location+'/model.config', self.dataLocation+'/model.config')
if self.jupyterConfig != "":
client.run("touch {}".format(self.dataLocation+'/log.txt'))
except Exception as e:
raise ValueError("Error in copying data to cloud server. " + str(e))
def __myexec(self, ssh, cmd, timeout, want_exitcode=False):
# one channel per command
stdin, stdout, stderr = ssh.exec_command(cmd)
# get the shared channel for stdout/stderr/stdin
channel = stdout.channel
# we do not need stdin.
stdin.close()
# indicate that we're not going to write to that channel anymore
channel.shutdown_write()
# read stdout/stderr in order to prevent read block hangs
stdout_chunks = []
stdout_chunks.append(stdout.channel.recv(len(stdout.channel.in_buffer)))
# chunked read to prevent stalls
while not channel.closed or channel.recv_ready() or channel.recv_stderr_ready():
# stop if channel was closed prematurely, and there is no data in the buffers.
got_chunk = False
readq, _, _ = select.select([stdout.channel], [], [], timeout)
for c in readq:
if c.recv_ready():
stdout_chunks.append(stdout.channel.recv(len(c.in_buffer)))
got_chunk = True
if c.recv_stderr_ready():
# make sure to read stderr to prevent stall
stderr.channel.recv_stderr(len(c.in_stderr_buffer))
got_chunk = True
'''
1) make sure that there are at least 2 cycles with no data in the input buffers in order to not exit too early (i.e. cat on a >200k file).
2) if no data arrived in the last loop, check if we already received the exit code
3) check if input buffers are empty
4) exit the loop
'''
if not got_chunk \\
and stdout.channel.exit_status_ready() \\
and not stderr.channel.recv_stderr_ready() \\
and not stdout.channel.recv_ready():
# indicate that we're not going to read from this channel anymore
stdout.channel.shutdown_read()
# close the channel
stdout.channel.close()
break # exit as remote side is finished and our bufferes are empty
# close all the pseudofiles
stdout.close()
stderr.close()
if want_exitcode:
# exit code is always ready at this point
return (''.join(stdout_chunks), stdout.channel.recv_exit_status())
return ''.join(stdout_chunks)
def __myexec1(self, ssh, cmd, timeout, want_exitcode=False):
# one channel per command
stdin, stdout, stderr = ssh.exec_command(cmd, get_pty=True)
for line in iter(stderr.readline, ""):
print(line, end="")
stdin.close()
stdout.close()
stderr.close()
def executeCode(self):
try:
client = Connection(
host=self.serverIP,
user=self.sshConfig["userName"],
connect_kwargs={
"key_filename": self.sshConfig["keyFilePath"],
},
)
cmd = 'python3.8 {} {} {}'.format(self.codeLocation, self.dataLocation, self.pretrainedModelLocation)
client.run( cmd)
except Exception as e:
raise ValueError("Error in running code on cloud server. " + str(e))
def start_executing_notebook(self):
try:
publicIp_Port = self.serverIP + ":" + self.jupyterConfig["portNo"]
conURL = "ws://" + publicIp_Port
base = 'http://' + publicIp_Port + ''
headers = self.jupyterConfig["header"]
url = base + '/api/kernels'
flag = True
while flag: # deadlock need to add timeout
response = requests.post(url, headers=headers)
flag = False
kernel = json.loads(response.text)
# Load the notebook and get the code of each cell
url = base + '/api/contents/' + self.jupyterConfig["notebook_path"]
response = requests.get(url, headers=headers)
file = json.loads(response.text)
code = [c['source'] for c in file['content']['cells'] if len(c['source']) > 0 and c['cell_type']=='code' ]
ws = create_connection(conURL + "/api/kernels/" + kernel["id"] + "/channels",
header=headers)
def send_execute_request(code):
msg_type = 'execute_request';
content = {'code': code, 'silent': False}
hdr = {'msg_id': uuid.uuid1().hex,
'username': 'test',
'session': uuid.uuid1().hex,
                       'date': datetime.datetime.now().isoformat(),
'msg_type': msg_type,
'version': '5.0'}
msg = {'header': hdr, 'parent_header': hdr,
'metadata': {},
'content': content}
return msg
for c in code:
ws.send(json.dumps(send_execute_request(c)))
# We ignore all the other messages, we just get the code execution output
# (this needs to be improved for production to take into account errors, large cell output, images, etc.)
error_msg = ''
traceback_msg = ''
for i in range(0, len(code)):
msg_type = '';
while msg_type != "stream":
rsp = json.loads(ws.recv())
msg_type = rsp["msg_type"]
if msg_type == 'error':
raise ValueError("Error on Cloud machine: "+rsp['content']['evalue'])
ws.close()
            self.log.info('Status:- |...Execution Started')
except ClientError as e:
raise ValueError(e)
def __wait_for_completion(self, sftp, remoteLogFile, localLogFile):
waiting = True
error_msg = ""
while waiting:
time.sleep(5 * 60)
try:
sftp.get(str(remoteLogFile), str(localLogFile))
with open(localLogFile, "r") as f:
content = f.readlines()
for x in content:
if "Error" in x:
waiting = False
error_msg = x
if "success" in x:
waiting = False
            except Exception as e:
                raise ValueError(str(e))
return error_msg
def copy_file_from_server(self, localPath):
try:
client = Connection(
host=self.serverIP,
user=self.sshConfig["userName"],
connect_kwargs={
"key_filename": self.sshConfig["keyFilePath"],
},
)
remoteLogFile = PurePosixPath(self.dataLocation)/'log.txt'
localLogFile = Path(localPath)/'remote_log.txt'
client.get(str(remoteLogFile), str(localLogFile))
            tarFile = (PurePosixPath(self.dataLocation).parent/PurePosixPath(self.dataLocation).name).with_suffix(".tar.gz")
client.get(str(tarFile), str(Path(localPath)/tarFile.name))
except:
raise
        return str(Path(localPath)/tarFile.name)
dims, n_timesteps, n_bottleneck,units,activation,df):
# inputs = Input(shape = (n_timesteps, n_dims))
inputs = Input(shape = (df.shape[1], df.shape[2]))
e = keras.layers.LSTM(units, activation = activation, return_sequences = True)(inputs)
## code layer or compressed form of data produced by the autoencoder, bottleneck layer
latent_space = keras.layers.LSTM(n_bottleneck, activation = activation,
return_sequences = False,
name = 'bottleneck_layer')(e)
e = keras.layers.RepeatVector(n_timesteps)(latent_space)
decoder = keras.layers.LSTM(n_bottleneck, activation = activation,
return_sequences = True)(e)
decoder = keras.layers.LSTM(units, activation = activation, return_sequences = True)(decoder)
outputs = keras.layers.TimeDistributed(Dense(n_dims))(decoder)
model = Model(inputs = inputs, outputs = outputs)
return model
    ## Model checkpoints save the best weights seen during training, and early stopping prevents the model from running unnecessarily long.
def callbacks(self, **kwargs):
self.mc = ModelCheckpoint(filepath = kwargs.get("filename"),
save_best_only = True, verbose = 0)
self.es = EarlyStopping(monitor = kwargs.get("monitor"),
patience = kwargs.get("patience"))
return self.es,self.mc
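    # Illustrative usage (comments only; the filename, monitor and patience
    # values are hypothetical):
    #   es, mc = self.callbacks(filename='best_autoencoder.h5',
    #                           monitor='val_loss', patience=5)
    #   model.fit(X_train, X_train, epochs=50, validation_split=0.1,
    #             callbacks=[es, mc])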
    ## The function below writes the datetime-granularity helper class as a python file into the target->script folder.
    '''The generated aion_gettimegranularity class is used to retrieve the time pattern (time granularity) of a given datetime feature.'''
def create_datetime_pyfile(self):
try:
datetimepattern_code=r"""##
import pandas as pd
import numpy as np
class aion_gettimegranularity:
cls_name="datetimeinformation"
def __init__(self,dataframe, datetimefeature):
self.df=dataframe
self.datetimefeature=datetimefeature
def get_dfinfo(self,df):
from io import StringIO
buf = StringIO()
df.info(buf=buf)
#print(buf.getvalue())
return buf.getvalue()
def get_granularity(self):
try:
##get local df
df_t=self.df
buf_info=self.get_dfinfo(df_t)
df_t.drop(df_t.filter(regex='Unname'),axis=1,inplace=True)
try:
df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature])
except Exception as e:
pass
# print("Datetime feature to python datetime format convertion error.\\n",e)
df_t['time_diff']=df_t[self.datetimefeature].diff().shift(-1)
datetime_mean=df_t['time_diff'].mean()
totsec = datetime_mean.total_seconds()
## Dict variable to store datetime details.Initialized all date param as False.
status_time={"h":False,"m":False,"s":False,"us":False,"ns":False,"Y":False,"M":False,"D":False}
if (datetime_mean.days == 0):
if (totsec/3600 > 1):
## hour
status_time['h']=True
else:
if (totsec/60 >1):
## minute
status_time['m']=True
else:
if (totsec <= 1e-06 and totsec > 1e-09):
## microsecond
status_time['us']=True
elif (totsec <= 1e-09 and totsec >= 1e-12):
## nanosecond
status_time['ns']=True
else:
## second
status_time['s']=True
else:
days=datetime_mean.days
if (days/365>1):
## year
status_time['Y']=True
else:
if (days>30):
## month
status_time['M']=True
else:
## day
status_time['D']=True
time_pattern=None
for k,v in status_time.items():
if (v == True):
time_pattern=k
#print("<----- DateTime feature pattern (year/month/day/hour/minute/second/millisecond/microsecond/nanosecond) is: \\t",(time_pattern))
try:
try:
df_t[self.datetimefeature] = pd.to_datetime(df_t[self.datetimefeature])
except Exception as e:
pass
df_t['Time_diff'] = ((df_t[self.datetimefeature])).diff(-1).dt.floor('T').dt.total_seconds().div(60).abs()
time_threshold=1
df_t['anomalyType'] = np.where((df_t['Time_diff'] != time_threshold),"Point","Sequence")
df_t.drop("Time_diff",axis=1,inplace=True)
except Exception as e:
print("time_diff err message: ",str(e))
except Exception as e:
pass
# print("get_granularity err msg: ",(e))
return df_t
"""
cwd=self.deployLocation
file_name='aion_granularity'+'.py'
try:
data_file=os.path.normpath(os.path.join(cwd,'script',file_name))
with open(data_file,'w') as file:
file.write(datetimepattern_code)
except Exception as error:
self.log.info("<---- datetimepattern_code write Error.: ---->"+str(error))
self.log.info("datetimepattern source code created at target folder...\\n")
except Exception as error:
self.log.info("<---- datetimepattern_code function Error.: ---->"+str(error))
## Simple mlp based autoencoder model, not used now.
# def aetsmodel_lstm(self,X_train):
# model = keras.Sequential()
# # autoencoder encoder
# model.add(keras.layers.LSTM(
# units=64,
# input_shape=(X_train.shape[1], X_train.shape[2])
# ))
# model.add(keras.layers.Dropout(rate=0.2))
# model.add(keras.layers.RepeatVector(n=X_train.shape[1]))
# # autoencoder decoder
# model.add(keras.layers.LSTM(units=64, return_sequences=True))
# model.add(keras.layers.Dropout(rate=0.2))
# model.add(
# keras.layers.TimeDistributed(
# keras.layers.Dense(units=X_train.shape[2])
# )
# )
# return model
## To find optimal anomaly threshold value
def find_threshold(self,model, x_train_scaled):
reconstructions = model.predict(x_train_scaled)
# provides per-instance reconstruction losses (MAE)
reconstruction_errors = tf.keras.losses.mae(reconstructions, x_train_scaled)
# threshold for anomaly scores
threshold = np.mean(reconstruction_errors.numpy())+ 2*np.std(reconstruction_errors.numpy())
return threshold
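## Worked toy example of the mean + 2*std threshold rule used above (assumed numbers):
#   import numpy as np
#   errors = np.concatenate([np.full(9, 0.1), [0.9]])   # reconstruction MAE per sample
#   thr = errors.mean() + 2 * errors.std()              # 0.18 + 2*0.24 = 0.66, so 0.9 is flagged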
## compile the model with the given loss and either the Adam or SGD optimizer
def model_compile(self, model,lr, loss, opt):
if opt == "adam":
opt = Adam(learning_rate = lr)
else:
opt = SGD(learning_rate = lr)
model.compile(loss = loss, optimizer = opt)
## save anomaly points in aion target folder
def save_anomalyvalues(self,df,file_name):
# cwd = os.path.abspath(os.path.dirname(__file__))
cwd=self.deployLocation
file_name=file_name+'.csv'
try:
out_path=os.path.normpath(os.path.join(cwd,'output'))
if not os.path.isdir(out_path):
os.makedirs(out_path)
data_file=os.path.normpath(os.path.join(cwd,'output',file_name))
except Exception as error:
self.log.info("<---- autoencoder artifact_dir path. Error Msg: ---->"+str(error))
try:
df.to_csv(data_file,index=False)
except Exception as e:
self.log.info("<---- Saving log data frame error. Error Msg: ---->"+str(e))
## model summary
def summary(self,model):
return model.summary()
##Method to find subsequence and point anomalies using aion_gettimegranularity
def find_point_subsequence_anomalies(self,datetime_column,dataframe=None):
try:
dataframe.reset_index(level=0, inplace=True)
try:
dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column])
except Exception as e:
self.log.info("Dataframe contains no datetime feature.Err.Msg: \\n"+str(e))
pass
try:
##The commented part below uses normalize with a time delta to find point anomalies. Not used; kept for reference.
##get day to check difference
#date_f = dataframe[datetime_column].dt.normalize()
##compare successive rows and identify group size
#dataframe['anomaly_value'] = np.where(dataframe[datetime_column].groupby(date_f.ne(date_f.shift()).cumsum()).transform('size').gt(1),'subsequence_anomaly', 'Point_anomaly')
##Using get_timepattern method
aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column)
anomaly_info_df=aion_gettimegranularity_obj.get_granularity()
except Exception as e:
self.log.info("find_point_subsequence_anomalies,: aion_gettimegranularity err msg:: \\n"+str(e))
self.log.info("find_point_subsequence_anomalies,: anomaly_info_df: \\n"+str(anomaly_info_df))
except Exception as e:
self.log.info("find_point_subsequence_anomalies,: err msg:: \\n"+str(e))
return anomaly_info_df
## Auto encoder time series function call
## dataframe info() does not work with py logging, so as a workaround we capture the information in a buffer and log it.
def get_df_info(self,df):
from io import StringIO
buf = StringIO()
df.info(buf=buf)
#self.log.info(buf.getvalue())
return buf.getvalue()
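## Usage sketch: df.info() writes to stdout, so it is redirected into a StringIO buffer
## that the logger can consume (toy frame assumed):
#   import pandas as pd
#   info_text = self.get_df_info(pd.DataFrame({'a': [1, 2]}))
#   self.log.info(info_text)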
## Method to detect time series based anomalies in user data. Using both lstm and dense based autoencoder approaches.
def aionAEAnomalyTS(self,df,test_size_perc,target,time_steps,dropout,mv_unique_feature_ad):
ae_hyperparameter=self.paramSpace
anomaly_algorithm=self.anomalyMethod
# test_size=float(self.testSize)
test_size=0.0
# train_size=1-test_size
train_size=1-test_size
# train_size_perc=train_size*100
train_size=int(len(df) * train_size)
try:
timeseries_layers=ae_hyperparameter['timeseries_layers']
## Choose between LSTM-only and dense layers for the network. Dense layers work better for predicting both point and sequence anomalies in time series.
if (timeseries_layers.lower() == 'lstm'):
try:
## Printable feature-name string, needed later when saving per-feature anomaly outputs.
feature_name = ' '.join(map(str, df.columns))
## Need to get normalized data for threshold calculation.
data_mean=df.mean(axis=0)
data_std=df.std(axis=0)
data=(df-data_mean)/data_std
# train, test = df[:train_size], df[train_size:]
train, test = data[:train_size], data[train_size:]
test=train
test1=test ## Need to copy test data
train_index=train.index
test_index=test.index
cols = df.columns
# train, test = train_test_split(df, test_size=test_size,random_state=42)
X_train, y_train = self.create_dataset(
train,
train,
time_steps
)
X_test, y_test = self.create_dataset(
test,
test,
time_steps )
n_dims=X_train.shape[2]
n_timesteps=X_train.shape[1]
opt=ae_hyperparameter['optimizer']
loss_fn=ae_hyperparameter["loss"]
epochs=int(ae_hyperparameter['epochs'])
batch_size=int(ae_hyperparameter['batch_size'])
learning_rate=float(ae_hyperparameter['learning_rate'])
n_bottleneck=int(ae_hyperparameter['latentspace_size'])
units=int(ae_hyperparameter['hidden_units'])
activation=ae_hyperparameter['activation']
##For task 20731
minimum_threshold_user = str(ae_hyperparameter['min_threshold'])
maximum_threshold_user = str(ae_hyperparameter['max_threshold'])
autoencoder=self.aetsmodel_lstm(n_dims, n_timesteps, n_bottleneck,units,activation,X_train)
##To save file
# cwd = os.path.abspath(os.path.dirname(__file__))
cwd=self.deployLocation
try:
artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir'))
if not os.path.isdir(artifact_dir):
os.makedirs(artifact_dir)
except Exception as e:
self.log.info("<---- Autoencoder artifact_dir path error. Error Msg: ---->"+str(e))
#dl callback fn to get best loss fn, early stopping & model checkpoint call backs
es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss")
self.model_compile(autoencoder,learning_rate, loss_fn, opt)
X_train = np.reshape(X_train,(X_train.shape[0],X_train.shape[1],X_train.shape[2]))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1],n_dims))
# y_test = y_test.reshape((y_test.shape[0], y_test.shape[1], n_dims))
model_hist = autoencoder.fit(
X_train, X_train,
epochs=epochs,
batch_size=batch_size,
validation_split=0.1,
shuffle=False,callbacks = [mc, es]
)
model_info=self.summary(autoencoder)
X_train_pred = autoencoder.predict(X_train)
train_mae_loss = np.mean(np.abs(X_train_pred - X_train), axis=1)
## Task 20731
if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())):
threshold = float(maximum_threshold_user)
min_threshold = float(minimum_threshold_user)
elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())):
threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss)
min_threshold = float(minimum_threshold_user)
elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())):
threshold = float(maximum_threshold_user)
min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss)
else:
threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss)
min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss)
# threshold = np.mean(train_mae_loss) + np.std(train_mae_loss)
self.log.info("Anomaly threshold max value based on loss fn (MAE): "+str(threshold))
self.log.info("Anomaly threshold min value based on loss fn (MAE): "+str(min_threshold))
X_test_pred = autoencoder.predict(X_test)
test_mae_loss = np.mean(np.abs(X_test_pred - X_test), axis=1)
test_score_df = pd.DataFrame(index=test_index[time_steps:])
if (n_dims >1):
columns = [f'loss_{num}' for num in range(n_dims)]
# test_score_df = pd.DataFrame(test_mae_loss, columns=columns, index=test_index[time_steps:])
test_score_df['loss'] = test_mae_loss.mean(axis=1)
else:
test_score_df['loss'] = test_mae_loss
test_score_df['max_threshold'] = threshold
test_score_df['min_threshold'] = min_threshold
test_score_df['anomaly_value'] = np.where((test_score_df["loss"] > test_score_df["max_threshold"]) | (test_score_df["loss"] < test_score_df["min_threshold"]), True, False)
## Newly added for lstm issue
## if coming dataframe have datetime index , copy it before concat (different indexed dfs)
import pandas.api.types as ptypes
# if (isinstance(test_score_df, pd.DatetimeIndex) and isinstance(df, pd.DatetimeIndex)):
test_cp_index=None
if (ptypes.is_datetime64_dtype(test_score_df.index) and ptypes.is_datetime64_dtype(df.index)):
# self.log.info("test_score_df and df have datetime index cols")
test_cp_index=test_score_df.index
df_cp_index=df.index
test_score_df=test_score_df.reset_index()
df=df.reset_index() ##self.datetimeFeature
test_score_df=test_score_df.dropna()
try:
test_score_df[self.datetimeFeature]=pd.to_datetime(test_score_df[self.datetimeFeature])
df[self.datetimeFeature]=pd.to_datetime(df[self.datetimeFeature])
except:
pass
try:
final_df=pd.DataFrame()
cols_to_use = df.columns.difference(test_score_df.columns)
final_df = pd.merge(test_score_df, df[cols_to_use], left_index=True, right_index=True, how='inner')
except Exception as e:
self.log.info("final_df creation err msg: \\n: "+str(e))
else:
test_index=test_score_df.reset_index(drop=True)
test_cp_index=test_index.index
df_index=df.reset_index(drop=True)
final_df=pd.DataFrame()
final_df = test_score_df.join(df)
final_df=final_df.dropna()
##Again set datetime index to dataframes,drop datetime feature column and set it as index.
try:
final_df.set_index(self.datetimeFeature,inplace=True)
df.set_index(self.datetimeFeature,inplace=True)
df.drop(self.datetimeFeature,axis=1,inplace=True)
final_df.drop(self.datetimeFeature,axis=1,inplace=True)
except:
pass
## Below commented code used to print df.info() in the log file (using the get_df_info() method).
# self.log.info("anomaly final_df info: \\n")
# buf_info=self.get_df_info(final_df)
# self.log.info(buf_info)
# final_df=pd.DataFrame()
##Restore the datetime index
final_df.index=test_cp_index
normal_prediction_df=test_score_df.loc[test_score_df['anomaly_value']==False]
anomaly_prediction_df=test_score_df.loc[test_score_df['anomaly_value']==True]
## Newly added for lstm issue
anomaly_prediction_df=pd.merge(anomaly_prediction_df, final_df, on=['loss', 'max_threshold','min_threshold', 'anomaly_value'], how="left")
# anomaly_prediction_df.fillna(anomaly_prediction_df.mean(), inplace=True)
anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace([np.inf, -np.inf], np.nan)
# anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace([np.inf, -np.inf], np.nan)
final_df['anomaly_value'] = final_df['anomaly_value'].replace([np.inf, -np.inf], np.nan)
anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace({True: 1, False: 0})
final_df['anomaly_value'] = final_df['anomaly_value'].replace({True:1, False: 0})
#make sure no NaN values remain after the dataframe operations
anomaly_prediction_df=anomaly_prediction_df.dropna()
final_df=final_df.dropna()
# anomal_loss_threshold=anomaly_prediction_df #use if we want to save loss and threshold as dataframe info.
self.log.info("Anomaly data with loss and threshold informations: \\n"+str(anomaly_prediction_df))
""" Saving anomaly plots in target->output->anomaly_plot folder """
## The if branch handles multivariate whole-dataset anomaly prediction; the else branch handles per-feature AD prediction.
if (mv_unique_feature_ad.lower()=='false'):
for col in df.columns:
df_subset = anomaly_prediction_df[col]
fig, ax = plt.subplots()
df[col].plot(legend=False, ax=ax)
df_subset.plot(legend=False, ax=ax, color="r")
plot_name=col
ax.set_title(plot_name+"_Anomaly Data Plot")
ax.set_xlabel("DateTime")
ax.set_ylabel("Values")
plot_name=plot_name+'_'+'anomalyplot.png'
try:
plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot'))
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
plotpath=str(plot_dir)+'/'+plot_name
except Exception as e:
self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e))
if os.path.exists(plotpath):
os.remove(plotpath)
plt.savefig(plotpath)
# plt.savefig(str(plot_dir)+'/'+plot_name)
plt.clf()
plt.cla()
plt.close()
else:
df_subset = anomaly_prediction_df
fig, ax = plt.subplots()
df.plot(legend=False, ax=ax)
ax.set_title("Anomaly Data Plot")
ax.set_xlabel("X values")
ax.set_ylabel("Y Values")
df_subset.plot(legend=False, ax=ax, color="r")
plot_name=df.columns[0]
ax.set_title(plot_name+"_Anomaly Data Plot")
# ax.set_xlabel("DateTime")
# ax.set_ylabel("Values")
# plot_name=df.columns[0]
plot_name=plot_name+'_'+'anomalyplot.png'
try:
plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot'))
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
plotpath=str(plot_dir)+'/'+plot_name
except Exception as e:
self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e))
if os.path.exists(plotpath):
os.remove(plotpath)
plt.savefig(plotpath)
# plt.savefig(str(plot_dir)+'/'+plot_name)
plt.clf()
plt.cla()
plt.close()
#process dt feature and save anomalies.
datetime_column=str(self.datetimeFeature)
try:
anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df)
# normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df)
except:
##If any issue arises in time series point anomaly detection, skip it.
self.log.info("Detecting point anomalies hit an issue; check the datetime feature.")
pass
combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True)
combined_df['anomaly_value']=combined_df['anomaly_value'].fillna('Normal_Data')
## If categorical features in original df, then inverse transform the values.
anomaly_prediction_df['anomaly_value'] = anomaly_prediction_df['anomaly_value'].replace({1: "Anomaly", 0: "Normal"})
final_df['anomaly_value'] = final_df['anomaly_value'].replace({1: "Anomaly", 0: "Normal"})
##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature.
if (mv_unique_feature_ad.lower()=='true'):
## Multivariate and saving individual feature based anomalies
self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_ts_anomaly_dataframe'))
# self.save_anomalyvalues(combined_df,(str(feature_name)+'_ts_overall_dataframe'))
try:
final_df=self.merge_pre_post_dfs(final_df)
except Exception as e:
self.log.info("Anomaly Detection Merge df exception:\\n"+str(e))
#If merge fails, just out!
pass
self.save_anomalyvalues(final_df,(str(feature_name)+'_ts_overall_dataframe'))
## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line
# self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt'))
## Save actual test data test_score_df
#self.save_anomalyvalues(test_score_df,(str(feature_name)+'_testdata'))
else:
self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe')
# self.save_anomalyvalues(combined_df,'ts_normal_anomaly_dataframe')
try:
final_df=self.merge_pre_post_dfs(final_df)
except Exception as e:
self.log.info("Anomaly Detection Merge df exception:\\n"+str(e))
#If merge fails, just out!
pass
self.save_anomalyvalues(final_df,'ts_overall_dataframe')
## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line
# self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt'))
## Save actual test data test_score_df
#self.save_anomalyvalues(test_score_df,'testdata')
anomaly_info_df=final_df
self.log.info("<---- Autoencoder time series data anomalies: ---->"+str(anomaly_prediction_df))
self.log.info("<---- Autoencoder time series:Number of anomalies in data:: ---->"+str(len(anomaly_prediction_df)))
# return model
except Exception as e:
self.log.info("AD lstm traceback error: \\n"+str(traceback.format_exc()))
## Dense-layer based time series AD; in most real-world use cases it works better than the LSTM-based approach.
elif (timeseries_layers.lower() == 'dense'):
try:
feature_name=df.columns
feature_name = ' '.join(map(str, feature_name))
try:
#Passing whole data,so test size set as zero.
test_size=0.0
# train_size=1-test_size
train_size=1-test_size
# train_size_perc=train_size*100
train_size=int(len(df) * train_size)
train_data,test_data = df[:train_size], df[train_size:]
test_data=train_data
except:
#If any error occurs, use sklearn train_test_split
train_data,test_data = train_test_split(df,test_size=test_size,random_state=42)
pass
test_index=test_data.index ## to get datetime index
units=int(ae_hyperparameter['hidden_units'])
latent_units=int(ae_hyperparameter['latentspace_size'])
activation=ae_hyperparameter['activation']
##For task 20731
minimum_threshold_user = str(ae_hyperparameter['min_threshold'])
maximum_threshold_user = str(ae_hyperparameter['max_threshold'])
train_data=train_data.values
test_data=test_data.values
## tss is time series flag, true or false
autoencoder = AeDetector(train_data,test_data,units,latent_units,activation)
opt=ae_hyperparameter['optimizer']
loss_fn=ae_hyperparameter["loss"]
epochs=int(ae_hyperparameter['epochs'])
batch_size=int(ae_hyperparameter['batch_size'])
learning_rate=float(ae_hyperparameter['learning_rate'])
cwd=self.deployLocation
try:
artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir'))
if not os.path.isdir(artifact_dir):
os.makedirs(artifact_dir)
except Exception as e:
self.log.info("<---- artifact_dir path error. Error Msg: ---->"+str(e))
es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss")
self.model_compile(autoencoder,learning_rate, loss_fn, opt)
# autoencoder.compile(optimizer='adam', loss='mae')
autoencoder.fit(train_data, train_data, epochs = epochs, batch_size=batch_size, validation_data=(test_data, test_data),callbacks = [mc, es])
# reconstructed = autoencoder(train_data)
reconstructed = autoencoder.predict(train_data)
train_mae_loss = tf.keras.losses.mae(reconstructed, train_data)
## Task 20731
if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())):
threshold = float(maximum_threshold_user)
min_threshold = float(minimum_threshold_user)
elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())):
threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss)
min_threshold = float(minimum_threshold_user)
elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())):
threshold = float(maximum_threshold_user)
min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss)
else:
threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss)
min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss)
# threshold = np.mean(train_mae_loss) + np.std(train_mae_loss)
self.log.info("Anomaly threshold max value based on loss fn (MAE): "+str(threshold))
self.log.info("Anomaly threshold min value based on loss fn (MAE): "+str(min_threshold))
test_labels=None
if (len(self.datetimeFeature) >= 1):
time_series_data="True"
else:
time_series_data="False"
pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, test_data,min_threshold, threshold,test_labels,time_series_data,time_steps,test_index)
# normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']])
normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False]
anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True]
#The save below is for testing only, not production; keep it commented out unless debugging.
#self.save_anomalyvalues(anomaly_info_df,'ts_dataframe_normal')
# anomal_loss_threshold=anomaly_prediction_df #use if we want to save loss and threshold as dataframe info.
self.log.info("Anomaly data with loss and threshold informations: \\n"+str(anomaly_prediction_df))
# anomaly_prediction_df_plot=anomaly_prediction_df
""" Saving anomaly plots in target->output->anomaly_plot folder """
## Only for multivariate (all features) based anomaly data plot
## Use the commented block below if the anomaly df columns come out as numeric indices instead of names.
# if not (df.columns.equals(anomaly_prediction_df.columns)):
# num_cols = []
# try:
# num_cols=[num_cols.append(float(col)) for col in anomaly_prediction_df.columns.values]
# except ValueError:
# pass
# #Dense layer scaler conversion makes column names as int values, so here find the int cols and rename to original names.
# if (num_cols):
# anomaly_prediction_df=anomaly_prediction_df[num_cols]
# anomaly_prediction_df.columns=df.columns
# normal_prediction_df=normal_prediction_df[num_cols]
# normal_prediction_df.columns=df.columns
## The if branch handles multivariate whole-dataset anomaly prediction; the else branch handles per-feature AD prediction.
if (mv_unique_feature_ad.lower()=='false'):
# for col in df.columns:
for col in actual_data.columns:
df_subset = anomaly_prediction_df[col]
fig, ax = plt.subplots()
df[col].plot(legend=False, ax=ax)
df_subset.plot(legend=False, ax=ax, color="r")
plot_name=col
ax.set_title(plot_name+"_Anomaly Data Plot")
ax.set_xlabel("DateTime")
ax.set_ylabel("Values")
plot_name=plot_name+'_'+'anomalyplot.png'
try:
plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot'))
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
plotpath=str(plot_dir)+'/'+plot_name
except Exception as e:
self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e))
if os.path.exists(plotpath):
os.remove(plotpath)
plt.savefig(plotpath)
# plt.savefig(str(plot_dir)+'/'+plot_name)
plt.clf()
plt.cla()
plt.close()
else:
df_subset = anomaly_prediction_df
fig, ax = plt.subplots()
df.plot(legend=False, ax=ax)
ax.set_title("Anomaly Data Plot")
ax.set_xlabel("DateTime")
ax.set_ylabel("Values")
df_subset.plot(legend=False, ax=ax, color="r")
plot_name=df.columns[0]
ax.set_title(plot_name+"_Anomaly Data Plot")
# ax.set_xlabel("DateTime")
# ax.set_ylabel("Values")
# plot_name=df.columns[0]
plot_name=plot_name+'_'+'anomalyplot.png'
try:
plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot'))
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
plotpath=str(plot_dir)+'/'+plot_name
except Exception as e:
self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e))
if os.path.exists(plotpath):
os.remove(plotpath)
plt.savefig(plotpath)
# plt.savefig(str(plot_dir)+'/'+plot_name)
plt.clf()
plt.cla()
plt.close()
datetime_column=str(self.datetimeFeature)
# anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df)
# normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df)
try:
anomaly_prediction_df=self.find_point_subsequence_anomalies(datetime_column,anomaly_prediction_df)
# normal_prediction_df=self.find_point_subsequence_anomalies(datetime_column,normal_prediction_df)
except:
self.log.info("Detecting point anomalies have some issue,check datetime feature.")
##Just pass if datetime column provides issue, use without datetime column info
pass
combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True)
combined_df['anomaly_value']=combined_df['anomaly_value'].fillna('Normal_Data')
## If categorical features in original df, then inverse transform the values.
try:
# anomaly_info_df['anomaly_value']=anomaly_info_df['anomaly_value'].astype(str).replace(replace_values_F,'NormalDataPoint', regex=True)
self.naming_anomalyvalues(anomaly_info_df)
except Exception as e:
self.log.info("anomaly_info_df exception err msg: \\n"+str(e))
##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature.
if (mv_unique_feature_ad.lower()=='true'):
## Multivariate and saving individual feature based anomalies
self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_ts_anomaly_dataframe'))
try:
anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df)
except Exception as e:
#If merge fails, just out!.
self.log.info("Anomaly Detection Merge df exception :\\n"+str(e))
finally:
#check merging done or not, to be imp.
pass
self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_ts_overall_dataframe'))
'''For overall ordered output,uncomment the below.'''
# self.save_anomalyvalues(combined_df,(str(feature_name)+'_ts_overall_dataframe_ordered'))
## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line
# self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt')
## Save actual test data actual_data
#self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata'))
else:
self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe')
try:
anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df)
except Exception as e:
#If merge fails, just out!.
self.log.info("Anomaly Detection Merge df exception :\\n"+str(e))
finally:
#check merging done or not, to be imp.
pass
self.save_anomalyvalues(anomaly_info_df,'ts_overall_dataframe')
#Ordered data
# self.save_anomalyvalues(combined_df,'ts_overall_dataframe_ordered')
## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line
# self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt'),
## Save actual test data test_score_df
#self.save_anomalyvalues(actual_data,'testdata')
self.log.info("<---- Autoencoder time series anomalies : ---->"+str(anomaly_prediction_df))
self.log.info("<---- Autoencoder time series, Number of anomalies in data: ---->"+str(len(anomaly_prediction_df)))
# self.save_anomalyvalues(anomaly_prediction_df,'ts_anomaly_dataframe')
except Exception as e:
self.log.info("dense layer anomaly error: \\n"+str(traceback.format_exc()))
else:
self.log.info("Only LSTM and Dense layers supported for time series.")
except Exception as e:
self.log.info("<---- time series error msg: ---->"+str(e))
self.log.info("<---- time series error msg (detailed): ---->"+str(traceback.format_exc()))
return autoencoder,anomaly_prediction_df,anomaly_info_df
## To normalize data, use when necessary.
def normalize_data(self,train_data,test_data):
min_val=tf.reduce_min(train_data)
max_val=tf.reduce_max(train_data)
train_data = (train_data - min_val)/(max_val - min_val)
test_data = (test_data - min_val)/(max_val - min_val)
#convert the data to float
train_data = tf.cast(train_data, dtype=tf.float32)
test_data = tf.cast(test_data, dtype=tf.float32)
return train_data,test_data
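## Min-max scaling sketch matching normalize_data above (assumed toy tensors); the min/max
## come from the training data only, so the test set cannot leak statistics:
#   import tensorflow as tf
#   train = tf.constant([0.0, 5.0, 10.0])
#   test = tf.constant([2.0, 12.0])
#   mn, mx = tf.reduce_min(train), tf.reduce_max(train)
#   train_n = (train - mn) / (mx - mn)   # [0.0, 0.5, 1.0]
#   test_n = (test - mn) / (mx - mn)     # [0.2, 1.2] - test values may fall outside [0, 1]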
## Scaling data. Not used because of our aion preprocessing data profiler option; use when necessary.
def getScaledData(self, method='standard', train_df=None, test_df=None, feature_col='feature'):
from sklearn.preprocessing import StandardScaler, MinMaxScaler
if method == 'standard':
scaler = StandardScaler()
else:
scaler = MinMaxScaler()
scaler = scaler.fit(train_df[[feature_col]])
train_df['scaled_'+feature_col] = scaler.transform(train_df[[feature_col]])
test_df['scaled_'+feature_col] = scaler.transform(test_df[[feature_col]])
return train_df, test_df, scaler
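## Sketch of the fit-on-train / transform-both pattern used above (hypothetical column name):
#   from sklearn.preprocessing import StandardScaler
#   scaler = StandardScaler().fit(train_df[['feature']])
#   train_df['scaled_feature'] = scaler.transform(train_df[['feature']])
#   test_df['scaled_feature'] = scaler.transform(test_df[['feature']])   # same mean/std as train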
## prediction fn
def prediction(self,model, data,min_threshold, threshold,test_labels,time_series_status,time_steps,test_index):
# data1=scaler.inverse_transform(data)
try:
df_new=self.df.drop(self.datetimeFeature,axis=1,inplace=False)
except:
df_new=self.df
try:
actual_data=pd.DataFrame(self.df,columns=df_new.columns)
except Exception as e:
actual_data=pd.DataFrame(self.df)
pass
n_features=data.shape[1]
self.log.info("prediction: number of features: \\n"+str(n_features))
predicted_data = model.predict(data)
loss = tf.keras.losses.mae(predicted_data, data)
if (time_series_status.lower() == 'true'):
test_score_df = pd.DataFrame(index=test_index)
actual_data = actual_data.set_index(test_index)
anomaly_info_df=pd.DataFrame()
test_score_df['loss'] = loss
test_score_df['max_threshold'] = threshold
test_score_df['min_threshold'] = min_threshold
## Task 20731
#test_score_df['anomaly_value'] = test_score_df.apply(lambda x: x.loss > x.max_threshold or x.loss <= x.min_threshold, axis=1)
test_score_df['anomaly_value'] = np.where((test_score_df["loss"] > test_score_df["max_threshold"]) | (test_score_df["loss"] <= test_score_df["min_threshold"]), True, False)
anomaly_info_df = pd.concat([actual_data, test_score_df], axis=1)
else:
test_score_df = pd.DataFrame()
anomaly_info_df=pd.DataFrame()
test_score_df['loss'] = loss
#test_score_df['threshold'] = threshold
test_score_df['max_threshold'] = threshold
test_score_df['min_threshold'] = min_threshold
## Task 20731
#test_score_df['anomaly_value'] = (test_score_df.loss >= test_score_df.max_threshold)
#test_score_df['anomaly_value'] = (test_score_df.loss < test_score_df.min_threshold)
test_score_df['anomaly_value'] = np.where((test_score_df["loss"] > test_score_df["max_threshold"]) | (test_score_df["loss"] <= test_score_df["min_threshold"]), True, False)
anomaly_info_df = pd.concat([actual_data, test_score_df], axis=1)
return tf.math.less(loss, threshold),test_score_df,actual_data,anomaly_info_df
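## The anomaly flag above is a band test: a point is anomalous when its reconstruction loss
## falls outside the (min_threshold, max_threshold] band. Minimal sketch with assumed numbers:
#   import numpy as np
#   loss = np.array([0.05, 0.20, 0.90])
#   flag = np.where((loss > 0.6) | (loss <= 0.1), True, False)   # [True, False, True]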
##Not used now, for data plotting purposes
# def plot(self,autoencoder,data, n):
# enc_img = autoencoder.encoder(data)
# dec_img = autoencoder.decoder(enc_img)
# plt.plot(data[n], 'b')
# plt.plot(dec_img[n], 'r')
# plt.fill_between(np.arange(data.shape[1]), data[n], dec_img[n], color = 'lightcoral')
# plt.legend(labels=['Input', 'Reconstruction', 'Error'])
# plt.show()
## autoencoder fn for non timeseries data
def ae_nontimeseriesmodelfn(self,df,target):
autoencoder=None
mv_unique_feature_ad=self.mv_featurebased_ad_status
## Printable feature-name string, needed later when saving per-feature anomaly outputs.
feature_name = ' '.join(map(str, df.columns))
#For supervised non time series problems, we need to remove the datetime feature so scaler algorithms process only numeric data.
try:
if (target == ''):
try:
test_size=0.0
# train_size=1-test_size
train_size=1-test_size
# train_size_perc=train_size*100
train_size=int(len(df) * train_size)
train_data,test_data = df[:train_size], df[train_size:]
test_data=train_data
except:
test_size=float(self.testSize)
train_data,test_data = train_test_split(df,test_size=test_size,random_state=42)
pass
ae_hyperparameter=self.paramSpace
units=int(ae_hyperparameter['hidden_units'])
latent_units=int(ae_hyperparameter['latentspace_size'])
activation=ae_hyperparameter['activation']
##For task 20731
minimum_threshold_user = str(ae_hyperparameter['min_threshold'])
maximum_threshold_user = str(ae_hyperparameter['max_threshold'])
train_data=train_data.values
test_data=test_data.values
autoencoder = AeDetector(train_data,test_data,units,latent_units,activation)
opt=ae_hyperparameter['optimizer']
loss_fn=ae_hyperparameter["loss"]
# loss_fn='binary_crossentropy'
epochs=int(ae_hyperparameter['epochs'])
batch_size=int(ae_hyperparameter['batch_size'])
learning_rate=float(ae_hyperparameter['learning_rate'])
# autoencoder.save('../output/autoenc',save_format='tf')
# cwd = os.path.abspath(os.path.dirname(__file__))
cwd=self.deployLocation
try:
artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir'))
if not os.path.isdir(artifact_dir):
os.makedirs(artifact_dir)
except Exception as e:
self.log.info("<---- artifact_dir path error. Error Msg: ---->"+str(e))
es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss")
# es,mc=self.callbacks(filename = "../output/autoenc.sav", patience = 3, monitor = "val_loss")
self.model_compile(autoencoder,learning_rate, loss_fn, opt)
# autoencoder.compile(optimizer='adam', loss='mae')
autoencoder.fit(train_data, train_data, epochs = epochs, batch_size=batch_size, validation_data=(test_data, test_data),callbacks = [mc, es])
reconstructed = autoencoder(train_data)
train_mae_loss = tf.keras.losses.mae(reconstructed, train_data)
#threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss)
#min_threshold = np.mean(train_mae_loss)- 2*np.std(train_mae_loss)
## Task 20731
if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())):
threshold = float(maximum_threshold_user)
min_threshold = float(minimum_threshold_user)
elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())):
threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss)
min_threshold = float(minimum_threshold_user)
elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())):
threshold = float(maximum_threshold_user)
min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss)
else:
threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss)
min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss)
# threshold = np.mean(train_mae_loss) + np.std(train_mae_loss)
self.log.info("Anomaly Upper Threshold value based on loss fn (MAE): "+str(threshold))
self.log.info("Anomaly lower_threshold value based on loss fn (MAE): "+str(min_threshold))
test_labels=None ## No test labels passed
pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, test_data, min_threshold,threshold,test_labels,'False',None,None)
# normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']])
normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False]
anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True]
self.log.info("<---- Autoencoder (non timeseries) based anomaly detection, anomalies in data: ---->"+str(anomaly_prediction_df))
self.log.info("<---- Number of anomalies in data: ---->"+str(len(anomaly_prediction_df)))
self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe')
# combined_df=pd.concat([anomaly_prediction_df,normal_prediction_df],ignore_index=True)
self.log.info("<---- Autoencoder (non timeseries) based anomaly detection, overall data (both anomaly and non-anomaly ) in data: ---->"+str(anomaly_info_df))
# self.save_anomalyvalues(combined_df,'overall_dataframe')
## If categorical features in original df, then inverse transform the values.
try:
##anomaly_info_df,total dataframe.
self.naming_anomalyvalues(anomaly_info_df)
except Exception as e:
self.log.info("anomaly_info_df exception err msg: \\n"+str(e))
##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature.
if (mv_unique_feature_ad.lower()=='true'):
## Multivariate and saving individual feature based anomalies
self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_anomaly_dataframe'))
try:
anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df)
except Exception as e:
#If merge fails, just out!.
self.log.info("Anomaly Detection Merge df exception :\\n"+str(e))
finally:
#check merging done or not, to be imp.
pass
self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_overall_dataframe'))
## Save actual test data actual_data
#self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata'))
else:
self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe')
try:
anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df)
except Exception as e:
#If merge fails, just out!.
self.log.info("Anomaly Detection Merge df exception :\\n"+str(e))
finally:
#check merging done or not, to be imp.
pass
self.save_anomalyvalues(anomaly_info_df,'overall_dataframe')
#Ordered data
# self.save_anomalyvalues(combined_df,'ts_overall_dataframe_ordered')
## If we want anomaly dataframe with loss and threshold for each values (rows),please uncomment below line
# self.save_anomalyvalues(anomal_loss_threshold,'ts_anomaly_dataframe_lt'),
## Save actual test data test_score_df
#self.save_anomalyvalues(actual_data,'testdata')
self.log.info("<---- Autoencoder non time series / supervised problem anomalies : ---->"+str(anomaly_prediction_df))
#ploting
df_subset = anomaly_prediction_df
fig, ax = plt.subplots()
df.plot(legend=False, ax=ax)
df_subset.plot(legend=False, ax=ax, color="r")
ax.set_title("Anomaly Data Plot")
ax.set_xlabel("DateTime")
ax.set_ylabel("Values")
plot_name='anomalyplot.png'
try:
plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot'))
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
plotpath=str(plot_dir)+'/'+plot_name
except Exception as e:
self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e))
if os.path.exists(plotpath):
os.remove(plotpath)
plt.savefig(plotpath)
# plt.savefig(str(plot_dir)+'/'+plot_name)
plt.clf()
plt.cla()
plt.close()
else:
y=df[target]
X=df.drop(target, axis=1)
train_data,test_data,train_labels,test_labels=train_test_split(X,y,test_size=0.2,random_state=42)
count_classes = pd.value_counts(df[target], sort = True)
num_of_classes= len(count_classes)
self.log.info("train_data info: \\n"+str(self.get_df_info(train_data)))
if (num_of_classes >= 2):
# scaler = StandardScaler()
# train_data = scaler.fit_transform(train_data)
# test_data = scaler.fit_transform(test_data)
# self.saveScaler(scaler)
train_labels = train_labels.astype(bool)
test_labels = test_labels.astype(bool)
n_train_data = train_data[train_labels]
n_test_data = test_data[test_labels]
# data1=scaler.inverse_transform(n_test_data)
n_test_data_actual=pd.DataFrame(n_test_data)
##anomaly data
an_train_data = train_data[~train_labels]
an_test_data = test_data[~test_labels]
n_train_data = train_data[train_labels]
n_test_data = test_data[test_labels]
ae_hyperparameter=self.paramSpace
# autoencoder = AeDetector(n_train_data,n_test_data)
activation=ae_hyperparameter['activation']
units=int(ae_hyperparameter['hidden_units'])
latent_units=int(ae_hyperparameter['latentspace_size'])
##For task 20731
minimum_threshold_user = str(ae_hyperparameter['min_threshold'])
maximum_threshold_user = str(ae_hyperparameter['max_threshold'])
autoencoder = AeDetector(n_train_data,n_test_data,units,latent_units,activation)
opt=ae_hyperparameter['optimizer']
loss_fn=ae_hyperparameter["loss"]
batch_size=int(ae_hyperparameter['batch_size'])
# loss_fn='binary_crossentropy'
epochs=int(ae_hyperparameter['epochs'])
learning_rate=float(ae_hyperparameter['learning_rate'])
cwd=self.deployLocation
try:
artifact_dir=os.path.normpath(os.path.join(cwd,'output','autoenc_artifact_dir'))
if not os.path.isdir(artifact_dir):
os.makedirs(artifact_dir)
except Exception as e:
self.log.info("<---- artifact_dir path error. Error Msg: ---->"+str(e))
es,mc=self.callbacks(filename = artifact_dir, patience = 5, monitor = "val_loss")
self.model_compile(autoencoder,learning_rate, loss_fn, opt)
# autoencoder.compile(optimizer='adam', loss='mae')
autoencoder.fit(n_train_data, n_train_data, epochs = epochs, batch_size=batch_size, validation_data=(n_test_data, n_test_data),callbacks = [mc, es])
model_info=self.summary(autoencoder)
self.log.info("<---- Auto encoder anomaly detection model information: ---->"+str(model_info))
# reconstructed = autoencoder(n_train_data)
reconstructed = autoencoder.predict(n_train_data)
#threshold = self.find_threshold(autoencoder, n_train_data)
train_mae_loss = tf.keras.losses.mae(reconstructed, n_train_data)
# pred=tf.math.less(train_mae_loss, threshold)  # threshold is only computed below; prediction happens via self.prediction()
## Task 20731
if ((minimum_threshold_user and minimum_threshold_user.strip()) and (maximum_threshold_user and maximum_threshold_user.strip())):
threshold = float(maximum_threshold_user)
min_threshold = float(minimum_threshold_user)
elif ((minimum_threshold_user.strip()) and (not maximum_threshold_user.strip())):
threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss)
min_threshold = float(minimum_threshold_user)
elif ((not minimum_threshold_user.strip()) and (maximum_threshold_user.strip())):
threshold = float(maximum_threshold_user)
min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss)
else:
threshold = np.mean(train_mae_loss) + 2*np.std(train_mae_loss)
min_threshold = np.mean(train_mae_loss) - 2*np.std(train_mae_loss)
self.log.info("Anomaly threshold max value based on loss fn (MAE): "+str(threshold))
self.log.info("Anomaly threshold min value based on loss fn (MAE): "+str(min_threshold))
pred,test_score_df,actual_data,anomaly_info_df = self.prediction(autoencoder, n_test_data, min_threshold,threshold,test_labels,'False',None,None)
# normal_prediction_df=(anomaly_info_df[~anomaly_info_df['anomaly_value']])
normal_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==False]
# normal_prediction_df.to_csv('normal_prediction_df.csv')
# anomaly_prediction_df=(anomaly_info_df[anomaly_info_df['anomaly_value']])
anomaly_prediction_df=anomaly_info_df.loc[anomaly_info_df['anomaly_value']==True]
self.log.info("<---- Autoencoder (non timeseries) based anomaly detection, overall data (both anomaly and non-anomaly ) in data: ---->"+str(anomaly_info_df))
# self.save_anomalyvalues(combined_df,'overall_dataframe')
## If categorical features in original df, then inverse transform the values.
try:
##anomaly_info_df,total dataframe.
self.naming_anomalyvalues(anomaly_info_df)
except Exception as e:
self.log.info("anomaly_info_df exception err msg: \\n"+str(e))
##Now we are storing anomaly log (as dataframe) based on two options: 1. Anomalies based on all features, 2. Anomalies based on each individual feature.
if (mv_unique_feature_ad.lower()=='true'):
## Multivariate and saving individual feature based anomalies
self.save_anomalyvalues(anomaly_prediction_df,(str(feature_name)+'_anomaly_dataframe'))
try:
anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df)
except Exception as e:
#If merge fails, just out!.
self.log.info("Anomaly Detection Merge df exception :\\n"+str(e))
finally:
#check merging done or not, to be imp.
pass
self.save_anomalyvalues(anomaly_info_df,(str(feature_name)+'_overall_dataframe'))
## Save actual test data actual_data
#self.save_anomalyvalues(actual_data,(str(feature_name)+'_testdata'))
else:
self.save_anomalyvalues(anomaly_prediction_df,'anomaly_dataframe')
try:
anomaly_info_df=self.merge_pre_post_dfs(anomaly_info_df)
except Exception as e:
#If merge fails, just out!.
self.log.info("Anomaly Detection Merge df exception :\\n"+str(e))
finally:
#check merging done or not, to be imp.
pass
self.save_anomalyvalues(anomaly_info_df,'overall_dataframe')
## Save actual test data test_score_df
#self.save_anomalyvalues(actual_data,'testdata')
self.log.info("<----Number of anomalies in data: ---->"+str(len(anomaly_prediction_df)))
""" Plot to show case anomalies, now commented, for testing purpose uncomment and check visually anomalies. """
#ploting
df_subset = anomaly_prediction_df
fig, ax = plt.subplots()
df.plot(legend=False, ax=ax)
df_subset.plot(legend=False, ax=ax, color="r")
# plt.show()
ax.set_title("Anomaly Data Plot")
ax.set_xlabel("DateTime")
ax.set_ylabel("Values")
plot_name='anomalyplot.png'
try:
plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot'))
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
plotpath=str(plot_dir)+'/'+plot_name
except Exception as e:
self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e))
if os.path.exists(plotpath):
os.remove(plotpath)
plt.savefig(plotpath)
# plt.savefig(str(plot_dir)+'/'+plot_name)
plt.clf()
plt.cla()
plt.close()
else:
self.log.info("<---- Check dataset and basic configurations. ---->")
except Exception as e:
self.log.info("<---- Non time series anomaly detection error msg: ---->"+str(e))
self.log.info("<---- Non time series anomaly detection error msg (detailed): ---->"+str(traceback.format_exc()))
return autoencoder,anomaly_prediction_df,anomaly_info_df
## Hyperparameter tuning autoencoders, not implemented
def hyperparamtuning_AE(self):
try:
self.log.info ("autoencoder hyperparam tuning.not implemented.")
except Exception as e:
self.log.info("autoencoder hyperparam tuning error: "+str(e))
pass
## randomsearch for dbscan
def hyperparamtuning_dbscan(self,model,tuner,Parameter_Trials,data):
params=model.get_params().keys()
best_params={}
try:
labels = model.labels_
#df = pd.DataFrame(labels)
try:
scorer = metrics.silhouette_score(data, labels)
except:
pass
if (tuner.lower() == 'randomsearch'):
# Parameters to try
cluster_labels = model.labels_
Random_Search = RandomizedSearchCV(model, Parameter_Trials, n_iter=50,cv=5, scoring='adjusted_rand_score', refit=True, n_jobs=1, verbose=5)
RandomSearchResults=Random_Search.fit(data)
# Fetching the best hyperparameters
best_params=RandomSearchResults.best_params_
# All the parameter combinations tried by RandomizedSearchCV
RandomSearchResults.cv_results_['params']
except Exception as e:
self.log.info("<---- dbscan hpt error msg: ---->"+str(e))
self.log.info("<---- dbscan hpt error msg (detailed): ---->"+str(traceback.format_exc()))
return best_params
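## Note: DBSCAN has no predict(), so cross-validated search over its params is awkward; a common
## alternative is a manual sweep scored by silhouette. Rough sketch (assumed parameter grid):
#   from sklearn.cluster import DBSCAN
#   from sklearn import metrics
#   best = None
#   for eps in (0.2, 0.5, 1.0):
#       for ms in (3, 5, 10):
#           labels = DBSCAN(eps=eps, min_samples=ms).fit_predict(data)
#           if len(set(labels)) > 1:   # silhouette needs at least 2 labels
#               score = metrics.silhouette_score(data, labels)
#               if best is None or score > best[0]:
#                   best = (score, eps, ms)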
## Reading aion postprocess data from target->AION_usecaseNo->data->postprocess data
def read_inputdata(self):
cwd=self.deployLocation
postprocessed_df=None
try:
in_path=os.path.normpath(os.path.join(cwd,'data'))
if not os.path.isdir(in_path):
self.log.info("<---- Anomaly detection target data folder not available.--->\\n")
postprocesseddata=os.path.normpath(os.path.join(cwd,'data','postprocesseddata.csv'))
postprocessed_df=pd.read_csv(postprocesseddata)
except Exception as e:
self.log.info("<---- Anomaly detection target data folder not available, Reading postprocess csv file issue. Error Msg: ---->"+str(e))
return postprocessed_df
## Get original dataframe values using preprocess pipe after output data created.
##get_label_dict fn not used now. Use if preprocess_pipe based transform needed.
def get_label_dict(self, pipe):
label_dict = {}
dict_pipe={}
for (comp_name, component) in pipe.transformer_list:
if 'labelencoding' in comp_name:
i=1
for step in component.steps:
key='val'+'_'+str(i)
ordinalencoder=step[1]
dict_pipe[f'val_{i}']=ordinalencoder
# dict_pipe[key].append(ordinalencoder)
label_dict.update(dict_pipe)
i=i+1
return label_dict
else:
continue
return label_dict
## Decode label features using aion preprocessed_pipe model,not used now. If we need to use preprocess pipe for inverse transform,use below block.
def decoder_labeled_features(self,df):
import joblib
try:
cwd=self.deployLocation
in_path=os.path.normpath(os.path.join(cwd,'data'))
if not os.path.isdir(in_path):
self.log.info("<---- Anomaly detection target model folder not available.--->\\n")
preprocessed_pipe=os.path.normpath(os.path.join(cwd,'model','preprocess_pipe.pkl'))
model = joblib.load(preprocessed_pipe)
label_dict = self.get_label_dict(model)
encoder=label_dict.get('val_4')
num_cols = df.select_dtypes(include=np.number).columns.tolist()
cat_cols = df.select_dtypes(exclude=np.number).columns.tolist()
cat_col_actual=[]
for col in cat_cols:
try:
df1=encoder.inverse_transform(df[col])
cat_col_actual.append(col)
except:
pass
df1=pd.DataFrame(data=df1)
df1.columns=cat_cols
df2=df[num_cols]
df_anomalyinfo_col=df['anomaly_value']
df_list = [df2, df1, df_anomalyinfo_col] # List of your dataframes
combined_df = pd.concat(df_list, join='outer', axis=1).fillna(0)
except:
combined_df=None
pass
return combined_df
## Save predicted and actual data columns, so the user's original data features can be recovered.
def merge_pre_post_dfs(self,out_df=None):
cwd=self.deployLocation
anomaly_algorithm=str(self.anomalyMethod)
try:
in_path=os.path.normpath(os.path.join(cwd,'data'))
if not os.path.isdir(in_path):
self.log.info("<---- Anomaly detection target data folder not available.--->\\n")
preprocessed_file=os.path.normpath(os.path.join(cwd,'data','preprocesseddata.csv'))
preprocessed_df=pd.read_csv(preprocessed_file)
## cat_cols will get categorical col from preprocessed, cat_diff_cols will get common cat col between output df and preprocessed.
cat_cols=preprocessed_df.select_dtypes(exclude=np.number).columns.tolist()
num_cols = preprocessed_df.select_dtypes(include=np.number).columns.tolist()
cat_diff_cols=list(set(cat_cols).intersection(out_df.columns.tolist()))
diff_cols=list(set(preprocessed_df.columns).difference(out_df.columns))
if (cat_diff_cols):
if (len(preprocessed_df) == len(out_df)):
#Drop each categorical col of the original data from the output df (which holds numerically converted values), so merging can be done on matching columns.
try:
## get common categorical col name between actual and output df
for col in cat_diff_cols :
out_df.drop(col,axis=1,inplace=True)
except:
self.log.info("drop col not possible, pass the step.")
#Just continue
pass
diff_cols=list(set(preprocessed_df.columns).difference(out_df.columns))
try:
## Check any datetime column in output df and preprocesseddata
import pandas.api.types as ptypes
outdf_dt_index_check=ptypes.is_datetime64_dtype(out_df.index)
#Is output df have datetime col
if (outdf_dt_index_check):
if ((self.datetimeFeature.lower() !='na' and self.datetimeFeature)):
try:
preprocessed_df[self.datetimeFeature] = pd.to_datetime(preprocessed_df[self.datetimeFeature])
preprocessed_df.set_index(self.datetimeFeature, inplace=True)
except Exception as e:
self.log.info("Given data not contain datetime specified."+str(traceback.format_exc()))
## The step below converts the datetime index back into a datetime column, for merging and dropping purposes.
preprocessed_df.reset_index(inplace=True)
preprocessed_df.rename(columns={"index":self.datetimeFeature},inplace=True)
out_df.reset_index(inplace=True)
out_df.rename(columns={"index":self.datetimeFeature},inplace=True)
else:
## If no datetime column, we need to keep both dataframe index columns as unique. so making them as int index.
preprocessed_df.reset_index(inplace=True, drop=True)
out_df.reset_index(inplace=True, drop=True)
pass
## below part is to get status of index columns type (datetime,int or str), commented now. If needed for debug,pls use.
# dt_index_check=ptypes.is_datetime64_dtype(out_df.index)
# int_index_check=ptypes.is_numeric_dtype(out_df.index)
# str_index_check=ptypes.is_string_dtype(out_df.index)
## Get common column between preprocess and output df
try:
if (anomaly_algorithm.lower() == 'autoencoder'):
common_cols=out_df.drop(['loss','max_threshold','min_threshold','anomaly_value'],axis=1)
common_cols.drop(common_cols.filter(regex="Unname"),axis=1, inplace=True)
merge_on_cols=common_cols.columns.tolist()
combined_df = preprocessed_df.merge(out_df, on=merge_on_cols,how='inner')
## Drop duplicate based on columns except time
# drop_duplicate_on=merge_on_cols.extend(cat_diff_cols)
drop_duplicate_on=merge_on_cols+cat_diff_cols
combined_df = combined_df.drop_duplicates(drop_duplicate_on, keep=False)
else:
## otherwise, it is dbscan algorithm
common_cols=out_df.drop(['cluster','anomaly_value'],axis=1)
common_cols.drop(common_cols.filter(regex="Unname"),axis=1, inplace=True)
merge_on_cols=common_cols.columns.tolist()
combined_df = preprocessed_df.merge(out_df, on=merge_on_cols,how='inner')
## Drop duplicate based on columns except time
# drop_duplicate_on=merge_on_cols+cat_diff_cols
combined_df = combined_df.drop_duplicates(merge_on_cols, keep='last')
except:
combined_df=out_df
pass
## Just for reference, in future if you want different merge/join option
# combined_df = pd.merge(preprocessed_df[diff_cols],out_df, left_index=True, right_index=True, how='inner')
except Exception as e:
self.log.info("<---- merge error msg : ---->"+str(e))
self.log.info("<---- merge error msg (detailed): ---->"+str(traceback.format_exc()))
pass
## if both data frame have different columns (preprocessed and outdf)
else:
self.log.info("User data is preprocessed and data cleaning happened.So, actual data and processed data length mismatch. So,data records range may vary.")
try:
# combined_df=self.decoder_labeled_features(out_df)
combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner')
# combined_df = combined_df.drop_duplicates(cat_cols, keep='last')
combined_df = combined_df.drop_duplicates(num_cols, keep='last')
except:
## If nothing in the merge works, make out_df the final dataframe.
try:
## If the merge above fails, retry here, dropping duplicates on categorical columns (keep=False).
combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner')
combined_df = combined_df.drop_duplicates(cat_cols, keep=False)
except:
#If nothing is works, just keep out_df as combined df
combined_df=out_df
## if no common categorical col found between preprocessed and outdf.
else:
## If the merge does not work, make out_df the final dataframe.
if (len(cat_cols) > 0):
try:
combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner')
combined_df = combined_df.drop_duplicates(cat_cols, keep='last')
except:
#make safe for return
combined_df=out_df
else:
##If no categorical features available
combined_df = preprocessed_df.merge(out_df,on=num_cols,how='inner')
combined_df = combined_df.drop_duplicates(num_cols, keep=False)
except Exception as e:
self.log.info("<---- Anomaly detection target data folder not available, dataframe merging issue. Error Msg: ---->"+str(e))
self.log.info("making output df as final merged data, no categorical column found in output anomaly data. It is user responsibility to check the anomaly data.")
#make safe for return
combined_df=out_df
return combined_df
## This naming function was created for module reusability.
def naming_anomalyvalues(self,df):
replace_values_T='|'.join(['TRUE','True','true'])
replace_values_F='|'.join(['FALSE','False','false'])
try:
df['anomaly_value']=df['anomaly_value'].astype(str).replace(replace_values_T,'AnomalyDataPoint', regex=True)
except:
df['anomaly_value']=df['anomaly_value'].replace(replace_values_T,'AnomalyDataPoint', regex=True)
df['anomaly_value']=df['anomaly_value'].astype(str).replace(replace_values_F,'NormalDataPoint', regex=True)
return df
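## Minimal sketch of the boolean-to-label mapping done above (toy series assumed):
#   import pandas as pd
#   s = pd.Series([True, False]).astype(str)
#   s.replace('TRUE|True|true', 'AnomalyDataPoint', regex=True)   # -> ['AnomalyDataPoint', 'False']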
## DBScan based anomaly detection
def dbscan_ad(self,data,eps,min_samples,cols):
try:
tuner='randomsearch'
Parameter_Trials={'eps':eps,
'min_samples':min_samples}
model = DBSCAN(algorithm='auto')
hist = model.fit(data)
pred = model.fit_predict(data)
best_params = self.hyperparamtuning_dbscan(model,tuner,Parameter_Trials,data)
self.log.info("<---- Best hyper parameters for dbscan: ---->"+str(best_params))
best_eps=best_params['eps']
best_min_samples=best_params['min_samples']
if (best_min_samples < len(cols)):
best_min_samples=len(cols)+1
if (best_eps < 0.2):
best_eps=0.2
self.log.info("best_eps: \\n"+str(best_eps))
self.log.info("best_min_samples: \\n"+str(best_min_samples))
best_model=DBSCAN(algorithm='auto',eps = best_eps, min_samples = best_min_samples)
hist = best_model.fit(data)
pred = best_model.fit_predict(data)
best_labels=best_model.labels_
cluster_name = ["Cluster"+str(i) for i in set(best_labels)]
# outliers = data[best_model.labels_ == -1]
outlier_df = data.copy()
outlier_df.loc[:,'cluster'] = best_model.labels_
outliers_final=outlier_df[outlier_df['cluster']==-1]
outliers_final['anomaly_value']=outliers_final['cluster']==-1
normaldata= outlier_df[outlier_df['cluster']!=-1]
self.log.info("<---- DBScan: Anomalies in data: ---->"+str(outliers_final))
self.log.info("<---- DBScan: Number of anomalies in data: ---->"+str(len(outliers_final)))
# num_cat_features=len(self.cat_cols)
try:
self.save_anomalyvalues(outliers_final,'dbscan_anomaly_dataframe')
self.save_anomalyvalues(normaldata,'dbscan_normaldata_dataframe')
outlier_df['anomaly_value']=outlier_df['cluster']==-1
outlier_df=self.naming_anomalyvalues(outlier_df)
##Convert results to original input data form for end user ease of understanding
try:
outlier_df=self.merge_pre_post_dfs(outlier_df)
except Exception as e:
self.log.info("Anomaly Detection Merge df exception:\\n"+str(e))
                    #If the merge fails, keep outlier_df as-is and continue.
pass
self.save_anomalyvalues(outlier_df,'dbscan_overall_dataframe')
except Exception as e:
self.log.info("DBScan inversetransform err. msg: \\n"+str(e))
no_clusters = len(set(best_labels)) - (1 if -1 in best_labels else 0)
self.log.info("<---- DBScan: No of clusters: ---->"+str(no_clusters))
n_noise_ = list(best_labels).count(-1)
            ## Plotting the dbscan clusters
plot_name='dbscan_anomalyplot.png'
fig, ax = plt.subplots()
ax.set_title("DBScan Clusters")
ax.scatter(data.iloc[:, 0], data.iloc[:, 1], c=best_labels)
outliers_plot = data[best_model.labels_ == -1]
ax.scatter(outliers_plot.iloc[:, 0], outliers_plot.iloc[:, 1], c='red')
cwd=self.deployLocation
try:
plot_dir=os.path.normpath(os.path.join(cwd,'output','anomaly_plot'))
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
plotpath=str(plot_dir)+'/'+plot_name
except Exception as e:
self.log.info("<---- plot_dir path error. Error Msg: ---->"+str(e))
if os.path.exists(plotpath):
os.remove(plotpath)
plt.savefig(plotpath)
plt.clf()
plt.cla()
plt.close()
except Exception as e:
self.log.info("<---- dbscan error msg: ---->"+str(e))
self.log.info("<---- dbscan error msg (detailed): ---->"+str(traceback.format_exc()))
return best_model,outliers_final
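    # Hedged sketch (assumption, not part of the original module): DBSCAN marks noise points
    # with the cluster label -1, which is what dbscan_ad treats as anomalies, e.g.
    #   from sklearn.cluster import DBSCAN
    #   import numpy as np
    #   X = np.array([[0.0, 0.0], [0.1, 0.1], [0.2, 0.0], [9.0, 9.0]])
    #   labels = DBSCAN(eps=0.5, min_samples=2).fit_predict(X)
    #   # labels -> array([ 0,  0,  0, -1]); the isolated point at (9, 9) is noise/anomaly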
## Inverse transform fn for categorical data
def inverse_transform(self,df,cat_cols,le_model):
df_new=pd.DataFrame()
df_new.index=df.index
df_reset_index=df.reset_index(drop=True)
for col in cat_cols:
df_reset_index[col] = le_model.inverse_transform(df_reset_index[col].astype(int))
df_reset_index.index=df_new.index
df=df_reset_index
return df
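    # Hedged sketch (assumption, not part of the original module): inverse_transform undoes the
    # LabelEncoder round-trip used during preprocessing, e.g.
    #   from sklearn import preprocessing
    #   le = preprocessing.LabelEncoder()
    #   codes = le.fit_transform(['red', 'blue', 'red'])   # -> array([1, 0, 1])
    #   le.inverse_transform(codes)                        # -> array(['red', 'blue', 'red'])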
    ##If data arrives without going through the aion data profiler, the preprocessing fn below can be used.
    ##Preprocessing fn for categorical data, not used now.
def preprocessfn_categorical(self,df):
try:
cat_cols=self.cat_cols
le=preprocessing.LabelEncoder()
self.le_model=le
label_encoded_df = df.copy()
for col in cat_cols:
label_encoded_df[col]=le.fit_transform(label_encoded_df[col])
        except Exception as e:
            ## Fall back to the raw frame so the return below never raises UnboundLocalError.
            label_encoded_df = df
            self.log.info("preprocessfn_categorical error traceback."+str(traceback.format_exc()))
return label_encoded_df,cat_cols
    ## Design pattern: Factory, Adapter. Detects an autoencoder object or a dbscan object based on input params. The interface can be used for any other extension. No abstract class is created.
##Main autoencoder based anomaly detection function, from here, sub modules will be called.
def mainAnomalyDetectionfn(self):
df=self.df
        ## reading post processed data from target ->usecase->data directory
# df=self.read_inputdata()
        ## The line below overwrites the incoming df with the post-processed data
self.log.info("<----------- In autoencoder based anomaly detection algorithm main process module, the incoming datafra |
join(home,'HCLT','AION','PreTrainedModels','ObjectDetection')
pipeline_config = str(modelPath/self.modelDirName/"pipeline.config")
checkPoint = "ckpt-0"
with open(str(modelPath/self.modelDirName/"checkpoint/checkpoint")) as f:
line = f.readline()
checkPoint = line.split(':')[1].strip()[1:-1] #(model_checkpoint_path: "ckpt-301") to ckpt-301
checkPoint = "checkpoint/"+checkPoint
from object_detection.utils import config_util
configs = config_util.get_configs_from_pipeline_file(pipeline_config)
model_config = configs['model']
if detectionModel.lower() == 'ssd':
model_config.ssd.num_classes = num_classes
configs['train_config'].fine_tune_checkpoint_type = "detection"
elif detectionModel.lower() == 'centernet':
model_config.center_net.num_classes = num_classes
configs['train_config'].fine_tune_checkpoint_type = "fine_tune"
elif detectionModel.lower() == 'fasterrcnn':
model_config.faster_rcnn.num_classes = num_classes
configs['train_config'].fine_tune_checkpoint_type = "detection"
else:
raise ValueError("{} Model is not supported for object detection.\\n".format(detectionModel))
if self.gpu:
checkpointPath = str(self.gpuPretrainedModelPath / checkPoint)
else:
checkpointPath = str(modelPath/self.modelDirName/checkPoint)
configs['train_config'].fine_tune_checkpoint = checkpointPath
configs['train_config'].num_steps = n_epoch
configs['train_config'].batch_size = batch_size
configs['train_input_config'].tf_record_input_reader.input_path[:] = [str(self.tfRecordLoc/"train.tfrecord")]
configs['train_input_config'].label_map_path = str(self.labelMapLoc/"label_map.pbtxt")
configs['eval_input_config'].tf_record_input_reader.input_path[:] = [self.dataLocation + "/test.tfrecord"]
configs['eval_input_config'].label_map_path = self.dataLocation + "/label_map.pbtxt"
# Save new pipeline config
new_pipeline_proto = config_util.create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(new_pipeline_proto, self.dataLocation)
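    # Hedged note (assumption, not part of the original file): config_util round-trips the
    # TF Object Detection pipeline.config protobuf, so the edits above (num_classes, fine-tune
    # checkpoint, num_steps, batch_size, input paths) are re-serialised by save_pipeline_config.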
def __exportModel(self):
self.log.info('-------> exporting trained Model')
from object_detection.protos import pipeline_pb2
from object_detection import exporter_lib_v2
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.io.gfile.GFile(str(self.pipelineLoc/"pipeline.config"), 'r') as f:
text_format.Merge(f.read(), pipeline_config)
text_format.Merge('', pipeline_config)
exporter_lib_v2.export_inference_graph(
'image_tensor', pipeline_config, self.dataLocation,
str(self.modelOutput))
def startObjectDetector(self):
if self.gpu:
self.log.info('-------> Training on the cloud machine')
self.log.info('Status:- |...Remote Machine Training')
            with open(self.dataLocation+'\\model.config', 'w') as f:
json.dump( self.modelURLDict, f)
awsGpu = awsGPUTraining(self.serverConfig)
try:
awsGpu.start_instance()
awsGpu.copy_files_to_server(self.dataLocation)
awsGpu.start_executing_notebook()
self.log.info('-------> Downloading trained model file')
tarFile = awsGpu.copy_file_from_server(self.dataLocation)
with tarfile.open(tarFile) as tar:
tar.extractall(self.dataLocation)
awsGpu.stop_server_instance()
except:
awsGpu.stop_server_instance()
raise
extractedPath = Path(self.dataLocation)/Path(tarFile).name.split('.')[0]
filesList = extractedPath.glob('**/*')
for file in filesList:
if file.parent == extractedPath:
if file.name == "export":
shutil.copytree(file, self.modelOutput)
elif file.is_dir():
shutil.copytree(file, Path(self.dataLocation)/file.name)
else:
shutil.copy2(file, self.dataLocation)
shutil.rmtree(extractedPath)
Path(tarFile).unlink()
shutil.copy2(self.dataLocation + "/label_map.pbtxt", str(self.modelOutput))
else:
self.log.info('-------> Training on the local machine')
self.log.info('Status:- |...Local Machine Training')
tf.config.set_soft_device_placement(True)
strategy = tf.compat.v2.distribute.MirroredStrategy()
with strategy.scope():
try:
from object_detection import model_lib_v2
model_lib_v2.train_loop(
pipeline_config_path=str(self.pipelineLoc/"pipeline.config"),
model_dir=str(self.dataLocation))
except Exception:
raise
self.__exportModel()
shutil.copy2(str(self.labelMapLoc/"label_map.pbtxt"), str(self.modelOutput))
def evaluateObjectDetector(self, model_dir, pipeline_config_dir=None, checkpoint_dir=None):
        if checkpoint_dir is None:
            checkpoint_dir = model_dir
        if pipeline_config_dir is None:
            pipeline_config_dir = model_dir
self.log.info('-------> Evaluation started')
from object_detection import model_main_tf2
cmd = '"{}" "{}" --model_dir="{}" --pipeline_config_path="{}/pipeline.config" --checkpoint_dir="{}" --eval_timeout=6'.format(sys.executable, model_main_tf2.__file__, model_dir, model_dir, checkpoint_dir)
result = subprocess.run(cmd , capture_output=True, text=True,shell=True)
precisionParam = ['Average Precision', 'Average Recall']
text = result.stdout.split('\\n')
stats = {}
keys = []
try:
for x in text:
for y in precisionParam:
indx = x.find(y)
if indx != -1:
keyValue = x[indx:].split(' = ')
stats[keyValue[0]] = keyValue[1]
keys.append(keyValue[0])
except Exception as e:
raise ValueError("Error in evaluation: " + str(e))
self.log.info('-------> Evaluation statistics:')
self.log.info(stats)
return stats, keys
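# Hedged note (assumption, not part of the original file): evaluateObjectDetector parses the
# COCO-style metric lines that model_main_tf2 prints, splitting each on ' = ', e.g.
#   line = 'Average Precision  (AP) @[ IoU=0.50:0.95 ] = 0.412'
#   key, value = line[line.find('Average Precision'):].split(' = ')
#   # key -> 'Average Precision  (AP) @[ IoU=0.50:0.95 ]', value -> '0.412'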
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from learner.optimizetechnique import OptimizationTq
import warnings
from learner.parameters import parametersDefine
from learner.defaultAlgos import defaultParams
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import time
import logging
import os
import sys
import json
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.metrics import mean_squared_error
from learner.aion_matrix import aion_matrix
from uncertainties.aionUQ import aionUQ
import mlflow
class RegressionModel():
def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,deployLocation):
self.modelList =modelList
self.params =params
self.trainX =trainX
self.trainY =trainY
self.testX = testX
self.testY = testY
self.method =method
self.scoreParam=scoreParam
self.cvSplit=cvSplit
self.numIter=numIter
self.geneticParam=geneticParam
self.log = logging.getLogger('eion')
self.deployLocation = deployLocation
self.uq_x_train = trainX
self.uq_x_test = testX
self.uq_y_train = trainY
self.uq_y_test = testY
self.AlgorithmNames={'Linear Regression':'LinearRegression','Lasso':'Lasso','Ridge':'Ridge','Decision Tree':'DecisionTreeRegressor','Random Forest':'RandomForestRegressor','Extreme Gradient Boosting (XGBoost)':'XGBRegressor','Light Gradient Boosting (LightGBM)': 'LGBMRegressor',
'Categorical Boosting (CatBoost)': 'CatBoostRegressor','Bagging (Ensemble)':'BaggingRegressor','Stacking (Ensemble)':'StackingRegressor','Voting (Ensemble)':'VotingRegressor','Neural Architecture Search':'NAS'}
self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()}
def logMlflow(self, runName, params, metrices, estimator, algoName=None):
with mlflow.start_run(run_name = runName):
for k,v in params.items():
mlflow.log_param(k, v)
for k,v in metrices.items():
mlflow.log_metric(k, v)
if algoName == 'CatBoostRegressor':
mlflow.catboost.log_model(estimator, "model")
else:
mlflow.sklearn.log_model(estimator, "model")
model_uri = mlflow.get_artifact_uri("model")
""" for some dataset evaluate takes more than 90 min, so commenting till some solution is not found
evaluate_data = self.testX.copy()
evaluate_data['target'] = self.testY.copy()
mlflow.evaluate(model_uri, data=evaluate_data, targets='target', model_type="regressor")
del evaluate_data
"""
def regressionModelling(self,modelOrFeatureBased, code_configure):
paramObj=parametersDefine()
bestModel=''
bestParams={}
import sys
bestScore=-sys.float_info.max #bugfix 11656
scoredetails = ''
self.log.info('\\n---------- Regression Model has started ----------')
try:
self.log.info('Status:- |... Search Optimization Method applied: '+self.method)
for modelName in self.modelList:
objClf = aion_matrix()
if modelName in ['Bagging (Ensemble)','Voting (Ensemble)','Stacking (Ensemble)','Neural Architecture Search']:
if modelName == 'Bagging (Ensemble)':
from ensemble.ensemble_bagging import ensemble_bagging
ensemble_bagging_obj = ensemble_bagging(self.params[modelName],self.scoreParam,0,0)
estimator,modelParams,score,model = ensemble_bagging_obj.ensemble_bagging__regressor(self.trainX,self.trainY,self.testX,self.testY)
if modelName == 'Stacking (Ensemble)':
from ensemble.ensemble_stacking import ensemble_stacking
ensemble_stacking_obj = ensemble_stacking(self.params[modelName],self.scoreParam)
estimator,modelParams,score,model = ensemble_stacking_obj.ensemble_stacking__regressor(self.trainX,self.trainY,self.testX,self.testY,self.modelList)
if modelName == 'Voting (Ensemble)':
from ensemble.ensemble_voting import ensemble_voting
ensemble_voting_obj = ensemble_voting(self.params[modelName],self.scoreParam)
estimator,modelParams,score,model = ensemble_voting_obj.ensemble_voting__regressor(self.trainX,self.trainY,self.testX,self.testY,self.modelList)
'''
if modelName == 'Neural Architecture Search':
from nas.aionNAS import aionNAS
objNAS = aionNAS('Regression',self.params[modelName],self.trainX,self.testX,self.trainY,self.testY,self.deployLocation)
estimator,modelParams,score,model=objNAS.nasMain(self.scoreParam)
'''
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":"NA"}'
if self.scoreParam == "r2":
                        if score > bestScore:
bestScore =score
bestModel =model
bestParams=modelParams
bestEstimator=estimator
else:
if abs(score) < bestScore or bestScore == -sys.float_info.max:
bestScore =abs(score)
bestModel =model
bestParams=modelParams
bestEstimator=estimator
self.log.info('Status:- |... ML Algorithm applied: '+modelName)
self.log.info('Status:- |... Score: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\\n')
continue
if modelName not in self.params:
continue
paramSpace=self.params[modelName].copy()
algoName = self.AlgorithmNames[modelName]
paramDict =paramObj.paramDefine(paramSpace,self.method)
if self.method == 'bayesopt':
code_configure.add_model(algoName,paramSpace)
else:
paramDictCopy = paramDict
# numpy array is not json serializable
                    # numpy is already imported at module level, but np.ndarray still raised an error here, so it is imported locally
import numpy as np
for key,value in paramDictCopy.items():
if isinstance(value, np.ndarray):
paramDictCopy[key] = paramDictCopy[key].tolist()
code_configure.add_model(algoName,paramDictCopy)
if not self.method == 'bayesopt':
paramSize = paramObj.getParamSpaceSize(paramDict)
else:
paramSize = 0
if (self.method == 'bayesopt' and not paramDict) or (not self.method == 'bayesopt' and paramSize<=1):
try:
start = time.time()
#function call
defObj = defaultParams(algoName,paramDict,self.scoreParam,0,0,paramSize)
estimator, modelParams, model,score =defObj.startTrainingRegression(self.trainX,self.trainY,self.testX,self.testY)
executionTime = time.time() - start
self.log.info('---------> Total Execution: ' + str(executionTime) + '\\n')
if (scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"' + self.modelToAlgoNames[model] + '","FeatureEngineering":"' + str(
modelOrFeatureBased) + '","Score":' + str(score) + ',"ModelUncertainty":"NA"}'
if self.scoreParam == "r2":
if score > bestScore:
bestScore = score
bestModel = model
bestParams = modelParams
bestEstimator = estimator
else:
if abs(score) < bestScore or bestScore == -sys.float_info.max:
bestScore = abs(score)
bestModel = model
bestParams = modelParams
bestEstimator = estimator
self.log.info('Status:- |... ML Algorithm applied: ' + modelName)
self.log.info('Status:- |... Score: ' + objClf.get_print_score(self.scoreParam) + '=' + str(
round(score, 2)) + '\\n')
except Exception as inst:
self.log.info('\\n < ---------- Model Execution Failed Start--------->')
self.log.info('\\n<-------' + modelName + ' Model Execution failed!!!.' + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
self.log.info('\\n < ---------- Model Execution Failed End --------->')
continue
trainingStatus = 'Success'
if self.method =='grid':
try:
self.log.info("-------> Optimization Method :Grid Search")
self.log.info("-------> Model Name: "+str(modelName))
opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY)
start = time.time()
model,modelParams,score,estimator=opTq.gridSearchOpt()
executionTime=time.time() - start
if not self.testX.empty:
predictedData = estimator.predict(self.testX)
if 'neg_mean_squared_error' in self.scoreParam:
meanssquatederror = mean_squared_error(self.testY,predictedData)
score = meanssquatederror
elif 'neg_root_mean_squared_error' in self.scoreParam:
rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False)
score = rootmeanssquatederror
elif 'mae' in self.scoreParam:
meanabsoluteerror=mean_absolute_error(self.testY,predictedData)
score = meanabsoluteerror
elif 'r2' in self.scoreParam:
r2score=r2_score(self.testY,predictedData)
score = r2score
problemName = estimator.__class__.__name__
runName = algoName + '_' + modelOrFeatureBased
metrices = {}
metrices["score"] = score
try:
self.logMlflow(runName, modelParams, metrices, estimator,algoName)
except Exception as e:
self.log.info('\\n-----> ML flow error!!!.' + str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
# raise
pass
uq_jsonobject = ''
try:
if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']:
self.log.info('-----> Model Uncertainty Not Supported')
else:
problemName = estimator.__class__.__name__
uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation)
total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq")
self.log.info("-------> model_confidence: "+str(total_picp_percentage)+str('%'))
self.log.info("-------> model_uncertainty: "+str(total_Uncertainty_percentage)+str('%'))
except:
pass
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(abs(score))+',"ModelUncertainty":'+str(json.dumps(uq_jsonobject))+'}'
self.log.info('---------> Total Execution: '+str(executionTime)+'\\n')
if self.scoreParam == "r2":
if score > bestScore:
bestScore =score
bestModel =model
bestParams=modelParams
bestEstimator=estimator
else:
if abs(score) < bestScore or bestScore == -sys.float_info.max:
bestScore =abs(score)
bestModel =model
bestParams=modelParams
bestEstimator=estimator
except Exception as inst:
self.log.info('\\n < ---------- Model Execution Failed Start--------->')
self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst))
self.log.info('\\n < ---------- Model Execution Failed End --------->')
trainingStatus = 'Error (Exception)'
elif self.method == 'random':
try:
self.log.info("-------> Optimization Method :Random Search")
self.log.info("-------> Model Name: "+str(modelName))
opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY)
start = time.time()
model,modelParams,score,estimator=opTq.randomSearchOpt()
executionTime=time.time() - start
if not self.testX.empty:
predictedData = estimator.predict(self.testX)
if 'neg_mean_squared_error' in self.scoreParam:
meanssquatederror = mean_squared_error(self.testY,predictedData)
score = meanssquatederror
elif 'neg_root_mean_squared_error' in self.scoreParam:
rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False)
score = rootmeanssquatederror
elif 'mae' in self.scoreParam:
meanabsoluteerror=mean_absolute_error(self.testY,predictedData)
score = meanabsoluteerror
elif 'r2' in self.scoreParam:
r2score=r2_score(self.testY,predictedData)
score = r2score
if self.scoreParam == "r2":
if score>bestScore:
bestScore =score
bestModel =model
bestParams=modelParams
bestEstimator=estimator
else:
if abs(score) < bestScore or bestScore == -sys.float_info.max:
bestScore =abs(score)
bestModel =model
bestParams=modelParams
bestEstimator=estimator
problemName = estimator.__class__.__name__
runName = algoName + '_' + modelOrFeatureBased
metrices = {}
metrices["score"] = score
try:
self.logMlflow(runName, modelParams, metrices, estimator,algoName)
except Exception as e:
self.log.info('\\n-----> ML flow error!!!.' + str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
# raise
pass
uq_jsonobject = ''
try:
if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']:
self.log.info('-----> Model Uncertainty Not Supported')
else:
problemName = estimator.__class__.__name__
uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation)
                                total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq")
self.log.info("-------> model_confidence: "+str(total_picp_percentage)+str('%'))
self.log.info("-------> model_uncertainty: "+str(total_Uncertainty_percentage)+str('%'))
except Exception as e:
print(e)
pass
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(abs(score))+',"ModelUncertainty":'+str(json.dumps(uq_jsonobject))+'}'
except Exception as inst:
self.log.info('\\n < ---------- Model Execution Failed Start--------->')
self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst))
self.log.info('\\n < ---------- Model Execution Failed End --------->')
trainingStatus = 'Error (Exception)'
elif self.method == 'bayesopt':
try:
self.log.info("-------> Optimization Method :BayesOpt Search")
self.log.info("-------> Model Name: "+str(modelName))
opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY)
fun=opTq.f
trials = Trials()
start = time.time()
best = fmin(fun,paramDict,algo=tpe.suggest, max_evals=100, trials=trials)
executionTime=time.time() - start
results = sorted(trials.results, key = lambda x: x['loss'])
bestresult=results[0]
model=bestresult['model']
score=bestresult['score']
modelParams=bestresult['params']
res = ', '.join("{!s}={!r}".format(key,val) for (key,val) in modelParams.items())
modelObj=eval(model+'('+res+')')
estimator = modelObj.fit(self.trainX,self.trainY)
if not self.testX.empty:
predictedData = estimator.predict(self.testX)
if 'neg_mean_squared_error' in self.scoreParam:
meanssquatederror = mean_squared_error(self.testY,predictedData)
score = meanssquatederror
elif 'neg_root_mean_squared_error' in self.scoreParam:
rootmeanssquatederror=mean_squared_error(self.testY,predictedData,squared=False)
score = rootmeanssquatederror
elif 'mae' in self.scoreParam:
meanabsoluteerror=mean_absolute_error(self.testY,predictedData)
score = meanabsoluteerror
elif 'r2' in self.scoreParam:
r2score=r2_score(self.testY,predictedData)
score = r2score
problemName = estimator.__class__.__name__
runName = algoName + '_' + modelOrFeatureBased
metrices = {}
metrices["score"] = score
try:
self.logMlflow(runName, modelParams, metrices, estimator,algoName)
except Exception as e:
self.log.info('\\n-----> ML flow error!!!.' + str(e))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
# raise
pass
if self.scoreParam == "r2":
if score>bestScore:
bestScore =score
bestModel =model
bestParams=modelParams
bestEstimator=estimator
else:
if abs(score) < bestScore or bestScore == -sys.float_info.max:
bestScore =abs(score)
bestModel =model
bestParams=modelParams
bestEstimator=estimator
uq_jsonobject = ''
try:
if model in ['XGBRegressor','LGBMRegressor','CatBoostRegressor']:
self.log.info('-----> Model Uncertainty Not Supported')
else:
problemName = estimator.__class__.__name__
uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation)
total_picp_percentage,total_Uncertainty_percentage,uq_medium,uq_best,scoringCriteria,uq_jsonobject=uqObj.uqMain_BBMRegression(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq")
self.log.info("-------> model_confidence: "+str(total_picp_percentage)+str('%'))
self.log.info("-------> model_uncertainty: "+str(total_Uncertainty_percentage)+str('%'))
except:
pass
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(abs(score))+',"ModelUncertainty":'+str(json.dumps(uq_jsonobject))+'}'
self.log.info('---------> Total Execution: '+str(executionTime)+'\\n')
except Exception as inst:
self.log.info('\\n < ---------- Model Execution Failed Start--------->')
self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst))
self.log.info('\\n < ---------- Model Execution Failed End --------->')
trainingStatus = 'Error (Exception)'
else:
                    trainingStatus = 'Error (HyperTuning Algo Not Supported)'
pass
self.log.info('Status:- |... ML Algorithm applied: '+modelName)
if trainingStatus.lower() == 'success':
self.log.info('Status:- |... Score after hyperparameter tuning: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\\n')
else:
self.log.info('Status:- |... Training Error : '+trainingStatus+'\\n')
            if bestModel != '':
self.log.info('---------- Regression Model End ---------- \\n')
self.log.info('\\n------- Best Model and its parameters -------------')
self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2)))
self.log.info("-------> Best Name: "+str(bestModel))
self.log.info("-------> Best Score: "+str(bestScore))
else:
raise Exception("Sorry, no model is trained")
return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails
except Exception as inst:
self.log.info( '\\n-----> regressionModel failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.metrics import mean_squared_error
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
import logging
import numpy as np
from sklearn.preprocessing import binarize
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.metrics import mean_squared_error
class aion_matrix:
def __init__(self):
self.log = logging.getLogger('eion')
def get_print_score(self,matrix):
if 'accuracy' in str(matrix).lower():
return 'Accuracy'
elif 'recall' in str(matrix).lower():
return 'Recall'
elif 'precision' in str(matrix).lower():
return 'Precision'
elif 'f1_score' in str(matrix).lower():
return 'F1_Score'
elif 'roc_auc' in str(matrix).lower():
return 'ROC_AUC'
elif 'mse' in str(matrix).lower() or 'neg_mean_squared_error' in str(matrix).lower():
return 'Mean Squared Error(MSE)'
elif 'rmse' in str(matrix).lower() or 'neg_root_mean_squared_error' in str(matrix).lower():
            return 'Root Mean Squared Error(RMSE)'
elif 'mae' in str(matrix).lower() or 'neg_mean_absolute_error' in str(matrix).lower():
return 'Mean Absolute Error (MAE)'
elif 'r2' in str(matrix).lower():
return 'R-Squared(R2)'
else:
return 'Unknown'
def get_score(self,matrix,actual,predict):
if 'accuracy' in str(matrix).lower():
ensemble_score = accuracy_score(actual,predict)
ensemble_score = ensemble_score*100
elif 'recall' in str(matrix).lower():
ensemble_score = recall_score(actual,predict,average='macro')
ensemble_score = ensemble_score*100
elif 'precision' in str(matrix).lower():
ensemble_score = precision_score(actual,predict,average='macro')
ensemble_score = ensemble_score*100
elif 'f1_score' in str(matrix).lower():
ensemble_score = f1_score(actual,predict, average='macro')
ensemble_score = ensemble_score*100
elif 'roc_auc' in str(matrix).lower():
try:
ensemble_score = roc_auc_score(actual,predict,average="macro")
except:
try:
actual = pd.get_dummies(actual)
predict = pd.get_dummies(predict)
ensemble_score = roc_auc_score(actual,predict, average='weighted', multi_class='ovr')
except:
ensemble_score = 0
ensemble_score = ensemble_score*100
elif ('mse' in str(matrix).lower()) or ('neg_mean_squared_error' in str(matrix).lower()):
ensemble_score = mean_squared_error(actual,predict)
elif ('rmse' in str(matrix).lower()) or ('neg_root_mean_squared_error' in str(matrix).lower()):
ensemble_score=mean_squared_error(actual,predict,squared=False)
elif ('mae' in str(matrix).lower()) or ('neg_mean_absolute_error' in str(matrix).lower()):
ensemble_score=mean_absolute_error(actual,predict)
elif 'r2' in str(matrix).lower():
ensemble_score=r2_score(actual,predict)
return round(ensemble_score,2)
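    # Hedged usage sketch (assumption, not part of the original file): get_score returns
    # percentages for classification metrics and raw errors for regression, e.g.
    #   m = aion_matrix()
    #   m.get_score('accuracy', [1, 0, 1, 1], [1, 0, 0, 1])   # -> 75.0 (percent)
    #   m.get_score('mse', [1.0, 2.0], [1.5, 2.5])            # -> 0.25 (raw error)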
def getClassificationPerformaceMatrix(self,le_trainY,predictedData,labelMaps):
setOfyTrue = set(le_trainY)
unqClassLst = list(setOfyTrue)
if(str(labelMaps) != '{}'):
inv_mapping_dict = {v: k for k, v in labelMaps.items()}
unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict)
unqClassLst2 = list(unqClassLst2)
else:
unqClassLst2 = unqClassLst
indexName = []
columnName = []
targetnames=[]
for item in unqClassLst2:
indexName.append("act:"+str(item))
columnName.append("pre:"+str(item))
targetnames.append(str(item))
matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName)
#pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_columns',len(targetnames)+2)
self.log.info('-------> Confusion Matrix: ')
self.log.info(matrixconfusion)
pd.reset_option('display.max_columns')
#pd.reset_option('expand_frame_repr')
#self.log.info('-------> Confusion Matrix With Labels: ')
#self.log.info(confusion_matrix(le_trainY,predictedData, labels = unqClassLst))
#print(unqClassLst2)
classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, labels = unqClassLst,target_names=targetnames,output_dict=True)).transpose()
self.log.info('-------> Classification Report: ')
self.log.info(classificationreport)
lb = LabelBinarizer()
lb.fit(le_trainY)
transformTarget= lb.transform(le_trainY)
transformPredict = lb.transform(predictedData)
rocaucscore = roc_auc_score(transformTarget,transformPredict,average="macro")
self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore))
matrixconfusion = matrixconfusion.to_json(orient='index')
classificationreport = classificationreport.to_json(orient='index')
matrix = '"ConfusionMatrix":'+matrixconfusion+',"ClassificationReport":'+classificationreport+',"ROC_AUC_SCORE":'+str(rocaucscore)
return(matrix)
def get_regression_matrix(self,targetData,predictedData):
r2score=r2_score(targetData, predictedData)
self.log.info('-------> R2_score :'+str(r2score))
meanabsoluteerror=(mean_absolute_error(targetData, predictedData))
self.log.info('-------> MAE :'+str(meanabsoluteerror))
meanssquatederror=mean_squared_error(targetData, predictedData)
self.log.info('-------> MSE :'+str(meanssquatederror))
rootmeanssquatederror=mean_squared_error(targetData, predictedData,squared=False)
self.log.info('-------> RMSE :'+str(rootmeanssquatederror))
targetArray, predictedArray = np.array(targetData), np.array(predictedData)
try:
EPSILON = 1e-10
meanpercentageerror=np.mean(np.abs((targetArray - predictedArray) / (targetArray+EPSILON)))*100
except ZeroDivisionError:
meanpercentageerror = 0
self.log.info('-------> MAPE :'+str(meanpercentageerror))
try:
normalised_rmse_percentage = round(((rootmeanssquatederror/ ( np.max(targetData) - np.min(targetData) )) * 100), 4)
except Exception as e:
normalised_rmse_percentage = -1
self.log.info('-------> Normalised RMSE percentage :'+str(normalised_rmse_percentage))
matrix = '"MAE":'+str(meanabsoluteerror)+',"R2Score":'+str(r2score)+',"MSE":'+str(meanssquatederror)+',"MAPE":'+str(meanpercentageerror)+',"RMSE":'+str(rootmeanssquatederror)+',"Normalised RMSE(%)":'+str(normalised_rmse_percentage)
return matrix
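    # Hedged note (assumption, not part of the original file): the MAPE above adds a small
    # EPSILON to the denominator so rows whose true value is 0 stay finite instead of dividing
    # by zero, e.g.
    #   target, pred = np.array([0.0, 2.0]), np.array([0.1, 2.2])
    #   np.mean(np.abs((target - pred) / (target + 1e-10))) * 100   # finite, no division error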
def getbestfeatureModel(self,modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2):
best_feature_model = 'Model1'
self.log.info('\\n ---------- Summary Start ------------')
if modelType.lower() == "classification":
if(threshold1 == -1 and threshold2 == -1):
if score1> score2:
self.log.info('-------> Best Features: Model1')
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model1'
else:
self.log.info('-------> Best Features: Model2')
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model2'
elif(threshold1 == -1):
self.log.info('-------> Best Features: Model2')
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model2'
elif(threshold1 == -2):
self.log.info('-------> Best Features: Model1')
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model1'
else:
if pscore1 == pscore2:
if rscore1 > rscore2:
self.log.info('-------> Best Features: Model1')
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model1'
else:
self.log.info('-------> Best Features: Model2')
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model2'
elif rscore1 == rscore2:
if pscore1 > pscore2:
self.log.info('-------> Best Features: Model1')
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model1'
else:
self.log.info('-------> Best Features: Model2')
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model2'
elif modelType.lower() == "regression":
if scoreParam == "r2" or scoreParam == "explained_variance":
if score1> score2 :
self.log.info('-------> Best Features: Model1')
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model1'
else:
self.log.info('-------> Best Features: Model2')
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model2'
else:
if score1< score2 :
self.log.info('-------> Best Features: Model1')
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model1'
else:
self.log.info('-------> Best Features: Model2')
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = 'Model2'
self.log.info('---------- Summary End ------------\\n')
return(best_feature_model)
def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName):
thresholdx = -1
for threshold in threshold_range:
predictedData = estimator.predict_proba(testX)
predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)#bug 12437
p_score = precision_score(testY, predictedData)
#self.log.info('-------------> Precision:'+str(p_score))
r_score = recall_score(testY, predictedData)
#self.log.info('-------------> Rscore:'+str(r_score))
#self.log.info(confusion_matrix(testY, predictedData))
tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel()
if(checkParameter.lower() == 'fp'):
if fp == 0:
if(p_score == 1):
thresholdx = threshold
self.log.info('---------------> Best Threshold:'+str(threshold))
self.log.info('---------------> Best Precision:'+str(p_score))
self.log.info('---------------> Best Recall:'+str(r_score))
self.log.info('---------------> TN:'+str(tn))
self.log.info('---------------> FP:'+str(fp))
self.log.info('---------------> FN:'+str(fn))
self.log.info('---------------> TP:'+str(tp))
break
if(checkParameter.lower() == 'fn'):
if fn == 0:
if(r_score == 1):
thresholdx = threshold
self.log.info('---------------> Best Threshold:'+str(threshold))
self.log.info('---------------> Best Precision:'+str(p_score))
self.log.info('---------------> Best Recall:'+str(r_score))
self.log.info('---------------> TN:'+str(tn))
self.log.info('---------------> FP:'+str(fp))
self.log.info('---------------> FN:'+str(fn))
self.log.info('---------------> TP:'+str(tp))
break
return(thresholdx,p_score,r_score)
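    # Hedged sketch (assumption, not part of the original file): binarize turns predicted
    # probabilities into hard labels at a given cut-off, which is what the sweep above relies on:
    #   import numpy as np
    #   from sklearn.preprocessing import binarize
    #   proba = np.array([[0.9, 0.1], [0.3, 0.7]])            # predict_proba output
    #   binarize(proba[:, 1].reshape(-1, 1), threshold=0.5)   # -> [[0.], [1.]]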
def getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore):
cmodel = False
if(threshold != -1):
if(bestthreshold == -1):
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif fp0:
if rscore > brscore:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif rscore == brscore:
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif fn0:
if pscore > bpscore:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif pscore == bpscore:
                    if tscore > btscore or btscore == -0xFFFF:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
else:
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
btscore = tscore
else:
if(bestthreshold == -1):
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
btscore = tscore
        return cmodel,btscore,bestthreshold,brscore,bpscore
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import time
import os
import sys
import logging
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import train_test_split
from sklearn.svm import OneClassSVM
from sklearn.ensemble import IsolationForest
import pickle
from sklearn import metrics
import numpy as np
import pandas as pd
from learner.aion_matrix import aion_matrix
from learner.parameters import parametersDefine
from sklearn.metrics import f1_score
from sklearn import model_selection
from learner.anomalyDetectionAE import anomalyDetectionAE
class anomalyDetector(object):
def __init__(self):
self.log = logging.getLogger('eion')
def startanomalydetector(self,df,target,labelMaps,inlierLabels,learnerJson,model_type,saved_model,anomalyMethod,deployLocation,predicted_data_file,testPercentage,anomalyconfig,datetimeFeature,mv_featurebased_ad_status):
try:
self.log.info("startanomalydetector.... \\n")
from io import StringIO
buf = StringIO()
df.info(buf=buf)
#self.log.info(buf.getvalue())
self.log.info("User data info : \\n"+str(buf.getvalue()))
try:
df[datetimeFeature] = pd.to_datetime(df[datetimeFeature])
df.set_index(datetimeFeature, inplace=True)
#If still the datetime column exist in feature list, drop it. Because we already made datetime feature as index (datetimeindex)
df.drop(datetimeFeature,axis=1,inplace=True)
except Exception as e:
pass
ae_df=df
paramObj=parametersDefine()
anomalyMethod=anomalyMethod
inlierLabels=inlierLabels
anomalyDetectionType=""
inlierLabelList=inlierLabels.split(",")
self.log.info("<---- inlierLabels ---->"+inlierLabels)
self.log.info("<---- anomalyMethod ---->"+str(anomalyMethod))
if target != "":
self.log.info('Status:- |... AnomalyDetection: Supervised')
self.log.info("One class based anomaly Detection by relabeling data to fit one class models")
combinedString=""
dfStr=""
anomalyDetectionType="supervised"
if not anomalyMethod.lower() == "autoencoder": ##Added for auto encoder
self.log.info("startanomalydetector: df: \\n"+str(df)) #task 12627
if labelMaps == {}:
for inlierVal in inlierLabelList:
inlier=inlierVal
dfStr = "x ==" + inlier + " or "
combinedString+= dfStr
func= combinedString.strip(" or ")
else:
for inlierVal in inlierLabelList:
try:
if inlierVal.isnumeric():
inlierVal = int(inlierVal)
# inlier=str(labelMaps[inlierVal]) ##Wrongly assigned inlier values to labelMaps dict key.
inlier = str(inlierVal)
dfStr = "x ==" + str(inlier) + " or "
combinedString+= dfStr
except Exception as e:
raise Exception(e)
func= combinedString.strip(" or ")
labelMaps={'InlierLabel':1,'NonInlierLabel':-1}
targetData=df[target]
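                ## The inlier labels were compiled above into a boolean expression string (e.g. "x ==1 or x ==2");
                ## it is evaluated per row so inliers become 1 and everything else becomes the outlier label -1.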
df['anomaly'] = df[target].apply(lambda x: 1 if eval(func) else -1 )
anomtargetData=df['anomaly']
self.log.info("dataframe after relabeling the data")
self.log.info(df.head())
self.log.info("target column value counts with inliers and outliers")
self.log.info(df['anomaly'].value_counts())
df.drop([target, "anomaly"], axis=1, inplace=True)
outliers = anomtargetData[anomtargetData == -1]
self.log.info("outliers in data")
self.log.info(outliers.shape[0])
self.log.info("outlier fraction")
self.log.info(outliers.shape[0]/targetData.shape[0])
if int(testPercentage) != 0:
testSize= testPercentage/100
xtrain, xtest, ytrain, ytest = train_test_split(df, anomtargetData, test_size = testSize)
else:
xtrain =df
xtest =df
ytrain=anomtargetData
ytest =anomtargetData
if anomalyMethod.lower() == "isolationforest":
modelName="isolationforest"
paramSpace=anomalyconfig['modelParams']['IsolationForest']
paramDict =paramObj.paramDefine(paramSpace,'random')
ftwo_scorer = make_scorer(accuracy_score)
isolation_forest = model_selection.RandomizedSearchCV(IsolationForest(), paramDict, scoring=ftwo_scorer, n_iter=10)
mod = isolation_forest.fit(xtrain,ytrain)
model = mod.best_estimator_
elif anomalyMethod.lower() == "oneclasssvm":
modelName="oneClassSVM"
fthree_scorer = make_scorer(accuracy_score)
paramSpace=anomalyconfig['modelParams']['oneclassSVM']
paramDict =paramObj.paramDefine(paramSpace,'random')
one_class = model_selection.RandomizedSearchCV(OneClassSVM(), paramDict, scoring=fthree_scorer, n_iter=10)
mod = one_class.fit(xtrain,ytrain)
model = mod.best_estimator_
elif anomalyMethod.lower() == "autoencoder":
modelName='autoencoder'
testSize=testPercentage/100
self.log.info("Aion Autoencoder anomaly detection started..")
paramSpace=anomalyconfig['modelParams']['AutoEncoder']
adae_obj=anomalyDetectionAE(ae_df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status)
model=adae_obj.mainAnomalyDetectionfn()
self.log.info("Aion Autoencoder anomaly detection completed..")
else:
self.log.info("IsolationForest, OneClassSVM and autoencoder are supported models")
modelName = ""
model = ""
else:
self.log.info('Status:- |... AnomalyDetection: Unsupervised')
self.log.info("unsupervised anomaly detection")
anomalyDetectionType="unsupervised"
model=None
xtrain =df
xtest = df
ytrain = pd.DataFrame()
if anomalyMethod.lower() == "isolationforest":
paramSpace=anomalyconfig['modelParams']['IsolationForest']
paramDict =paramObj.paramDefine(paramSpace,'random')
modelName="isolationforest"
def scorer_f(estimator, X):
return np.mean(estimator.score_samples(X))
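                ## The unsupervised setting has no labels, so RandomizedSearchCV is scored by the
                ## mean anomaly score: a higher mean score_samples means the data fits the model better.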
isolation_forest = model_selection.RandomizedSearchCV(IsolationForest(), paramDict, scoring=scorer_f, n_iter=10)
mod = isolation_forest.fit(xtrain)
self.log.info('---------> Best Param: '+str(mod.best_params_))
model = mod.best_estimator_
elif anomalyMethod.lower() == "oneclasssvm":
paramSpace=anomalyconfig['modelParams']['oneclassSVM']
paramDict =paramObj.paramDefine(paramSpace,'random')
modelName="oneClassSVM"
def scorer_f1(estimator, X):
return np.mean(estimator.score_samples(X))
one_class = model_selection.RandomizedSearchCV(OneClassSVM(), paramDict, scoring=scorer_f1, n_iter=10)
model = one_class.fit(xtrain)
self.log.info('---------> Best Param: '+str(model.best_params_))
model = model.best_estimator_
elif anomalyMethod.lower() == "autoencoder":
ae_df.drop(ae_df.filter(regex="Unname"),axis=1, inplace=True)
modelName='autoencoder'
testSize= testPercentage/100
self.log.info("Aion Autoencoder anomaly detection started..")
paramSpace=anomalyconfig['modelParams']['AutoEncoder']
adae_obj=anomalyDetectionAE(ae_df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status)
model=adae_obj.mainAnomalyDetectionfn()
self.log.info("Aion Autoencoder anomaly detection completed..")
elif anomalyMethod.lower() == "dbscan":
testSize=testPercentage/100
ae_df.drop(ae_df.filter(regex="Unname"),axis=1, inplace=True)
modelName='dbscan'
self.log.info("Aion DBScan anomaly detection started..")
paramSpace=anomalyconfig['modelParams']['DBScan']
adae_obj=anomalyDetectionAE(ae_df,paramSpace,deployLocation,target,anomalyMethod,testSize,datetimeFeature,mv_featurebased_ad_status)
model=adae_obj.mainAnomalyDetectionfn()
self.log.info("Aion DBScan anomaly detection completed..")
else:
self.log.info("IsolationForest,OneClassSVM,autoencoder and DBScan are supported models")
modelName = ""
model = ""
self.log.info('Status:- |... AnomalyDetection Algorithm applied: '+modelName)
if (anomalyMethod.lower() == "autoencoder" or anomalyMethod.lower() == "dbscan"):
if (anomalyMethod.lower() == "autoencoder"):
                    ## Since the autoencoder is implemented with tf.keras, the model is saved in TF format; saving it in .sav format would raise a 'TensorSliceReader constructor' error.
saved_model=saved_model.replace('.sav','')
filename = os.path.join(deployLocation,'model',saved_model)
model.save(filename,save_format="tf")
elif (anomalyMethod.lower() == "dbscan"):
filename = os.path.join(deployLocation,'model',saved_model)
pickle.dump(model, open(filename, 'wb'))
matrix=''
trainmatrix=''
accuracy = 0
else:
filename = os.path.join(deployLocation,'model',saved_model)
pickle.dump(model, open(filename, 'wb'))
loaded_model=pickle.load(open(filename, 'rb'))
pred_testData=loaded_model.predict(xtest)
pred_trainData = loaded_model.predict(xtrain)
pred_trainScore = loaded_model.decision_function(xtrain)
self.log.info("<--- predicted values of testdata --->")
self.log.info(pred_testData)
if anomalyDetectionType == "supervised" :
df_predicted = pd.DataFrame()
df_predicted['actual'] = ytest
df_predicted['predict'] = pred_testData
df_predicted.to_csv(predicted_data_file)
preds = pred_testData
targs = ytest
unique_elements_ytest, counts_elements_ytest = np.unique(targs, return_counts=True)
unique_elements_pred, counts_elements_pred = np.unique(preds, return_counts=True)
'''
for i in range(0,len(unique_elements_ytest)):
self.log.info("unique value :" +str(unique_elements_ytest[i]) + " count in input testdata: " + str(counts_elements_ytest[i]) +" count in predicted testdata: " + str(counts_elements_pred[i]))
self.log.info("\\n")
'''
self.log.info("\\n======= Performance matrix on Test Data ======")
aion_matrixobj = aion_matrix()
self.log.info("-------> Test Matrix: ")
matrix = aion_matrixobj.getClassificationPerformaceMatrix(targs,preds,labelMaps)
self.log.info("-------> Train Matrix: ")
trainmatrix = aion_matrixobj.getClassificationPerformaceMatrix(ytrain,pred_trainData,labelMaps)
#self.log.info("-------> Confusion Matrix: ")
self.log.info(metrics.confusion_matrix(targs,preds))
self.log.info("-------> accuracy for inliers: ")
accuracy = metrics.accuracy_score(targs, preds)
self.log.info(metrics.accuracy_score(targs, preds))
self.log.info("-------> precision for inliers --->")
self.log.info(metrics.precision_score(targs, preds))
self.log.info("-------> recall for inliers ---> ")
self.log.info(metrics.recall_score(targs, preds))
self.log.info("-------> f1 for inliers--->")
self.log.info(metrics.f1_score(targs, preds))
self.log.info("-------> area under curve (auc) for inliers --->")
self.log.info(metrics.roc_auc_score(targs, preds))
self.log.info("-------> precision for outliers ---> |
")
self.log.info(1-metrics.precision_score(targs, preds))
self.log.info("-------> recall for outliers ---> ")
self.log.info(1-metrics.recall_score(targs, preds))
self.log.info("-------> f1 for outliers--->")
self.log.info(1-metrics.f1_score(targs, preds))
self.log.info("======= Performance matrix on Test Data End ======\\n")
else:
df_predicted = xtrain
df_predicted['predict'] = pred_trainData
df_predicted['score'] = pred_trainScore
df_predicted.to_csv(predicted_data_file, index=False)
matrix = ''
trainmatrix = ''
accuracy = 'NA'
return modelName,model,matrix,trainmatrix,accuracy,labelMaps
except Exception as inst:
self.log.info("Error: anomalyDetector failed "+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier, PassiveAggressiveClassifier
from sklearn.linear_model import SGDRegressor, PassiveAggressiveRegressor
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from sklearn.svm import SVR
import xgboost as xgb
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import warnings
warnings.filterwarnings('ignore')
import time
import logging
import sys,os
class StreamToLogger(object):
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, 'Model:- Iteration:: '+line.rstrip())
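# Hedged usage sketch (assumption, not part of the original file): StreamToLogger lets the
# verbose fold-by-fold output of GridSearchCV/RandomizedSearchCV land in the 'eion' logger:
#   log = logging.getLogger('eion')
#   old_stdout = sys.stdout
#   sys.stdout = StreamToLogger(log, logging.INFO)
#   print('fold 1/5 ...')   # logged as 'Model:- Iteration:: fold 1/5 ...'
#   sys.stdout = old_stdout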
class OptimizationTq():
def __init__(self,modelName,tuneParams,cvSplit,scoreParam,nIter,trainX,trainY,geneticParam=None):
self.data = None
self.model=modelName
self.params =tuneParams
self.cvSplit=cvSplit
self.scoreParam=scoreParam
self.trainX =trainX
self.trainY = trainY
self.geneticParam=geneticParam if geneticParam else {}
self.nIter =nIter
self.count =0
self.best =0
self.log = logging.getLogger('eion')
def gridSearchOpt(self):
try:
sl = StreamToLogger(self.log, logging.INFO)
oldStdout = sys.stdout
sys.stdout = sl
self.log.info('Model:-Model Name:: '+str(self.model))
modelObj=eval(self.model+'()')
gridOp = GridSearchCV(modelObj, param_grid=self.params,scoring=self.scoreParam, cv=self.cvSplit,verbose=10)
gridFit=gridOp.fit(self.trainX,self.trainY)
self.log.info('Model:-Model Name:: '+str(self.model))
self.log.info('Model:-ScoringType:: '+str(gridFit.scorer_))
self.log.info('Model:-Best Param:: '+str(gridFit.best_params_))
self.log.info('Model:-Validation Score:: '+str(gridFit.best_score_))
self.log.info('Model:-CV Result:: '+str(gridFit.cv_results_))
self.log.info('Model:-Best Estimator:: '+str(gridFit.best_estimator_))
sys.stdout = oldStdout
return self.model,gridFit.best_params_,gridFit.best_score_,gridFit.best_estimator_
except Exception as inst:
self.log.info("gridSearchOpt failed ==>"+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def randomSearchOpt(self):
try:
sl = StreamToLogger(self.log, logging.INFO)
oldStdout = sys.stdout
sys.stdout = sl
self.log.info('Model:-Model Name:: '+str(self.model))
modelObj=eval(self.model+'()')
randomOp = RandomizedSearchCV(modelObj, param_distributions=self.params,scoring=self.scoreParam,n_iter=self.nIter,cv=self.cvSplit,verbose=10)
randomFit=randomOp.fit(self.trainX,self.trainY)
self.log.info('Model:-Model Name:: '+str(self.model))
self.log.info('Model:-ScoringType:: '+str(randomFit.scorer_))
self.log.info('Model:-Best Param:: '+str(randomFit.best_params_))
self.log.info('Model:-Validation Score:: '+str(randomFit.best_score_))
self.log.info('Model:-CV Result:: '+str(randomFit.cv_results_))
self.log.info('Model:-Best Estimator:: '+str(randomFit.best_estimator_))
sys.stdout = oldStdout
return self.model,randomFit.best_params_,randomFit.best_score_,randomFit.best_estimator_
except Exception as inst:
self.log.info("RandomsearchOptimization failed ==>"+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
def bayesianOpt(self,params):
modelObj=eval(self.model+'(**'+str(params)+')')
score=cross_val_score(modelObj, self.trainX, self.trainY,scoring=self.scoreParam,cv=self.cvSplit)
return score.mean()
def f(self,params):
best=self.best
count=self.count
parameters=params
count += 1
classObj=OptimizationTq(self.model,self.params,self.cvSplit,self.scoreParam,self.nIter,self.trainX,self.trainY)
acc = classObj.bayesianOpt(parameters.copy())
return {'loss':-acc,'score': acc, 'status': STATUS_OK,'model' :self.model,'params': params}
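    # Hedged sketch (assumption, not part of the original file): hyperopt minimises the returned
    # 'loss', so f() negates the CV score. Assuming opt is an OptimizationTq instance, a typical
    # driver looks like:
    #   from hyperopt import fmin, tpe, Trials, hp
    #   trials = Trials()
    #   space = {'C': hp.uniform('C', 0.1, 10.0)}
    #   best = fmin(fn=opt.f, space=space, algo=tpe.suggest, max_evals=50, trials=trials)
    #   # sorted(trials.results, key=lambda r: r['loss'])[0] then holds the best model/params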
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import warnings
warnings.filterwarnings('ignore')
import logging
import sklearn
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from random import sample
from numpy.random import uniform
import numpy as np
import math
import pickle
import os
from math import isnan
from sklearn.preprocessing import binarize
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import davies_bouldin_score
from utils.file_ops import save_csv_compressed
from sklearn.metrics import silhouette_score
try:
from sklearn.metrics import calinski_harabasz_score as calinski_harabaz_score
except:
from sklearn.metrics import calinski_harabaz_score
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import brier_score_loss
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.decomposition import LatentDirichletAllocation
from learner.classificationModel import ClassifierModel
from learner.regressionModel import RegressionModel
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error,make_scorer
from sklearn.metrics import mean_squared_error
from sklearn.metrics import RocCurveDisplay, auc, roc_curve
import matplotlib.pyplot as plt
#print("1")
#from alibi.explainers import ALE,plot_ale
#pd.set_option('display.max_columns', 10)
#pd.set_option('display.width', None)
def get_prediction( model, loaded_model, xtrain, xtest=None):
    train_prob = None
    test_prob = None
    predictedData = []
    # guard against the default xtest=None before calling .empty on it
    is_xtest = xtest is not None and not xtest.empty
if model.lower() == 'lda':
if is_xtest:
predictedData = loaded_model.transform(xtest).argmax(axis=1)
trainPredictedData = loaded_model.transform(xtrain)
elif model.lower() == 'dbscan':
if is_xtest:
predictedData = loaded_model.fit_predict(xtest)
predictedData = loaded_model.labels_
trainPredictedData = loaded_model.fit_predict(xtrain)
trainPredictedData = loaded_model.labels_
    elif model == 'Neural Architecture Search':
        # note: use the passed-in loaded_model here; `estimator` is not defined in this scope
        train_prob = loaded_model.predict(xtrain)
        if train_prob.shape[1] == 1:
            train_prob = np.hstack(( 1-train_prob, train_prob))
        trainPredictedData = np.argmax(train_prob, axis=1)
        if is_xtest:
            test_prob = loaded_model.predict(xtest)
            if test_prob.shape[1] == 1:
                test_prob = np.hstack(( 1-test_prob, test_prob))
            predictedData = np.argmax(test_prob, axis=1)
elif model in ['Deep Q Network','Dueling Deep Q Network']:
from tf_agents.trajectories import time_step
from tensorflow import constant
q, _ = loaded_model(np.array(xtrain), step_type=constant([time_step.StepType.FIRST] * np.array(xtrain).shape[0]), training=False)
train_prob = q.numpy()
if train_prob.shape[1] == 1:
train_prob = np.hstack(( 1-train_prob, train_prob))
trainPredictedData = np.argmax(train_prob, axis=1)
if is_xtest:
q,_ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False)
test_prob = q.numpy()
if test_prob.shape[1] == 1:
test_prob = np.hstack(( 1-test_prob, test_prob))
predictedData = np.argmax(test_prob, axis=1)
else:
if is_xtest:
predictedData = loaded_model.predict(xtest)
trainPredictedData = loaded_model.predict(xtrain)
if hasattr(loaded_model, 'predict_proba'):
train_prob = loaded_model.predict_proba(xtrain)
if is_xtest:
test_prob = loaded_model.predict_proba(xtest)
return trainPredictedData, predictedData, train_prob, test_prob
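# Illustrative usage sketch (assumes a fitted sklearn-style estimator; `clf`,
# `xtrain_df` and `xtest_df` are hypothetical):
#
#   trainPred, testPred, trainProb, testProb = get_prediction('RandomForestClassifier', clf, xtrain_df, xtest_df)
#
# The *_prob values hold class probabilities for models exposing predict_proba
# and remain None otherwise.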
class machinelearning(object):
def __init__(self):
self.features=[]
self.log = logging.getLogger('eion')
self.plots = []
def cluster_tendency(self,featureData):
self.log.info("\\n------------- Cluster Tendency Check -------------")
d = featureData.shape[1]
n = len(featureData)
m = int(0.1 * n)
nbrs = NearestNeighbors(n_neighbors=1).fit(featureData.values)
rand_X = sample(range(0, n, 1), m)
ujd = []
wjd = []
for j in range(0, m):
            u_dist, _ = nbrs.kneighbors(uniform(np.amin(featureData,axis=0),np.amax(featureData,axis=0),d).reshape(1, -1), 2, return_distance=True)
ujd.append(u_dist[0][1])
if isinstance(featureData.iloc[rand_X[j]].values, pd.core.arrays.sparse.array.SparseArray):
featureData_reshaped = np.asarray(featureData.iloc[rand_X[j]].values).reshape(1, -1)
else:
featureData_reshaped = featureData.iloc[rand_X[j]].values.reshape(1, -1)
w_dist, _ = nbrs.kneighbors(featureData_reshaped, 2, return_distance=True)
wjd.append(w_dist[0][1])
        try:
            clusterTendency = sum(ujd) / (sum(ujd) + sum(wjd))
        except:
            clusterTendency = 0
        if isnan(clusterTendency):
            clusterTendency = 0
        self.log.info("-------> Cluster Tendency value using Hopkins Statistic: "+str(clusterTendency))
        self.log.info("------------- Cluster Tendency Check End-------------\\n")
        return (clusterTendency)
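    # Hopkins statistic as computed above: H = sum(u_j) / (sum(u_j) + sum(w_j)), where
    # u_j is the nearest-neighbour distance of a uniform random point and w_j that of a
    # sampled real point. H near 0.5 indicates uniformly random data; values approaching
    # 1 indicate that the data is clusterable.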
def calculateNumberofCluster(self,featureData):
self.log.info("\\n------------- Calculate Number of Cluster -------------")
Sum_of_squared_distances = []
K = range(1,15)
for k in K:
km = KMeans(n_clusters=k)
km = km.fit(featureData)
Sum_of_squared_distances.append(km.inertia_)
        # Elbow method: find the inertia point farthest from the chord between the
        # first (k=1) and last (k=K[-1]) points of the curve.
        x1, y1 = 1, Sum_of_squared_distances[0]
        x2, y2 = K[-1], Sum_of_squared_distances[-1]
        distances = []
        for idx in range(len(Sum_of_squared_distances)):
            x0 = idx + 1 # list index 0 corresponds to k=1
            y0 = Sum_of_squared_distances[idx]
            numerator = abs((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)
            denominator = math.sqrt((y2 - y1)**2 + (x2 - x1)**2)
            distances.append(numerator/denominator)
        n_clusters = distances.index(max(distances)) + 1 # map list index back to k
        self.log.info("-------> n_clusters: "+str(n_clusters))
        self.log.info("------------- Calculate Number of Cluster End-------------\\n")
        return(n_clusters)
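    # The elbow above maximises the perpendicular distance from each inertia point
    # (k, SS_k) to the chord between the first and last points of the curve:
    #   d = |(y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1| / sqrt((y2-y1)^2 + (x2-x1)^2)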
def getclusterMatrix(self,featureData,targetData):
silhouetteAvg = silhouette_score(featureData,targetData)
self.log.info("-------> SilHouette_Avg: "+str(silhouetteAvg))
daviesBouldinScore=davies_bouldin_score(featureData, targetData)
self.log.info("-------> DaviesBouldinScore: "+str(daviesBouldinScore))
calinskiHarabazScore=calinski_harabaz_score(featureData,targetData)
self.log.info("-------> CalinskiHarabazScore: "+str(calinskiHarabazScore))
matrix = '"SilHouette_Avg":'+str(silhouetteAvg)+',"DaviesBouldinScore":'+str(daviesBouldinScore)+',"CalinskiHarabazScore":'+str(calinskiHarabazScore)
return(matrix)
    def get_regression_matrix(self,targetData,predictedData):
        try:
            r2score=r2_score(targetData, predictedData)
            self.log.info('-------> R2_score :'+str(r2score))
        except Exception as e:
            self.log.info('\\n---------Error: r2_score '+str(e))
            r2score = 0
        try:
            meanabsoluteerror=(mean_absolute_error(targetData, predictedData))
            self.log.info('-------> MAE :'+str(meanabsoluteerror))
        except Exception as e:
            self.log.info('\\n---------Error: meanabsoluteerror '+str(e))
            meanabsoluteerror = 0
        try:
            meansquarederror=mean_squared_error(targetData, predictedData)
            self.log.info('-------> MSE :'+str(meansquarederror))
        except Exception as e:
            self.log.info('\\n---------Error: meansquarederror '+str(e))
            meansquarederror = 0
        try:
            rootmeansquarederror=mean_squared_error(targetData, predictedData,squared=False)
            self.log.info('-------> RMSE :'+str(rootmeansquarederror))
        except Exception as e:
            self.log.info('\\n---------Error: rootmeansquarederror '+str(e))
            rootmeansquarederror = 0
        try:
            normalised_rmse_percentage = (rootmeansquarederror/ ( np.max(targetData) - np.min(targetData) )) * 100
            self.log.info('-------> Normalised RMSE percentage :'+str(normalised_rmse_percentage))
        except Exception as e:
            self.log.info('\\n---------Error: Normalised RMSE percentage '+str(e))
            normalised_rmse_percentage = -1
        try:
            targetArray, predictedArray = np.array(targetData), np.array(predictedData)
            EPSILON = 1e-10 # guards against division by zero for zero-valued targets
            meanpercentageerror=np.mean(np.abs((targetArray - predictedArray) / (targetArray+EPSILON)))*100
            self.log.info('-------> MAPE :'+str(meanpercentageerror))
        except Exception as e:
            self.log.info('\\n---------Error: meanpercentageerror '+str(e))
            meanpercentageerror = 0
        matrix = '"MAE":'+str(round(meanabsoluteerror,2))+',"R2Score":'+str(round(r2score,2))+',"MSE":'+str(round(meansquarederror,2))+',"MAPE":'+str(round(meanpercentageerror,2))+',"RMSE":'+str(round(rootmeansquarederror,2))+',"Normalised RMSE(%)":'+str(round(normalised_rmse_percentage,2))
        return matrix
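    # Formulas behind the regression metrics above (n = number of samples, eps = 1e-10):
    #   MAE  = (1/n) * sum(|y_i - yhat_i|)
    #   MSE  = (1/n) * sum((y_i - yhat_i)^2),  RMSE = sqrt(MSE)
    #   MAPE = (100/n) * sum(|(y_i - yhat_i) / (y_i + eps)|)
    #   Normalised RMSE(%) = 100 * RMSE / (max(y) - min(y))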
def getClassificationPerformaceMatrix(self,le_trainY,predictedData,prob,labelMaps):
setOfyTrue = set(le_trainY)
unqClassLst = list(setOfyTrue)
if len(unqClassLst) <= 20:
if str(labelMaps) != '{}':
inv_mapping_dict = {v: k for k, v in labelMaps.items()}
unqClassLst2 = (pd.Series(unqClassLst)).map(inv_mapping_dict)
unqClassLst2 = list(unqClassLst2)
else:
unqClassLst2 = unqClassLst
indexName = []
columnName = []
targetnames=[]
for item in unqClassLst2:
indexName.append("act:"+str(item))
columnName.append("pre:"+str(item))
targetnames.append(str(item))
matrixconfusion = pd.DataFrame(confusion_matrix(le_trainY,predictedData, labels = unqClassLst),index = indexName, columns = columnName)
pd.set_option('display.max_columns',len(targetnames)+2)
self.log.info('-------> Confusion Matrix: ')
self.log.info(matrixconfusion)
pd.reset_option('display.max_columns')
classificationreport = pd.DataFrame(classification_report(le_trainY, predictedData, labels = unqClassLst,target_names=targetnames,output_dict=True)).transpose()
self.log.info('-------> Classification Report: ')
self.log.info(classificationreport)
matrixconfusion = matrixconfusion.to_json(orient='index')
classificationreport = classificationreport.to_json(orient='index')
else: #bugid: 14540
            self.log.info('-------> As the number of classes is more than 20, skipping creation of the confusion matrix and classification report')
return ""
lb = LabelBinarizer()
lb.fit(le_trainY)
transformTarget= lb.transform(le_trainY)
        if transformTarget.shape[-1] == 1:
            # binary case: LabelBinarizer yields a single column, so use the raw labels
            # and keep only the positive-class probability column
            transformTarget = le_trainY
            prob = np.delete( prob, 0, 1)
rocaucscore = roc_auc_score(transformTarget,prob,average="macro")
brier_score = None
mcc_score = matthews_corrcoef(le_trainY,predictedData)
if len(unqClassLst) > 2:
brier_score = np.mean(np.sum(np.square(prob - transformTarget), axis=1))
else:
brier_score = brier_score_loss(transformTarget,prob)
self.log.info('-------> ROC AUC SCORE :'+str(rocaucscore))
self.log.info(f'-------> Matthews correlation coefficient SCORE : {mcc_score}')
self.log.info(f'-------> BRIER SCORE : {brier_score}')
matrix = f'"ConfusionMatrix": {matrixconfusion},"ClassificationReport": {classificationreport},"ROC_AUC_SCORE": {rocaucscore},"MCC_SCORE": {mcc_score},"BRIER_SCORE": {brier_score}'
return(matrix)
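    # Brier score computed above: for binary targets it is sklearn's brier_score_loss on
    # the positive-class probability; for multiclass it is the mean over samples of
    # sum_k (p_k - y_k)^2 with y one-hot encoded. Lower is better in both cases.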
def split_into_train_test_data(self,featureData,targetData,testPercentage,modelType='classification'):
'''
if cvSplit == None:
'''
self.log.info('\\n-------------- Test Train Split ----------------')
if testPercentage == 0:
xtrain=featureData
ytrain=targetData
xtest=featureData
ytest=targetData
else:
testSize=testPercentage/100
if modelType == 'regression':
self.log.info('-------> Split Type: Random Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True)
else:
try:
self.log.info('-------> Split Type: Stratify Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,stratify=targetData,test_size=testSize,shuffle=True)
except:
self.log.info('-------> Split Type: Random Split')
xtrain,xtest,ytrain,ytest=train_test_split(featureData,targetData,test_size=testSize,shuffle=True)
self.log.info('Status:- !... Train / test split done: '+str(100-testPercentage)+'% train,'+str(testPercentage)+'% test')
self.log.info('-------> Train Data Shape: '+str(xtrain.shape)+' ---------->')
self.log.info('-------> Test Data Shape: '+str(xtest.shape)+' ---------->')
self.log.info('-------------- Test Train Split End ----------------\\n')
'''
else:
xtrain=featureData
ytrain=targetData
xtest=featureData
ytest=targetData
'''
return(xtrain,ytrain,xtest,ytest)
def checkForClassBalancing(self,targetData):
imbalancedCount=0
valueCount=targetData.value_counts()
self.log.info("---------- Checking for Class Imbalance on Train Data---------")
self.log.info("-------> Categories and Count:")
self.log.info(valueCount)
categoryList=valueCount.keys().tolist()
categoryCountList=valueCount.tolist()
for i in range(0,len(categoryCountList)):
if float(categoryCountList[i])<=float(0.5*max(categoryCountList)):
self.log.info("-------> Found Imbalanced class: '"+str(categoryList[i])+"' Count: "+str(categoryCountList[i]))
imbalancedCount=imbalancedCount+1
if imbalancedCount == 0:
self.log.info("-------> Status: Balanced")
self.log.info('Status:- |... Check for Data balancing done: Balanced')
else:
self.log.info("-------> Status: Unbalanced")
self.log.info('Status:- |... Check for Data balancing done: Unbalanced')
self.log.info("---------- Checking for Class Imbalance on Train Data End---------")
return(imbalancedCount)
def ExecuteClassBalancing(self,featureData,targetData,balancingMethod):
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import TomekLinks
from collections import Counter
self.log.info('\\n------------ Balancing Start --------------')
if balancingMethod.lower() == "oversample":
self.log.info("-------> Method: SMOTE OverSampling Technique")
k=1
seed=100
try:
oversample = SMOTE(sampling_strategy='auto', k_neighbors=k, random_state=seed)
balfeatureData, baltargetData = oversample.fit_resample(featureData, targetData)
self.log.info(baltargetData.value_counts())
except Exception as inst:
self.log.info("\\n!!!!!!!!! OverSampling Fails "+str(inst)+" !!!!!!!!!!!!!!\\n")
balfeatureData = featureData
baltargetData = targetData
elif balancingMethod.lower() == "undersample":
self.log.info("-------> Method: Tomelinks UnderSampling Technique")
            tLinks = TomekLinks()
            balfeatureData, baltargetData = tLinks.fit_resample(featureData, targetData)
#Added for checking balancing act by the algorithm.
counter = Counter(baltargetData)
self.log.info("Class counter:\\t"+str(baltargetData.value_counts()))
max_class = max(counter,key=counter.get)
max_value = max(counter.values())
self.log.info("Max samples: "+str(max_value)+ " in the class: "+str(max_class))
for k,v in counter.items():
if v < (max_value*98/100):
self.log.info("Undersampling is not able to do perfect data balancing.")
self.log.info("The method is used to identify the desired samples of data from the majority class that is having the lowest Euclidean distance with the minority class data. Downsampling may not balance the class after applying this method.\\n")
self.log.info(baltargetData.value_counts())
else:
balfeatureData = featureData
baltargetData = targetData
self.log.info("-------> Method: Balancing Not Applied")
        self.log.info('-------> Memory Usage by Training DataFrame After Class Balancing '+str(balfeatureData.memory_usage(deep=True).sum()))
self.log.info('Status:- |... Data balancing done: '+str(balancingMethod))
self.log.info('------------ Balancing End --------------\\n')
return(balfeatureData,baltargetData)
def combine_text_features(self,dataFrame,dataColumns):
column_merge_flag = False
merge_columns = []
if(len(dataColumns) > 1):
dataFrame['combined'] = dataFrame[dataColumns].apply(lambda row: ' '.join(row.values.astype(str)), axis=1)
merge_columns = dataColumns
features = ['combined']
column_merge_flag = True
self.log.info("After Text Concatenation")
self.log.info(dataFrame['combined'].head(10))
self.log.info("List of Combined Columns ---> "+ str(dataColumns) +"\\n")
else:
features = dataColumns
return(dataFrame,features,column_merge_flag,merge_columns)
'''
def create_preprocessing_pipeline(self,X):
textDataProfilerObj=textDataProfiler()
tfidfVector = TfidfVectorizer(tokenizer = textDataProfilerObj.textTokenizer)
pipe = Pipeline([("cleaner", TextCleaner()),('vectorizer', tfidfVector)])
vectors=pipe.fit(X)
transformedVector=pipe.transform(X)
return(pipe,transformedVector)
'''
def get_topics(self, model, feature_names, no_top_words):
topicDict = {}
for topic_idx, topic in enumerate(model.components_):
wordDict = {}
topicProb = [(feature_names[i],topic[i]/topic.sum()) for i in topic.argsort()[:-no_top_words - 1:-1]]
for word, prob in topicProb:
if word.endswith('_vect'):
word = word[:-len('_vect')]
wordDict[word] = prob
topicDict[ topic_idx] = wordDict
return topicDict
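    # Returned structure (illustrative, hypothetical words and weights):
    #   {0: {'price': 0.12, 'delivery': 0.09}, 1: {'battery': 0.15, 'screen': 0.07}}
    # i.e. topic index -> {word: normalised within-topic weight}, keeping only the
    # no_top_words highest-weighted terms per topic.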
def transform_target_feature(self,dataFrame,targetColumn):
targetDataType=dataFrame[targetColumn].dtypes
pandasNumericDtypes=['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
labelMapping= {}
if targetDataType not in pandasNumericDtypes:
le = LabelEncoder()
le.fit(dataFrame[targetColumn])
le_trainY = le.transform(dataFrame[targetColumn])
labelMapping = dict(zip(le.classes_, le.transform(le.classes_)))
self.log.info(" \\n encoded Values of predicator column ===>"+str(labelMapping))
else:
le_trainY = dataFrame[targetColumn]
return le_trainY,labelMapping
def setScoreParams(self,scoreParam,modelType,categoryCountList):
if modelType == 'classification' or modelType == 'TextClassification':
allowedmatrix = ['accuracy','recall','f1_score','precision','roc_auc']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'accuracy'
elif scoreParam.lower() == 'none':
scoreParam = 'accuracy'
elif scoreParam.lower() == "recall":
if len(categoryCountList) > 2:
scoreParam = make_scorer(sklearn.metrics.recall_score, average = 'weighted')
else:
scoreParam = make_scorer(sklearn.metrics.recall_score)
elif scoreParam.lower() == "precision" :
if len(categoryCountList) > 2:
scoreParam = make_scorer(sklearn.metrics.precision_score, average = 'weighted')
else:
scoreParam = make_scorer(sklearn.metrics.precision_score)
elif scoreParam.lower() == "f1_score" :
if len(categoryCountList) > 2:
scoreParam = make_scorer(sklearn.metrics.f1_score, average = 'weighted')
else:
scoreParam = make_scorer(sklearn.metrics.f1_score)
elif scoreParam.lower() == "roc_auc" :
if len(categoryCountList) > 2:
scoreParam = make_scorer(sklearn.metrics.roc_auc_score,needs_proba=True,multi_class='ovr',average='weighted')
else:
scoreParam = make_scorer(sklearn.metrics.roc_auc_score)
else:
scoreParam = scoreParam
else:
allowedmatrix = ['mse','r2','rmse','mae']
if(scoreParam.lower() not in allowedmatrix):
scoreParam = 'neg_mean_squared_error'
elif scoreParam.lower() == 'none':
scoreParam = 'neg_mean_squared_error'
elif scoreParam.lower() == 'mse':
scoreParam = 'neg_mean_squared_error'
elif scoreParam.lower() == 'rmse':
#scoreParam = make_scorer(sklearn.metrics.mean_squared_error, squared = False)
scoreParam='neg_root_mean_squared_error'
elif scoreParam.lower() == 'mae':
scoreParam = 'neg_mean_absolute_error'
elif scoreParam.lower() == 'r2':
scoreParam = 'r2'
else:
scoreParam = scoreParam
#self.log.info('Status:- !... Scoring parameters selected')
self.log.info("-------> Scoring parameter: "+str(scoreParam))
return(scoreParam)
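    # Note: sklearn scorers are maximised by convention, so the error metrics map to
    # their negated counterparts ('neg_mean_squared_error', 'neg_root_mean_squared_error',
    # 'neg_mean_absolute_error') instead of raw MSE/RMSE/MAE strings.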
def getbestfeatureModel(self,modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2,featuresset1,featureset2):
best_feature_model = featuresset1
self.log.info('\\n ---------- ML Summary ------------')
if modelType.lower() == "classification":
if(threshold1 == -1 and threshold2 == -1):
if score1> score2:
self.log.info('-------> Best Features: '+str(featuresset1))
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featuresset1
else:
self.log.info('-------> Best Features:'+str(featureset2))
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featureset2
elif(threshold1 == -1):
self.log.info('-------> Best Features: '+str(featureset2))
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featureset2
            elif(threshold2 == -1): # symmetric case: only model1 produced a tuned threshold
self.log.info('-------> Best Features: '+str(featuresset1))
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model =featuresset1
else:
if pscore1 == pscore2:
if rscore1 > rscore2:
self.log.info('-------> Best Features: '+str(featuresset1))
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featuresset1
else:
self.log.info('-------> Best Features: '+str(featureset2))
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featureset2
elif rscore1 == rscore2:
if pscore1 > pscore2:
self.log.info('-------> Best Features: '+str(featuresset1))
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featuresset1
else:
self.log.info('-------> Best Features: '+str(featureset2))
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featureset2
elif modelType.lower() == "regression":
if scoreParam == "r2" or scoreParam == "explained_variance":
if score1> score2 :
self.log.info('-------> Best Features: '+str(featuresset1))
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featuresset1
else:
self.log.info('-------> Best Features: '+str(featureset2))
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featureset2
else:
if score1< score2 :
self.log.info('-------> Best Features: '+str(featuresset1))
self.log.info('-------> Best Model: '+str(model1))
self.log.info('-------> Best Score: '+str(score1))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featuresset1
else:
self.log.info('-------> Best Features: '+str(featureset2))
self.log.info('-------> Best Model: '+str(model2))
self.log.info('-------> Best Score: '+str(score2))
self.log.info('-------> Scoring Param: '+str(scoreParam))
best_feature_model = featureset2
self.log.info('---------- ML Summary End ------------\\n')
return(best_feature_model)
def startLearning(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,modelFeatures,allFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,predicted_data_file,labelMaps,featuresBasedOn,code_configure,featureEngineeringSelector,modelEvaluationConfig,imageFolderLocation):
model = 'None'
params = 'None'
score = 0xFFFF
estimator = None
model_tried = ''
threshold = -1
pscore = -1
rscore = -1
topics = {}
if(targetColumn != ''):
targetData = dataFrame[targetColumn]
datacolumns=list(dataFrame.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
        if(modelType != 'clustering') and (modelType != 'TopicModelling'):
scoreParam = self.setScoreParams(scoreParam,modelType,categoryCountList)
if len(topFeatures) > 0:
self.log.info('\\n-------------- Training ML: Top/StatisticalBased Features Start --------------')
modelbasedon = 'StatisticalBased'
if featureEngineeringSelector.lower() == 'true':
self.log.info('Status:- |... Algorithm analysis based on feature engineering based feature selection started')
modelbasedon = 'DimensionalityReduction'
else:
self.log.info('Status:- |... Algorithm analysis based on statistical based feature selection started')
model_type1,model1,params1, score1, estimator1,model_tried1,xtrain1,ytrain1,xtest1,ytest1,threshold1,pscore1,rscore1,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, modelbasedon,code_configure,modelEvaluationConfig)
if model_tried != '':
model_tried += ','
model_tried += model_tried1
topFeaturesStatus = True
if featureEngineeringSelector.lower() == 'true':
self.log.info('Status:- |... Algorithm analysis based on feature engineering based feature selection completed')
else:
self.log.info('Status:- |... Algorithm analysis for statistical based feature completed')
self.log.info('-------------- Training ML: Top/StatisticalBased Features End --------------\\n')
else:
topFeaturesStatus = False
if len(modelFeatures) > 0:
self.log.info('\\n-------------- Training ML: Models Based Selected Features Start --------------')
self.log.info('Status:- |... Algorithm analysis based on model based feature selection started')
model_type2,model2,params2, score2, estimator2,model_tried2,xtrain2,ytrain2,xtest2,ytest2,threshold2,pscore2,rscore2,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,modelFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, "ModelBased",code_configure,modelEvaluationConfig)
#model_tried2['Features'] = 'ModelBased'
if model_tried != '':
model_tried += ','
model_tried += model_tried2
modelFeaturesStatus = True
self.log.info('Status:- |... Algorithm analysis for model based selected features completed')
self.log.info('-------------- Training ML: Models Based Selected Features End --------------\\n')
else:
modelFeaturesStatus = False
if len(allFeatures) > 0:
self.log.info('Status:- |... Algorithm analysis based on all features Start')
model_type3,model3,params3, score3, estimator3,model_tried3,xtrain3,ytrain3,xtest3,ytest3,threshold3,pscore3,rscore3,method,topics=self.startLearnerModule(mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,allFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, "AllFeatures",code_configure,modelEvaluationConfig)
#model_tried3['Features'] = 'AllFeatures'
allFeaturesStatus = True
if model_tried != '':
model_tried += ','
model_tried += model_tried3
self.log.info('Status:- |... Algorithm analysis based all features completed')
else:
allFeaturesStatus = False
#print(topFeaturesStatus,modelFeaturesStatus,allFeaturesStatus)
if topFeaturesStatus:
if modelFeaturesStatus:
best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score2,model1,model2,threshold1,pscore1,rscore1,threshold2,pscore2,rscore2,'StatisticalBased','ModelBased')
if best_feature_model == 'StatisticalBased' and allFeaturesStatus:
best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score3,model1,model3,threshold1,pscore1,rscore1,threshold3,pscore3,rscore3,'StatisticalBased','AllFeatures')
if best_feature_model == 'ModelBased' and allFeaturesStatus:
best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score2,score3,model2,model3,threshold2,pscore2,rscore2,threshold3,pscore3,rscore3,'ModelBased','AllFeatures')
elif allFeaturesStatus:
best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score1,score3,model1,model3,threshold1,pscore1,rscore1,threshold3,pscore3,rscore3,'StatisticalBased','AllFeatures')
else:
best_feature_model = 'StatisticalBased'
if featureEngineeringSelector.lower() == 'true':
best_feature_model = 'DimensionalityReduction'
else:
if modelFeaturesStatus and allFeaturesStatus:
best_feature_model = self.getbestfeatureModel(modelType,scoreParam,score2,score3,model2,model3,threshold2,pscore2,rscore2,threshold3,pscore3,rscore3,'ModelBased','AllFeatures')
elif modelFeaturesStatus:
best_feature_model = 'ModelBased'
elif allFeaturesStatus:
best_feature_model = 'AllFeatures'
if (best_feature_model == 'StatisticalBased' or best_feature_model == 'DimensionalityReduction'):
model_type = model_type1
model = model1
params = params1
score = score1
estimator = estimator1
#model_tried = model_tried1
xtrain = xtrain1
ytrain = ytrain1
xtest = xtest1
ytest = ytest1
features = topFeatures
threshold = threshold1
pscore = pscore1
rscore = rscore1
elif (best_feature_model == 'AllFeatures'):
model_type = model_type3
model = model3
params = params3
score = score3
estimator = estimator3
#model_tried = model_tried3
xtrain = xtrain3
ytrain = ytrain3
xtest = xtest3
ytest = ytest3
features = allFeatures
threshold = threshold3
pscore = pscore3
rscore = rscore3
else:
model_type = model_type2
model = model2
params = params2
score = score2
estimator = estimator2
#model_tried = model_tried2
xtrain = xtrain2
ytrain = ytrain2
xtest = xtest2
ytest = ytest2
threshold = threshold2
pscore = pscore2
rscore = rscore2
features = modelFeatures
if score != 'NA':
self.log.info('Status:- |... Final Best Algorithm selected: '+model+' having score='+str(round(score,2))+' based on '+best_feature_model+' feature selection')
filename = os.path.join(deployLocation,'model',iterName+'_'+iterVersion+'.sav')
saved_model = iterName+'_'+iterVersion+'.sav'
if model == 'Neural Architecture Search':
loaded_model = estimator
try:
estimator.save(filename, save_format="tf")
except Exception:
filename = os.path.join(deployLocation,'model','autoKerasModel.h5')
estimator.save(filename)
saved_model = 'autoKerasModel.h5'
else:
pickle.dump(estimator, open(filename, 'wb'))
loaded_model = pickle.load(open(filename, 'rb'))
if not xtest.empty:
df_test = xtest.copy()
else:
df_test = xtrain.copy()
if threshold == -1:
if model.lower() == 'lda':
predictedData = loaded_model.transform(xtest).argmax(axis=1)
trainPredictedData = loaded_model.transform(xtrain)
elif model.lower() == 'dbscan':
predictedData = loaded_model.fit_predict(xtest)
predictedData = loaded_model.labels_
trainPredictedData = loaded_model.fit_predict(xtrain)
trainPredictedData = loaded_model.labels_
elif model == 'Neural Architecture Search':
test_prob = estimator.predict(xtest)
train_prob = estimator.predict(xtrain)
if train_prob.shape[1] == 1:
train_prob = np.hstack(( 1-train_prob, train_prob))
test_prob = np.hstack(( 1-test_prob, test_prob))
predictedData = np.argmax(test_prob, axis=1)
trainPredictedData = np.argmax(train_prob, axis=1)
elif model in ['Deep Q Network','Dueling Deep Q Network']:
from tf_agents.trajectories import time_step
from tensorflow import constant
from sklearn.preprocessing import MinMaxScaler
q, _ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False)
test_prob = MinMaxScaler().fit_transform( q.numpy())
q, _ = loaded_model(np.array(xtrain), step_type=constant([time_step.StepType.FIRST] * np.array(xtrain).shape[0]), training=False)
train_prob = MinMaxScaler().fit_transform( q.numpy())
predictedData = np.argmax(test_prob, axis=1)
trainPredictedData = np.argmax(train_prob, axis=1)
elif modelType == 'clustering':
if not xtest.empty:
predictedData = loaded_model.predict(xtest)
trainPredictedData = loaded_model.predict(xtrain)
else:
if not xtest.empty:
predictedData = loaded_model.predict(xtest)
trainPredictedData = loaded_model.predict(xtrain)
if hasattr(loaded_model, 'predict_proba'):
train_prob = loaded_model.predict_proba(xtrain)
if not xtest.empty:
test_prob = loaded_model.predict_proba(xtest)
else:
self.log.info("-------> Threshold :"+str(threshold))
if not xtest.empty:
#bug 12437
if 'predict_proba' in dir(loaded_model):
test_prob = loaded_model.predict_proba(xtest)
predictedData = binarize(test_prob[:,1].reshape(-1, 1),threshold=threshold)
else:
raise Exception('--------- Loaded model does not support predict_proba ---------\\n')
train_prob = loaded_model.predict_proba(xtrain)
trainPredictedData = binarize(train_prob[:,1].reshape(-1, 1),threshold=threshold)
matrix = ''
try:
if(model_type == 'Classification'):
self.log.info('\\n--------- Performance Matrix with Train Data ---------')
train_matrix = self.getClassificationPerformaceMatrix(ytrain,trainPredictedData,train_prob,labelMaps)
self.log.info('--------- Performance Matrix with Train Data End ---------\\n')
if not xtest.empty:
self.log.info('\\n--------- Performance Matrix with Test Data ---------')
performancematrix = self.getClassificationPerformaceMatrix(ytest,predictedData,test_prob,labelMaps)
df_test['actual'] = ytest
df_test['predict'] = predictedData
self.log.info('--------- Performance Matrix with Test Data End ---------\\n')
matrix = performancematrix
if hasattr( loaded_model, 'predict_proba'):
predictedData_fit = loaded_model.predict_proba(xtest)
elif model == 'Neural Architecture Search':
predictedData_fit = estimator.predict(xtest)
elif model in ['Deep Q Network','Dueling Deep Q Network']:
from tf_agents.trajectories import time_step
from tensorflow import constant
q, _ = loaded_model(np.array(xtest), step_type=constant([time_step.StepType.FIRST] * np.array(xtest).shape[0]), training=False)
predictedData_fit = q.numpy()
else:
predictedData_fit = loaded_model.predict(xtest)
if predictedData_fit.shape[1] == 1:
predictedData_fit = np.hstack((1 - predictedData_fit, predictedData_fit))
                    self.auc_roccurve(ytest,predictedData_fit,labelMaps,imageFolderLocation)
else:
df_test['actual'] = ytrain
df_test['predict'] = trainPredictedData
elif(model_type == 'Regression'):
self.log.info('\\n--------- Performance Matrix with Train Data ---------')
train_matrix = self.get_regression_matrix(ytrain, trainPredictedData)
self.log.info('--------- Performance Matrix with Train Data End ---------\\n')
if not xtest.empty:
self.log.info('\\n--------- Performance Matrix with Test Data ---------')
matrix = self.get_regression_matrix(ytest, predictedData)
df_test['actual'] = ytest
df_test['predict'] = predictedData
self.log.info('--------- Performance Matrix with Test Data End ---------\\n')
else:
df_test['actual'] = ytrain
df_test['predict'] = trainPredictedData
elif(model_type == 'Clustering'):
self.log.info('\\n--------- Performance Matrix with Train Data ---------')
train_matrix = self.getclusterMatrix(xtrain,trainPredictedData)
self.log.info('--------- Performance Matrix with Train Data End ---------\\n')
self.log.info('\\n--------- Performance Matrix with Test Data ---------')
performacematrix = self.getclusterMatrix(xtest,predictedData)
df_test['predict'] = predictedData
self.log.info('--------- Performance Matrix with Test Data End ---------\\n')
matrix = performacematrix
elif(model_type.lower() == 'topicmodelling'):
self.log.info('\\n--------- Performance Matrix with Train Data ---------')
train_matrix = ""
self.log.info('--------- Performance Matrix with Train Data End ---------\\n')
self.log.info('\\n--------- Performance Matrix with Test Data ---------')
performacematrix = ""
df_test['predict'] = predictedData
self.log.info('--------- Performance Matrix with Test Data End ---------\\n')
matrix = performacematrix
except Exception as Inst:
self.log.info('--------- Error Performance Matrix ---------\\n')
self.log.info(str(Inst))
df_test['predict'] = predictedData
matrix = ""
train_matrix = ""
self.log.info('--------- Performance Matrix with Test Data End ---------\\n')
save_csv_compressed(df_test, predicted_data_file, encoding='utf-8')
return 'Success',model_type,model,saved_model,matrix,train_matrix,xtrain.shape,model_tried,score,filename,features,threshold,pscore,rscore,method,estimator,xtrain,ytrain,xtest,ytest,topics,params
    def auc_roccurve(self,y_true,y_score,classee,imageFolderLocation):
        from keras.utils import to_categorical
        n_classes = len(classee)
        y_true = to_categorical(y_true,num_classes = n_classes)
        fpr = {}
        tpr = {}
        roc_auc = {}
        class_names = list(classee.keys())
        # one-vs-rest ROC curve per class, each saved as a separate image
        for i in range(n_classes):
            fpr[i],tpr[i],_ = roc_curve(y_true[:,i], y_score[:,i])
            roc_auc[i] = auc(fpr[i],tpr[i])
            plt.figure()
            plt.plot(fpr[i],tpr[i],label=f'{class_names[i]} (AUC = {roc_auc[i]:.2f})')
            plt.plot([0,1],[0,1], linestyle='--')
            plt.xlabel('False positive rate')
            plt.ylabel('True positive rate')
            plt.title(f'{class_names[i]} ROC Curve')
            plt.legend()
            img_location = os.path.join(imageFolderLocation,str(i)+'_roc.png') #15092
            plt.savefig(img_location)
            plt.close() # release the figure so repeated calls do not accumulate open figures
def startLearnerModule(self,mlconfig,modelType,modelParams,modelList,scoreParam,targetColumn,dataFrame,xtrain,ytrain,xtest,ytest,categoryCountList,topFeatures,targetType,deployLocation,iterName,iterVersion,trained_data_file,labelMaps,featuresBasedOn, modelFeatureBased,code_configure,modelEvaluationConfig):
matrix = ''
threshold = -1
pscore = -1
rscore = -1
method = mlconfig['optimizationMethod']
method = method.lower()
geneticParam = ''
topics = {}
optimizationHyperParameter = mlconfig['optimizationHyperParameter']
cvSplit = optimizationHyperParameter['trainTestCVSplit']
nIter = int(optimizationHyperParameter['iterations'])
if(method.lower() == 'genetic'):
geneticParam = optimizationHyperParameter['geneticparams']
scoreParam = scoreParam
if 'thresholdTunning' in mlconfig:
thresholdTunning = mlconfig['thresholdTunning']
else:
thresholdTunning = 'NA'
if len(topFeatures) !=0:
self.features=topFeatures
else:
datacolumns=list(xtrain.columns)
if targetColumn in datacolumns:
datacolumns.remove(targetColumn)
self.features =datacolumns
self.log.info(f'-------> Number of Features Used For Training the Model: {len(self.features)}')
features_names = str(self.features)
if len(features_names) > 500:
features_names = ','.join(self.features[:2]) + ', ..... ,' + ','.join(self.features[-2:])
self.log.info(f'-------> Features Used For Training the Model: {features_names}')
xtrain = xtrain[self.features]
if not xtest.empty:
xtest = xtest[self.features]
if cvSplit == "":
cvSplit =None
else:
cvSplit =int(cvSplit)
if modelType == 'classification':
model_type = "Classification"
MakeFP0 = False
MakeFN0 = False
if(len(categoryCountList) == 2):
self.log.info("\\n -------------- Check for FP or FN -------------- ")
self.log.info("-------> Binary Classification")
if(thresholdTunning.lower() == 'fp0'):
self.log.info("-------> Threshold Tuning: False Positive")
MakeFP0 = True
elif(thresholdTunning.lower() == 'fn0'):
self.log.info("-------> Threshold Tuning: False Negative")
MakeFN0 = True
if MakeFP0 == False and MakeFN0 == False:
self.log.info("-------> Threshold Tuning: Not Any")
self.log.info("-------------- Check for FP or FN End-------------- \\n")
elif(len(categoryCountList) > 2): #bug 12438
self.log.info("\\n -------------- Check for FP or FN -------------- ")
self.log.info("-------> Multiclass Classification")
if(thresholdTunning.lower() == 'fp0' or thresholdTunning.lower() == 'fn0'):
self.log.info("-------> Threshold Tuning: Not supported")
else:
self.log.info("-------> Threshold Tuning: Not Any")
self.log.info("-------------- Check for FP or FN End-------------- \\n")
objClf = ClassifierModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,modelType,MakeFP0,MakeFN0,deployLocation)
model, params, score, estimator,model_tried,threshold,pscore,rscore = objClf.classModelling( modelFeatureBased,code_configure)
elif modelType == 'regression':
model_type = "Regression"
objClf = RegressionModel(modelList, modelParams, scoreParam, cvSplit, nIter,geneticParam, xtrain,ytrain,xtest,ytest,method,deployLocation)
model,params,score,estimator,model_tried = objClf.regressionModelling(modelFeatureBased,code_configure)
elif modelType =='clustering':
model_type = 'Clustering'
            self.log.info('-------> Clustering model list: '+str(modelList)) # replaces a stray debug print
if 'KMeans' in modelList:
clustendency = self.cluster_tendency(xtrain)
model='KMeans'
model_tried = '{"Model":"KMeans","Score":"NA"}'
kmeanmodelparams=modelParams['KMeans']
n_clusters = kmeanmodelparams['n_clusters']
if n_clusters == None or n_clusters == 0 or n_clusters == '':
n_clusters = self.calculateNumberofCluster(xtrain)
kmeanmodelparams['n_clusters'] = n_clusters
kmeans=KMeans(n_clusters=n_clusters)
targetData=kmeans.fit_predict(xtrain)
self.log.info('Status:- |... ML Algorithm applied: KMeans')
self.log.info('\\n------------ Centers Points Start------------')
values = kmeans.cluster_centers_.squeeze()
#print(values)
centers = pd.DataFrame(kmeans.cluster_centers_,columns= xtrain.columns)
filename = os.path.join(deployLocation,'centers.csv')
centers.to_csv(filename)
labels = kmeans.labels_
i=0
for value_row in values:
j=0
self.log.info('------->Label: '+str(i))
for value in value_row:
self.log.info('---------->Feature: "'+str(self.features[j])+'" Center Point: '+str(value))
j = j+1
i = i+1
                self.log.info('------------ Centers Points End------------\\n')
score='NA'
scoreParam=None
params=kmeanmodelparams
estimator=kmeans
if 'DBSCAN' in modelList:
DBSCAN_ModelParams=modelParams['DBSCAN']
db = DBSCAN(eps=DBSCAN_ModelParams['eps'],min_samples = DBSCAN_ModelParams['min_samples']).fit(xtrain)
#targetData=db.fit_predict(xtrain)
self.log.info('Status:- |... ML Algorithm applied: DBSCAN')
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
self.log.info('------->Labels: '+str(labels))
self.log.info('------->No Of Cluster: '+str(n_clusters_))
self.log.info('------->No Of Noise Point: '+str(n_noise_))
score='NA'
scoreParam=None
params=''
estimator=db
model='DBSCAN'
model_tried = '{"Model":"DBSCAN","Score":"NA"}'
elif modelType == 'topicmodelling':
model_type = 'TopicModelling'
model='LDA'
model_tried = '{"Model":"LDA","Score":"NA"}'
LDAmodelparams=modelParams['LDA']
n_topics = LDAmodelparams['n_topics']
n_words_per_topic = LDAmodelparams['n_words_per_topic']
if n_topics == None or n_topics == 0 or n_topics == '':
n_topics = 10
LDAmodelparams['n_topics'] = n_topics
if n_words_per_topic == None or n_words_per_topic == 0 or n_words_per_topic == '':
n_words_per_topic = 10
LDAmodelparams['n_words_per_topic'] = n_words_per_topic
lda = LatentDirichletAllocation(n_components=n_topics,random_state=0)
self.log.info('Status:- |... ML Algorithm applied: LDA')
targetData=lda.fit_transform(xtrain)
topics = self.get_topics(lda, topFeatures, n_words_per_topic)
self.log.info(topics)
score='NA'
scoreParam=None
params=LDAmodelparams
estimator=lda
        return model_type,model,params, score, estimator,model_tried,xtrain,ytrain,xtest,ytest,threshold,pscore,rscore,method, topics
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import time
import os
import sys
import numpy as np
from numpy import arange
from numpy import argmax
import json
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.svm import SVC
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from sklearn.preprocessing import binarize
from learner.optimizetechnique import OptimizationTq
from learner.defaultAlgos import defaultParams
from learner.parameters import parametersDefine
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import logging
from learner.aion_matrix import aion_matrix
import mlflow
from pathlib import Path
from uncertainties.aionUQ import aionUQ
# apply threshold to positive probabilities to create labels
def to_labels(pos_probs, threshold):
return (pos_probs >= threshold).astype('int')
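# Illustrative threshold sweep with to_labels (uses arange/argmax/f1_score imported
# above; `clf`, `testX` and `testY` are hypothetical):
#
#   probs = clf.predict_proba(testX)[:, 1]                 # positive-class probabilities
#   thresholds = arange(0.0, 1.0, 0.01)
#   scores = [f1_score(testY, to_labels(probs, t)) for t in thresholds]
#   best_threshold = thresholds[argmax(scores)]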
class ClassifierModel():
def __init__(self,modelList,params,scoreParam,cvSplit,numIter,geneticParam,trainX,trainY,testX,testY,method,modelType,MakeFP0,MakeFN0,deployLocation):
self.modelList =modelList
self.params =params
self.trainX =trainX
self.X =trainX
self.trainY =trainY
self.testX = testX
self.testY = testY
self.method =method
self.scoreParam=scoreParam
self.cvSplit=cvSplit
self.numIter=numIter
self.geneticParam=geneticParam
self.MakeFP0= MakeFP0
self.MakeFN0=MakeFN0
self.log = logging.getLogger('eion')
self.modelType = modelType
self.uq_x_train = trainX
self.uq_x_test = testX
self.uq_y_train = trainY
self.uq_y_test = testY
self.deployLocation = deployLocation
self.AlgorithmNames={'Logistic Regression':'LogisticRegression','Stochastic Gradient Descent':'SGDClassifier','Naive Bayes':'GaussianNB','Support Vector Machine':'SVC','K Nearest Neighbors':'KNeighborsClassifier','Decision Tree':'DecisionTreeClassifier','Random Forest':'RandomForestClassifier','Gradient Boosting':'GradientBoostingClassifier','Extreme Gradient Boosting (XGBoost)':'XGBClassifier','Categorical Boosting (CatBoost)': 'CatBoostClassifier','Light Gradient Boosting (LightGBM)': 'LGBMClassifier','Bagging (Ensemble)':'BaggingClassifier','Stacking (Ensemble)':'StackingClassifier','Voting (Ensemble)':'VotingClassifier','Deep Q Network':'DQN','Dueling Deep Q Network':'DDQN','Neural Architecture Search':'NAS'}
self.modelToAlgoNames = {value: key for key, value in self.AlgorithmNames.items()}
def check_threshold(self,estimator,testX,testY,threshold_range,checkParameter,modelName):
thresholdx = -1
for threshold in threshold_range:
predictedData = estimator.predict_proba(testX)
predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)
p_score = precision_score(testY, predictedData)
#self.log.info('-------------> Precision:'+str(p_score))
r_score = recall_score(testY, predictedData)
#self.log.info('-------------> Rscore:'+str(r_score))
#self.log.info(confusion_matrix(testY, predictedData))
tn, fp, fn, tp = confusion_matrix(testY, predictedData).ravel()
if(checkParameter.lower() == 'fp'):
if fp == 0:
if(p_score == 1):
thresholdx = threshold
self.log.info('---------------> Best Threshold:'+str(threshold))
self.log.info('---------------> Best Precision:'+str(p_score))
self.log.info('---------------> Best Recall:'+str(r_score))
self.log.info('---------------> TN:'+str(tn))
self.log.info('---------------> FP:'+str(fp))
self.log.info('---------------> FN:'+str(fn))
self.log.info('---------------> TP:'+str(tp))
break
if(checkParameter.lower() == 'fn'):
if fn == 0:
if(r_score == 1):
thresholdx = threshold
self.log.info('---------------> Best Threshold:'+str(threshold))
self.log.info('---------------> Best Precision:'+str(p_score))
self.log.info('---------------> Best Recall:'+str(r_score))
self.log.info('---------------> TN:'+str(tn))
self.log.info('---------------> FP:'+str(fp))
self.log.info('---------------> FN:'+str(fn))
self.log.info('---------------> TP:'+str(tp))
break
return(thresholdx,p_score,r_score)
def getBestModel(self,fp0,fn0,threshold,bestthreshold,rscore,brscore,pscore,bpscore,tscore,btscore):
cmodel = False
if(threshold != -1):
if(bestthreshold == -1):
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif fp0:
if rscore > brscore:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif rscore == brscore:
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif fn0:
if pscore > bpscore:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
elif pscore == bpscore:
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
bestthreshold = threshold
brscore = rscore
bpscore = pscore
btscore = tscore
else:
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
btscore = tscore
else:
if(bestthreshold == -1):
if tscore > btscore or btscore == -0xFFFF:
cmodel = True
btscore = tscore
return cmodel,btscore,bestthreshold,brscore,bpscore
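    # Selection rules implemented in getBestModel above: a candidate with a valid tuned
    # threshold always displaces a best model that has none; under FP0 tuning the higher
    # recall wins (score breaks ties), under FN0 tuning the higher precision wins;
    # otherwise the plain score decides.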
def logMlflow(self, runName, params, metrices, estimator, algoName=None):
with mlflow.start_run(run_name = runName):
for k,v in params.items():
mlflow.log_param(k, v)
for k,v in metrices.items():
mlflow.log_metric(k, v)
if algoName == 'CatBoostClassifier':
mlflow.catboost.log_model(estimator, "model")
else:
mlflow.sklearn.log_model(estimator, "model")
model_uri = mlflow.get_artifact_uri("model")
""" for some dataset evaluate takes more than 90 min, so commenting till some solution is not found
evaluate_data = self.testX.copy()
evaluate_data['label'] = self.testY.copy()
mlflow.evaluate(model_uri, data=evaluate_data, targets='label', model_type="classifier")
del evaluate_data
"""
def classModelling(self, modelOrFeatureBased,code_configure):
paramObj=parametersDefine()
bestModel='None'
bestParams={}
bestScore=-0xFFFF
bestEstimator = 'None'
bestpipelineModel='None'
scoredetails = ''
threshold = -1
bestthreshold = -1
precisionscore =-1
bestprecisionscore=-1
recallscore = -1
bestrecallscore=-1
self.log.info('\\n---------- ClassifierModel has started ----------')
objClf = aion_matrix()
try:
self.log.info('Status:- |... Search Optimization Method applied: '+self.method)
for modelName in self.modelList:
if modelName in ['Bagging (Ensemble)','Voting (Ensemble)','Stacking (Ensemble)','Dueling Deep Q Network','Deep Q Network','Neural Architecture Search']:
if modelName == 'Bagging (Ensemble)':
from ensemble.ensemble_bagging import ensemble_bagging
ensemble_bagging_obj = ensemble_bagging(self.params[modelName],self.scoreParam,self.MakeFP0,self.MakeFN0)
estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_bagging_obj.ensemble_bagging_classifier(self.trainX,self.trainY,self.testX,self.testY)
if modelName == 'Stacking (Ensemble)':
from ensemble.ensemble_stacking import ensemble_stacking
ensemble_stacking_obj = ensemble_stacking(self.params[modelName],self.scoreParam)
estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_stacking_obj.ensemble_stacking_classifier(self.trainX,self.trainY,self.testX,self.testY,self.MakeFP0,self.MakeFN0,self.modelList)
if modelName == 'Voting (Ensemble)':
from ensemble.ensemble_voting import ensemble_voting
ensemble_voting_obj = ensemble_voting("",self.scoreParam)
#bug 12437
status,estimator,modelParams,score,model,threshold,precisionscore,recallscore = ensemble_voting_obj.ensemble_voting_classifier(self.trainX,self.trainY,self.testX,self.testY,self.MakeFP0,self.MakeFN0,self.modelList)
if status != "SUCCESS": #bug 12437
continue
if modelName == 'Deep Q Network':
from reinforcement.DRL_train import ReinformentLearning
rlObj = ReinformentLearning(self.params[modelName],self.scoreParam,'Classification')
estimator,modelParams,score,model,threshold,precisionscore,recallscore = rlObj.TrainRL(self.trainX,self.trainY,self.testX,self.testY,'DQN',self.deployLocation)
if modelName == 'Dueling Deep Q Network':
from reinforcement.DRL_train import ReinformentLearning
rlObj = ReinformentLearning(self.params[modelName],self.scoreParam,'Classification')
estimator,modelParams,score,model,threshold,precisionscore,recallscore = rlObj.TrainRL(self.trainX,self.trainY,self.testX,self.testY,'DDQN',self.deployLocation)
'''
if modelName == 'Neural Architecture Search':
from nas.aionNAS import aionNAS
objNAS = aionNAS('Classification',self.params[modelName],self.trainX,self.testX,self.trainY,self.testY,self.deployLocation)
estimator,modelParams,score,model,threshold,precisionscore,recallscore=objNAS.nasMain(self.scoreParam)
'''
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":"NA"}'
status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore)
if status:
bestScore =bscore
bestModel =model
bestParams=modelParams
bestEstimator=estimator
                        bestthreshold = bthres
                        bestrecallscore = brscore
bestprecisionscore = bpscore
self.log.info('Status:- |... ML Algorithm applied: '+modelName)
self.log.info('Status:- |... Score: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\\n')
continue
paramSpace=self.params[modelName].copy()
algoName = self.AlgorithmNames[modelName]
paramDict =paramObj.paramDefine(paramSpace,self.method)
if not self.method == 'bayesopt':
paramSize = paramObj.getParamSpaceSize(paramDict)
else:
paramSize = 0
if (self.method == 'bayesopt' and not paramDict) or (not self.method == 'bayesopt' and paramSize<=0):
try:
start = time.time()
#function call
defObj = defaultParams(algoName,paramDict,self.scoreParam,self.MakeFP0, self.MakeFN0,paramSize)
estimator, modelParams, model,score, threshold, precisionscore, recallscore =defObj.startTrainingClassification(self.trainX,self.trainY,self.testX,self.testY)
executionTime = time.time() - start
if (scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"' + self.modelToAlgoNames[model] + '","FeatureEngineering":"' + str(
modelOrFeatureBased) + '","Score":' + str(score) + ',"ModelUncertainty":"NA"}'
status, bscore, bthres, brscore, bpscore = self.getBestModel(self.MakeFP0, self.MakeFN0,threshold, bestthreshold,recallscore, bestrecallscore,precisionscore, bestprecisionscore,score, bestScore)
self.log.info('---------> Total Execution: ' + str(executionTime) + '\\n')
if status:
bestScore = bscore
bestModel = model
bestParams = modelParams
bestEstimator = estimator
bestthreshold = bthres
bestrecallscore = brscore
bestprecisionscore = bpscore
self.log.info('Status:- |... ML Algorithm applied: ' + modelName)
self.log.info('Status:- |... Score: ' + objClf.get_print_score(self.scoreParam) + '=' + str(
round(score, 2)) + '\\n')
except Exception as inst:
self.log.info('\\n < ---------- Model Execution Failed Start--------->')
self.log.info('\\n<-------' + modelName + ' Model Execution failed!!!.' + str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
                        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
self.log.info('\\n < ---------- Model Execution Failed End --------->')
continue
                # call algorithms with default values
if self.method == 'bayesopt':
code_configure.add_model(algoName,paramSpace)
else:
paramDictCopy = paramDict
# numpy array is not json serializable
#numpy is already imported but still np.ndarray raise error
import numpy as np
for key,value in paramDictCopy.items():
if isinstance(value, np.ndarray):
paramDictCopy[key] = paramDictCopy[key].tolist()
code_configure.add_model(algoName,paramDictCopy)
trainingStatus = 'Success'
if self.method =='grid':
try:
self.log.info("-------> Optimization Method :Grid Search")
self.log.info("-------> Model Name: "+str(modelName))
opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY)
start = time.time()
model,modelParams,score,estimator=opTq.gridSearchOpt()
executionTime=time.time() - start
if not self.testX.empty:
predictedData = estimator.predict(self.testX)
score = objClf.get_score(self.scoreParam,self.testY,predictedData)
else:
score = score*100
problemName = estimator.__class__.__name__
runName = algoName + '_' + modelOrFeatureBased
metrices = {}
metrices["score"] = score
try:
self.logMlflow(runName, modelParams, metrices, estimator, algoName)
except Exception as e:
self.log.info('----------> ML Flow error!!!. ' + str(e)) # usnish
pass
output_jsonobject = ""
problemName = estimator.__class__.__name__
self.log.info('----------> Testing Score: '+str(score))
try:
if ((estimator.__class__.__name__ == "ABCMeta") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ):
self.log.info('-----> Model Uncertainty Not Supported')
else:
uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation)
accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq")
self.log.info("-------> model_confidence: "+str(model_confidence_per)+str('%'))
self.log.info("-------> model_uncertainty: "+str(model_uncertainty_per)+str('%'))
except:
pass
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":'+str(json.dumps(output_jsonobject))+'}'
self.log.info('----------> Testing Score: '+str(score))
import numpy as np
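# Threshold tuning: sweep candidate decision cutoffs and let check_threshold
# pick one that suppresses the unwanted error type, returning that cutoff with
# its precision and recall. MakeFP0 sweeps upward from 0.0 (drive false
# positives to zero); MakeFN0 sweeps downward from 1.0 (drive false negatives
# to zero).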
if self.MakeFP0:
self.log.info('-------- Calculate Threshold for FP Start-------')
startRange = 0.0
endRange = 1.0
stepsize = 0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FP',algoName)
self.log.info('-------- Calculate Threshold for FP End-------')
if self.MakeFN0:
self.log.info('-------- Calculate Threshold for FN Start-------')
startRange = 1.0
endRange = 0.0
stepsize = -0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FN',algoName)
self.log.info('-------- Calculate Threshold for FN End-------')
self.log.info('----------> Total Execution: '+str(executionTime)+'\\n')
status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore)
if status:
bestScore =bscore
bestModel =model
bestParams=modelParams
bestEstimator=estimator
bestthreshold = bthres
bestrecallscore = brscore
bestprecisionscore = bpscore
except Exception as inst:
self.log.info('\\n < ---------- Model Execution Failed Start--------->')
self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst))
self.log.info('\\n < ---------- Model Execution Failed End --------->')
trainingStatus = 'Error (Exception)'
elif self.method == 'random':
try:
self.log.info("-------> Optimization Method :Random Search")
self.log.info("-------> Model Name: "+str(modelName))
start = time.time()
opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY)
model,modelParams,score,estimator=opTq.randomSearchOpt()
executionTime=time.time() - start
if not self.testX.empty:
predictedData = estimator.predict(self.testX)
score = objClf.get_score(self.scoreParam,self.testY,predictedData)
else:
score = score*100
problemName = estimator.__class__.__name__
runName = algoName + '_' + modelOrFeatureBased
metrices = {}
metrices["score"] = score
try:
self.logMlflow(runName, modelParams, metrices, estimator, algoName)
except Exception as e:
self.log.info('----------> ML Flow error!!!. ' + str(e)) # usnish
pass
import numpy as np
if self.MakeFP0:
self.log.info('-------- Calculate Threshold for FP Start-------')
startRange = 0.0
endRange = 1.0
stepsize = 0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FP',algoName)
self.log.info('-------- Calculate Threshold for FP End-------')
if self.MakeFN0:
self.log.info('-------- Calculate Threshold for FN Start-------')
startRange = 1.0
endRange = 0.0
stepsize = -0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = self.check_threshold(estimator,self.trainX,self.trainY,threshold_range,'FN',algoName)
self.log.info('-------- Calculate Threshold for FN End-------')
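# A returned threshold of -1 means no usable cutoff was found. Otherwise,
# re-binarize the positive-class probabilities at the tuned cutoff before
# scoring, so the reported score reflects the adjusted decision boundary.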
if threshold != -1:
if not self.testX.empty:
predictedData = estimator.predict_proba(self.testX)
predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)
score = objClf.get_score(self.scoreParam,self.testY,predictedData)
else:
predictedData = estimator.predict_proba(self.trainX)
predictedData = binarize(predictedData[:,1].reshape(-1, 1),threshold=threshold)
score = objClf.get_score(self.scoreParam,self.trainY,predictedData)
self.log.info('---------> Total Execution: '+str(executionTime)+'\\n')
output_jsonobject = ""
problemName = estimator.__class__.__name__
self.log.info('----------> Testing Score: '+str(score))
try:
if ((estimator.__class__.__name__ == "ABCMeta") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ):
self.log.info('-----> Model Uncertainty Not Supported')
else:
uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation)
accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq")
self.log.info("-------> model_confidence: "+str(model_confidence_per)+str('%'))
self.log.info("-------> model_uncertainty: "+str(model_uncertainty_per)+str('%'))
except Exception as e:
pass
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":'+str(json.dumps(output_jsonobject))+'}'
status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore)
if status:
bestScore =bscore
bestModel =model
bestParams=modelParams
bestEstimator=estimator
bestthreshold = threshold
bestrecallscore = recallscore
bestprecisionscore = precisionscore
except Exception as inst:
self.log.info('\\n < ---------- Model Execution Failed Start--------->')
self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
self.log.info('\\n < ---------- Model Execution Failed End --------->')
trainingStatus = 'Error (Exception)'
elif self.method == 'bayesopt':
try:
self.log.info("-------> Optimization Method :BayesOpt")
self.log.info("-------> Model Name: "+str(modelName))
opTq =OptimizationTq(algoName,paramDict,self.cvSplit,self.scoreParam,self.numIter,self.trainX,self.trainY)
fun=opTq.f
trials = Trials()
start = time.time()
best = fmin(fun,paramDict,algo=tpe.suggest, max_evals=self.numIter, trials=trials)
executionTime=time.time() - start
results = sorted(trials.results, key = lambda x: x['loss'])
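# hyperopt's fmin minimizes the objective, so every trial result carries a
# 'loss'; sorting in ascending loss order puts the best trial at index 0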
bestresult=results[0]
model=bestresult['model']
score=bestresult['score']
modelParams=bestresult['params']
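# 'res' renders the winning params as a keyword-argument string so eval can
# re-instantiate the estimator class by name before the final refit below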
res = ', '.join("{!s}={!r}".format(key,val) for (key,val) in modelParams.items())
modelObj=eval(model+'('+res+')')
estimator = modelObj.fit(self.trainX,self.trainY)
if not self.testX.empty:
predictedData = estimator.predict(self.testX)
score = objClf.get_score(self.scoreParam,self.testY,predictedData)
problemName = estimator.__class__.__name__
runName = algoName + '_' + modelOrFeatureBased
metrices = {}
metrices["score"] = score
try:
self.logMlflow(runName, modelParams, metrices, estimator, algoName)
except Exception as e:
self.log.info('----------> ML Flow error!!!. ' + str(e)) # usnish
pass
output_jsonobject = ""
problemName = estimator.__class__.__name__
self.log.info('----------> Testing Score: '+str(score))
try:
if ((estimator.__class__.__name__ == "ABCMeta") or (model in ['SGDClassifier','XGBClassifier','CatBoostClassifier','LGBMClassifier']) ):
self.log.info('-----> Model Uncertainty Not Supported')
else:
uqObj=aionUQ(None,None,None,problemName,modelParams,estimator,None,None,self.deployLocation)
accuracy,uq_ece,output_jsonobject,model_confidence_per,model_uncertainty_per=uqObj.uqMain_BBMClassification(self.uq_x_train,self.uq_x_test,self.uq_y_train,self.uq_y_test,"aionuq")
self.log.info("-------> model_confidence: "+str(model_confidence_per)+str('%'))
self.log.info("-------> model_uncertainty: "+str(model_uncertainty_per)+str('%'))
except:
pass
if(scoredetails != ''):
scoredetails += ','
scoredetails += '{"Model":"'+self.modelToAlgoNames[model]+'","FeatureEngineering":"'+str(modelOrFeatureBased)+'","Score":'+str(score)+',"ModelUncertainty":'+str(json.dumps(output_jsonobject))+'}'
'''
test_accuracy = accuracy_score(self.testY,predictedData)
test_precision = precision_score(self.testY,predictedData,average='macro')
self.log.info('---------> Test Accuracy: '+str(test_accuracy))
self.log.info('---------> Test Precision: '+str(test_precision))
'''
import numpy as np
if self.MakeFP0:
self.log.info('-------- Calculate Threshold for FP Start-------')
startRange = 0.0
endRange = 1.0
stepsize = 0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = self.check_threshold(estimator,self.testX,self.testY,threshold_range,'FP',algoName)
self.log.info('-------- Calculate Threshold for FP End-------')
if self.MakeFN0:
self.log.info('-------- Calculate Threshold for FN Start-------')
startRange = 1.0
endRange = 0.0
stepsize = -0.01
threshold_range = np.arange(startRange,endRange,stepsize)
threshold,precisionscore,recallscore = self.check_threshold(estimator,self.testX,self.testY,threshold_range,'FN',algoName)
self.log.info('-------- Calculate Threshold for FN End-------')
self.log.info('---------> Total Execution: '+str(executionTime)+'\\n')
status,bscore,bthres,brscore,bpscore = self.getBestModel(self.MakeFP0,self.MakeFN0,threshold,bestthreshold,recallscore,bestrecallscore,precisionscore,bestprecisionscore,score,bestScore)
if status:
bestScore = bscore
bestModel = model
bestParams = modelParams
bestEstimator=estimator
bestthreshold = threshold
bestrecallscore = recallscore
bestprecisionscore = precisionscore
except Exception as inst:
self.log.info('\\n < ---------- Model Execution Failed Start--------->')
self.log.info('\\n<-------'+ modelName+' Model Execution failed!!!.'+str(inst))
self.log.info('\\n < ---------- Model Execution Failed End --------->')
trainingStatus = 'Error (Exception)'
else:
trainingStatus = 'Error (HyperTuning Algo Not Supported)'
self.log.info('Status:- |... ML Algorithm applied: '+modelName)
if trainingStatus.lower() == 'success':
self.log.info('Status:- |... Score after hyperparameter tuning: '+objClf.get_print_score(self.scoreParam)+'='+str(round(score,2))+'\\n')
else:
self.log.info('Status:- |... Training Error : '+trainingStatus+'\\n')
self.log.info('---------- ClassifierModel End ---------- \\n')
if bestModel != 'None':
self.log.info('\\n------- Best Model and its parameters -------------')
self.log.info('Status:- |... Best Algorithm selected: '+str(self.modelToAlgoNames[bestModel])+' Score='+str(round(bestScore,2)))
self.log.info("-------> Best Name: "+str(bestModel))
self.log.info("-------> Best Score: "+str(bestScore))
return self.modelToAlgoNames[bestModel],bestParams,bestScore,bestEstimator,scoredetails,bestthreshold,bestprecisionscore,bestrecallscore
else:
raise Exception("Sorry, no model is trained")
except Exception as inst:
self.log.info( '\\n-----> ClassifierModel failed!!!.'+str(inst))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import numpy as np
import logging
import sys
import os
class parametersDefine():
def __init__(self):
self.paramDict = None
self.log = logging.getLogger('eion')
def getParamSpaceSize(self,paramDict):
size=1
if(len(paramDict)==0):
return 0
for keys in paramDict.keys():
size=size*len(paramDict[keys])
return size
def paramDefine(self, paramSpace, method):
paramDict = {}
for j in list(paramSpace.keys()):
inp = paramSpace[j]
try:
isLog = False
isLin = False
isRan = False
isList = False
isString = False
try:
# check if functions are given as input and reassign paramspace
v = paramSpace[j]
if 'logspace' in paramSpace[j]:
paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isLog = True
elif 'linspace' in paramSpace[j]:
paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isLin = True
elif 'range' in paramSpace[j]:
paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isRan = True
elif 'list' in paramSpace[j]:
paramSpace[j] = v[v.find("(") + 1:v.find(")")].replace(" ", "")
isList = True
elif '[' in paramSpace[j] and ']' in paramSpace[j]:
paramSpace[j] = v.split('[')[1].split(']')[0].replace(" ", "")
isList = True
x = paramSpace[j].split(',')
except Exception as e:
if isinstance(paramSpace[j], (int, float)):
paramSpace[j] = str(paramSpace[j])
x = []
x.append(paramSpace[j])
str_arg = paramSpace[j]
# check if arguments are string
try:
test = eval(x[0])
except:
isString = True
if isString:
paramDict.update({j: hp.choice(j, x)} if method == 'bayesopt' else {j: x})
else:
res = eval(str_arg)
if isLin:
y = eval('np.linspace' + str(res))
paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y})
elif isLog:
y = eval('np.logspace' + str(res))
paramDict.update(
{j: hp.uniform(j, 10 ** eval(x[0]), 10 ** eval(x[1]))} if method == 'bayesopt' else {j: y})
elif isRan:
y = eval('np.arange' + str(res))
paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y})
# check datatype of argument
elif isinstance(eval(x[0]), bool):
y = list(map(lambda i: eval(i), x))
paramDict.update({j: hp.choice(j, eval(str(y)))} if method == 'bayesopt' else {j: y})
elif isinstance(eval(x[0]), float):
res = eval(str_arg)
if len(str_arg.split(',')) == 3 and not isList:
y = eval('np.linspace' + str(res))
#print(y)
paramDict.update({j: hp.uniform(j, eval(x[0]), eval(x[1]))} if method == 'bayesopt' else {j: y})
else:
y = list(res) if isinstance(res, tuple) else [res]
paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y})
else:
res = eval(str_arg)
if len(str_arg.split(',')) == 3 and not isList:
y = eval('np.linspace' +str(res)) if eval(x[2]) >= eval(x[1]) else eval('np.arange'+str(res))
else:
y = list(res) if isinstance(res, tuple) else [res]
paramDict.update({j: hp.choice(j, y)} if method == 'bayesopt' else {j: y})
except Exception as inst:
self.log.info('\\n-----> Parameter parsing failed!!!.' + str(inst))
self.log.info("The entered parameter is invalid: {"+ j +':'+ inp+'}')
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.log.info(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))
raise
return paramDict
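# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the same search-space strings expand for 'grid' (concrete value
# arrays) versus 'bayesopt' (hyperopt distributions). The parameter names and
# ranges below are hypothetical examples.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    pdef = parametersDefine()
    space = {'n_estimators': 'range(10,100,10)', 'criterion': 'gini,entropy'}
    grid_space = pdef.paramDefine(dict(space), 'grid')
    print('grid candidates:', pdef.getParamSpaceSize(grid_space))  # 9 x 2 = 18
    bayes_space = pdef.paramDefine(dict(space), 'bayesopt')
    print('bayes space:', bayes_space)  # hp.choice distributions for fmin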
'''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import sys
import logging
import json
import joblib
from pathlib import Path
import platform
from datetime import datetime as dt
import time
from pathlib import Path
import argparse
from operator import itemgetter
import re
import fitz
from io import StringIO
from nltk.tokenize import sent_tokenize
import pandas as pd
from scipy import spatial
import urllib.request
import zipfile
import shutil
requirements = """
scipy
pandas
pathlib
"""
def pdf2txtInternal(pdffile):
keyword = ['Discussion','4. Discussion','DISCUSSION','Results','RESULTS','Introduction','introduction','methods',
'method','result','results','limitation','Conclusions','conclusion','Acknowledgements',
'Acknowledgement','ACKNOWLEDGMENT','ACKNOWLEDGMENTS','References','REFERENCES']
print(pdffile)
filename1 = Path(pdffile)
csvInpClassFileName = filename1.stem
csvOutpClassFileName = "ClassResult" + filename1.stem +".csv"
styles = {}
font_counts = {}
granularity=False
doc = fitz.open(pdffile)
for i in range(1,len(doc)+1):
page = doc[i-1]
blocks = page.get_text("dict")["blocks"]
for b in blocks: # iterate through the text blocks
if b['type'] == 0: # block contains text
for l in b["lines"]: # iterate through the text lines
for s in l["spans"]: # iterate through the text spans
if granularity:
identifier = "{0}_{1}_{2}_{3}".format(s['size'], s['flags'], s['font'], s['color'])
styles[identifier] = {'size': s['size'], 'flags': s['flags'], 'font': s['font'],
'color': s['color']}
else:
identifier = "{0}".format(s['size'])
styles[identifier] = {'size': s['size'], 'font': s['font']}
font_counts[identifier] = font_counts.get(identifier, 0) + 1 # count the fonts usage
font_counts = sorted(font_counts.items(), key=itemgetter(1), reverse=True)
doc.close()
if len(font_counts) < 1:
raise ValueError("Zero discriminating fonts found!")
p_style = styles[font_counts[0][0]] # get style for most used font by count (paragraph)
p_size = p_style['size']
results = [] # list of tuples that store the information as (text, font size, font name)
total_data =[]
para_data =[]
search_data =[]
only_text =[]
v={}
pdf = fitz.open(pdffile) # filePath is a string that contains the path to the pdf
for page in pdf:
page_dict = page.get_text("dict")
blocks = page_dict["blocks"]
for block in blocks:
if "lines" in block.keys():
spans = block['lines']
for span in spans:
data = span['spans']
for lines in data:
if lines['size']>=p_size:
total_data.append([[lines['text']], [lines['size'], lines['font']]])
search_data.append([[lines['text']], [str(int(lines['size']))]])
para_data.append([lines['text']]) #, [lines['size']]])
for keywords in keyword:
if keywords == lines['text']: # only store font information of a specific keyword
results.append([[lines['text']], [lines['size'], lines['font']]])
only_text.append([lines['text']])
pdf.close()
headers=['']
intros =['Abstract','abstract']
header = ['']
headers_info =[]
for line in total_data:
if results[-1][1] == line[1]:
headers_info.append(line)
headers.extend(line[0])
if str(results[-1][0]).isupper():
headers =([item for item in headers if re.findall(r"(?<![^\\s,])[A-Z]+(?![^\\s,])", item)])
headers.insert(0,'')
m1 = [x for x in headers if x=='Abstract']
if len(m1)!=0:
headers.pop(0)
else:
headers = headers
elif str(results[-1][0][0][0]).isdigit():
headers = ([item for item in headers if re.findall(r"([0-9])" , item)])
headers.insert(0,'')
else:
m1 = [x for x in headers if x=='Abstract']
if len(m1)!=0:
headers.pop(0)
else:
headers = headers
header_size=(headers_info[0][1][0])
paragraph =[]
check =[]
str1 =' '
for data in (para_data):
paragraph.extend(data)
str2 = str1.join(paragraph)
repl = [['- ', '-'], [' +', ' '], [' \\.', '.']]
for i in repl:
str2 = re.sub(i[0], i[1], str2)
for al in search_data:
rec=(''.join(str(x) for x in al[1]))
if float(rec) >=(p_size) or float(rec)>= header_size:
check.extend(al[0])
str3 = str1.join(check)
repl = [['- ', '-'], [' +', ' '], [' \\.', '.']]
for i in repl:
str3 = re.sub(i[0], i[1], str3)
dataTosend=[]
data = []
for cols in range(2,len(headers)+1):
start = headers[cols-2] #.replace(' ','') #'SUBJECTS AND METHODS'
end = headers[cols-1]
if start in ['Acknowledgements', 'Acknowledgement', 'ACKNOWLEDGMENT','ACKNOWLEDGMENTS', 'References', 'REFERENCES']:
break
if start=='': #.replace(' ','')
res=(str2[str2.find(start)+len(start):str2.rfind(end)])
data.append(['Abstract', res])
tmp='Abstract' + ':'+ ' ' + res
dataTosend.append(tmp)
else:
res=(str2[str2.rfind(start)+len(start):str2.rfind(end)])
data.append([start, res])
tmp=start + ':'+ ' ' + res
dataTosend.append(tmp)
tokens = [] # sentence tokenization: split each section's paragraph into (heading, sentence) pairs
for idx in range(len(data)):
head = data[idx][0]
para = data[idx][1]
exp = sent_tokenize(para)
for val in exp:
tokens.append([head, val])
sent_data = []
for head, sent in tokens:
break_sent = r'\\. [A-Z]|\\.[A-Z]' # split where two or more sentences were fused into the same chunk
match = re.findall(break_sent, sent)
if len(match) >= 1:
for i in range (len(match)):
idx, _ = re.search(break_sent, sent).span()
sent_data.append( sent[:int(idx)+1].strip())
sent = sent[int(idx)+1:].strip()
if (re.search('^[a-z]|^[,;]', sent)): # fragment starting lowercase or with punctuation: attach it to the previous sentence
if sent_data != []:
last_val = sent_data.pop()
new_val = last_val + ' ' + sent
sent_data.append( new_val)
else:
sent_data.append( sent)
return sent_data
def get_true_option(d, default_value=None):
if isinstance(d, dict):
for k,v in d.items():
if (isinstance(v, str) and v.lower() == 'true') or (isinstance(v, bool) and v == True):
return k
return default_value
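# Example: get_true_option({'glove': 'True', 'fasttext': 'False'}) returns 'glove';
# get_true_option({}, 'glove') falls back to the default value.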
def extract_data(location):
files = [x for x in Path(location).iterdir() if x.suffix == '.pdf']
if not files:
raise ValueError(f'no pdf file found in directory {location}')
sentences = []
for file in files:
data=pdf2txtInternal(file)
sentences.append(data)
return [item for sublist in sentences for item in sublist]
def keyWordGeneration( keywords,deploy_loc, pretrained_loc):
keywords = keywords.split()
noOfKeyword = len(keywords)
embeddings = {}
word = ''
print(pretrained_loc)
with open(pretrained_loc, 'r', encoding="utf8") as f:
header = f.readline()
header = header.split(' ')
vocab_size = int(header[0])
embed_size = int(header[1])
for i in range(vocab_size):
data = f.readline().strip().split(' ')
word = data[0]
embeddings[word] = [float(x) for x in data[1:]]
readData=pd.DataFrame([],columns=['Keyword'])
for i in range(noOfKeyword):
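# [1:6]: the nearest vector is the keyword itself, so skip it and keep the
# five closest neighbours by euclidean distance in the embedding space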
neighbours = (sorted(embeddings.keys(), key=lambda word: spatial.distance.euclidean(embeddings[word], embeddings[keywords[i]])) )[1:6]
readData = readData.append({'Keyword': keywords[i]}, ignore_index=True)
for j in range(len(neighbours)):
readData = readData.append({'Keyword': neighbours[j]}, ignore_index=True)
readData.to_csv( Path(deploy_loc)/"keywordDataBase.csv",encoding='utf-8',index=False)
return set( readData['Keyword'])
def dataClassifyWithKw(sentences, keywords):
df = pd.DataFrame(sentences, columns=['File'])
pattern = '|'.join(keywords)
df['Label'] = df.File.str.contains(pattern)
return df
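# Example (hypothetical data): dataClassifyWithKw(['dialysis unit opened',
# 'weather update'], {'dialysis'}) labels the rows [True, False] -- a single
# regex alternation over the whole keyword set marks every matching sentence.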
def to_dataframe(data_loc, keywords, pretrained_type, embedding_size=300, deploy_loc=None, train=True):
pretrained_loc = checkAndDownloadPretrainedModel(pretrained_type, embedding_size)
sentences = extract_data(data_loc)
if train:
keywords = keyWordGeneration( keywords,deploy_loc, pretrained_loc)
df = dataClassifyWithKw(sentences, keywords)
return df
def get_pretrained_model_path():
from AION.appfe.appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
def checkAndDownloadPretrainedModel(preTrainedModel, embedding_size=300):
models = {'glove':{50:'glove.6B.50d.w2vformat.txt',100:'glove.6B.100d.w2vformat.txt',200:'glove.6B.200d.w2vformat.txt',300:'glove.6B.300d.w2vformat.txt'}, 'fasttext':{300:'wiki-news-300d-1M.vec'}}
supported_models = [x for y in models.values() for x in y.values()]
embedding_sizes = {x:y.keys() for x,y in models.items()}
if embedding_size not in embedding_sizes[preTrainedModel.lower()]:
raise ValueError(f"Embedding size '{embedding_size}' not supported for {preTrainedModel}")
selected_model = models[preTrainedModel.lower()][embedding_size]
modelsPath = get_pretrained_model_path()
p = Path(modelsPath).glob('**/*')
modelsDownloaded = [x.name for x in p if x.name in supported_models]
local_file_path = None
if selected_model not in modelsDownloaded:
if preTrainedModel.lower() == "glove":
try:
location = Path(modelsPath)
local_file_path = location/f"glove.6B.{embedding_size}d.w2vformat.txt"
file_test, header_test = urllib.request.urlretrieve(f'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/glove.6B.{embedding_size}d.w2vformat.txt', local_file_path)
except Exception as e:
raise ValueError("Error: unable to download glove pretrained model, please try again or download it manually and placed it at {}. ".format(location)+str(e))
elif preTrainedModel.lower() == "fasttext":
try:
location = Path(modelsPath)
local_file_path = location/"wiki-news-300d-1M.vec.zip"
url = 'https://aion-pretrained-models.s3.ap-south-1.amazonaws.com/text/wiki-news-300d-1M.vec.zip'
file_test, header_test = urllib.request.urlretrieve(url, local_file_path)
with zipfile.ZipFile(local_file_path) as zip_ref:
zip_ref.extractall(location)
Path(local_file_path).unlink()
except Exception as e:
raise ValueError("Error: unable to download fastText pretrained model, please try again or download it manually and placed it at {}. ".format(location)+str(e))
return Path(modelsPath)/selected_model
def get_params(profiler):
pretrained_model = get_true_option(profiler.get('textConversionMethod', {}), 'Glove')
embedding_size = get_true_option(profiler['embeddingSize'][pretrained_model], 'default')
pretrained_model = pretrained_model.lower()
if pretrained_model == 'fasttext':
embedding_size = 300
elif pretrained_model == 'glove':
sizes = {'default':300, '50d':50, '100d':100,'200d':200, '300d':300}
embedding_size = sizes[embedding_size]
keywords = profiler['KeyWords']
return "delhi dialysis", pretrained_model, embedding_size
def deploy(deploy_path, pretrained_model, embedding_size, output_columns,model_file, bert_length):
from AION.mlac.ml.core.imports import importModule
def create_predict(pretrained_model, embedding_size):
importer = importModule()
common_importes = [
{'module': 'sys', 'mod_from': None, 'mod_as': None},
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}
]
for mod in common_importes:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
local_importes = [
{'module': 'selector', 'mod_from': 'script.selector', 'mod_as': None},
{'module': 'inputprofiler', 'mod_from': 'script.inputprofiler', 'mod_as': None},
{'module': 'trained_model', 'mod_from': 'script.trained_model', 'mod_as': None},
{'module': 'summarize', 'mod_from': None, 'mod_as': None}
]
for mod in local_importes:
importer.addLocalModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
text = f"""
def predict(data):
try:
dataLocation = Path(data)
if not dataLocation.is_dir():
raise ValueError('Input should be a valid directory')
keywords_file = Path(__file__).parent/'keywordDataBase.csv'
if not keywords_file.exists():
raise ValueError('keywordDataBase.csv is missing in trained model output')
keywords_df = pd.read_csv(keywords_file)
if 'Keyword' not in keywords_df.columns:
raise ValueError('keywordDataBase.csv file in output folder is corrupt')
pretrained_type = '{pretrained_model.lower()}'
embedding_sz = {embedding_size}
keywords = keywords_df['Keyword'].tolist()
df = summarize.to_dataframe(dataLocation, keywords, pretrained_type, embedding_sz, train=False)
df0 = df.copy()
profilerobj = inputprofiler()
df = profilerobj.apply_profiler(df)
selectobj = selector()
df = selectobj.apply_selector(df)
modelobj = trained_model()
output = modelobj.predict(df,df0)
outputjson = {{"status":"SUCCESS","data":output}}
print("predictions:",outputjson)
except KeyError as e:
output = {{"status":"FAIL","message":str(e).strip('"')}}
print("predictions:",json.dumps(output))
return (json.dumps(output))
except Exception as e:
output = {{"status":"FAIL","message":str(e).strip('"')}}
print("predictions:",json.dumps(output))
return (json.dumps(output))
if __name__ == "__main__":
output = predict(sys.argv[1])
"""
code = importer.getCode()
code += text
return code
def create_profiler(output_columns):
importer = importModule()
common_importes = [
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'}
]
for mod in common_importes:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
text = f"""
class inputprofiler(object):
def __init__(self):
self.model = None
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if preprocess_path.exists():
self.model = joblib.load(preprocess_path)
else:
raise ValueError('Preprocess model not found')
def apply_profiler(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
if self.model:
df = self.model.transform(df)
if isinstance(df, scipy.sparse.spmatrix):
df = pd.DataFrame(df.toarray(), columns={output_columns})
else:
df = pd.DataFrame(df, columns={output_columns})
return(df)
"""
code = importer.getCode()
code += text
return code
def create_selector(output_columns):
importer = importModule()
common_importes = [
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'}
]
for mod in common_importes:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
text = f"""
class selector(object):
def apply_selector(self,df):
df = df[{output_columns}]
return(df)
"""
code = importer.getCode()
code += text
return code
def create_train(model_file, bert_length):
importer = importModule()
common_importes = [
{'module': 'os', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
{'module': 'Summarizer', 'mod_from': 'summarizer', 'mod_as': None }
]
for mod in common_importes:
importer.addModule(mod['module'], mod_from=mod['mod_from'], mod_as=mod['mod_as'])
text = f"""
class trained_model(object):
def __init__(self):
self.model = joblib.load(os.path.join(os.path.dirname(__file__),'..','model','{model_file}'))
def predict(self, X, df_org):
X = X.astype(np.float32)
df_org['predicted'] = pd.DataFrame(self.model.predict(X))
textToSum=""
for i in df_org.index:
if (df_org['predicted'][i] or df_org['Label'][i]) :
textToSum=textToSum + " " + df_org["File"][i]
bert_model = Summarizer()
bert_summary=bert_model(textToSum, min_length={bert_length})
return bert_summary
"""
code = importer.getCode()
code += text
return code
deploy_path = Path(deploy_path)
aion_prediction = deploy_path/'aion_predict.py'
profiler_file = deploy_path/'script'/'inputprofiler.py'
selector_file = deploy_path/'script'/'selector.py'
trainer_file = deploy_path/'script'/'trained_model.py'
with open(aion_prediction, 'w') as f:
f.write(create_predict(pretrained_model, embedding_size))
with open(profiler_file, 'w') as f:
f.write(create_profiler(output_columns))
with open(selector_file, 'w') as f:
f.write(create_selector(output_columns))
with open(trainer_file, 'w') as f:
f.write(create_train(model_file, bert_length))
cwf = Path(__file__)
shutil.copy(cwf, deploy_path/cwf.name)
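# Note: the generated aion_predict.py does a plain 'import summarize', so this
# module (assumed to be saved as summarize.py) is copied alongside the scripts
# above to satisfy that import at prediction time.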
# require dataLocation for reading files
# require deployLocation for saving keywords
# require pretrained model location
# require pretrained model type
# require keywwords
if __name__ == '__main__':
dataLocation = r'C:\\Harish\\aion\\task\\task\\summarization\\reference\\pdfs'
deployLocation = r'C:\\Users\\vashistah\\AppData\\Local\\HCLT\\AION\\uses'
pretrained_loc = r"C:\\Users\\vashistah\\AppData\\Local\\HCLT\\AION\\PreTrainedModels\\TextProcessing"
pretrained_type = 'glove'
keywords = 'delhi dialysis'
data = to_dataframe(dataLocation, keywords, pretrained_type,300, deployLocation, train=True)
print(data)
data.to_csv(Path(deployLocation)/'output.csv', index=False)
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for generating the feature_statistics proto from generic data.
The proto is used as input for the Overview visualization.
"""
from facets_overview.base_generic_feature_statistics_generator import BaseGenericFeatureStatisticsGenerator
import facets_overview.feature_statistics_pb2 as fs
class GenericFeatureStatisticsGenerator(BaseGenericFeatureStatisticsGenerator):
"""Generator of stats proto from generic data."""
def __init__(self):
BaseGenericFeatureStatisticsGenerator.__init__(
self, fs.FeatureNameStatistics, fs.DatasetFeatureStatisticsList,
fs.Histogram)
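# --- Illustrative usage sketch (assumes pandas; ProtoFromDataFrames comes from
# the facets_overview base class) ---
# import pandas as pd
# gen = GenericFeatureStatisticsGenerator()
# proto = gen.ProtoFromDataFrames([{'name': 'train', 'table': pd.DataFrame({'x': [1, 2, 3]})}])
# base64-encoding proto.SerializeToString() yields the payload consumed by the
# Facets Overview front-end.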
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: feature_statistics.proto
import sys
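# _b is a Python 2/3 compatibility shim: on Python 2 string literals are already
# bytes (identity); on Python 3 they are encoded as latin-1 to recover raw bytes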
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='feature_statistics.proto',
package='featureStatistics',
syntax='proto3',
serialized_pb=_b('\\n\\x18\\x66\\x65\\x61ture_statistics.proto\\x12\\x11\\x66\\x65\\x61tureStatistics\\"]\\n\\x1c\\x44\\x61tasetFeatureStatisticsList\\x12=\\n\\x08\\x64\\x61tasets\\x18\\x01 \\x03(\\x0b\\x32+.featureStatistics.DatasetFeatureStatistics\\"\\x99\\x01\\n\\x18\\x44\\x61tasetFeatureStatistics\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x14\\n\\x0cnum_examples\\x18\\x02 \\x01(\\x04\\x12\\x1d\\n\\x15weighted_num_examples\\x18\\x04 \\x01(\\x01\\x12:\\n\\x08\\x66\\x65\\x61tures\\x18\\x03 \\x03(\\x0b\\x32(.featureStatistics.FeatureNameStatistics\\"\\x8b\\x03\\n\\x15\\x46\\x65\\x61tureNameStatistics\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12;\\n\\x04type\\x18\\x02 \\x01(\\x0e\\x32-.featureStatistics.FeatureNameStatistics.Type\\x12\\x39\\n\\tnum_stats\\x18\\x03 \\x01(\\x0b\\x32$.featureStatistics.NumericStatisticsH\\x00\\x12;\\n\\x0cstring_stats\\x18\\x04 \\x01(\\x0b\\x32#.featureStatistics.StringStatisticsH\\x00\\x12\\x39\\n\\x0b\\x62ytes_stats\\x18\\x05 \\x01(\\x0b\\x32\\".featureStatistics.BytesStatisticsH\\x00\\x12\\x38\\n\\x0c\\x63ustom_stats\\x18\\x06 \\x03(\\x0b\\x32\\".featureStatistics.CustomStatistic\\"1\\n\\x04Type\\x12\\x07\\n\\x03INT\\x10\\x00\\x12\\t\\n\\x05\\x46LOAT\\x10\\x01\\x12\\n\\n\\x06STRING\\x10\\x02\\x12\\t\\n\\x05\\x42YTES\\x10\\x03\\x42\\x07\\n\\x05stats\\"x\\n\\x18WeightedCommonStatistics\\x12\\x17\\n\\x0fnum_non_missing\\x18\\x01 \\x01(\\x01\\x12\\x13\\n\\x0bnum_missing\\x18\\x02 \\x01(\\x01\\x12\\x16\\n\\x0e\\x61vg_num_values\\x18\\x03 \\x01(\\x01\\x12\\x16\\n\\x0etot_num_values\\x18\\x04 \\x01(\\x01\\"w\\n\\x0f\\x43ustomStatistic\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x03num\\x18\\x02 \\x01(\\x01H\\x00\\x12\\r\\n\\x03str\\x18\\x03 \\x01(\\tH\\x00\\x12\\x31\\n\\thistogram\\x18\\x04 \\x01(\\x0b\\x32\\x1c.featureStatistics.HistogramH\\x00\\x42\\x05\\n\\x03val\\"\\xaa\\x02\\n\\x11NumericStatistics\\x12\\x39\\n\\x0c\\x63ommon_stats\\x18\\x01 \\x01(\\x0b\\x32#.featureStatistics.CommonStatistics\\x12\\x0c\\n\\x04mean\\x18\\x02 \\x01(\\x01\\x12\\x0f\\n\\x07std_dev\\x18\\x03 \\x01(\\x01\\x12\\x11\\n\\tnum_zeros\\x18\\x04 \\x01(\\x04\\x12\\x0b\\n\\x03min\\x18\\x05 \\x01(\\x01\\x12\\x0e\\n\\x06median\\x18\\x06 \\x01(\\x01\\x12\\x0b\\n\\x03max\\x18\\x07 \\x01(\\x01\\x12\\x30\\n\\nhistograms\\x18\\x08 \\x03(\\x0b\\x32\\x1c.featureStatistics.Histogram\\x12L\\n\\x16weighted_numeric_stats\\x18\\t \\x01(\\x0b\\x32,.featureStatistics.WeightedNumericStatistics\\"\\x8c\\x03\\n\\x10StringStatistics\\x12\\x39\\n\\x0c\\x63ommon_stats\\x18\\x01 \\x01(\\x0b\\x32#.featureStatistics.CommonStatistics\\x12\\x0e\\n\\x06unique\\x18\\x02 \\x01(\\x04\\x12\\x44\\n\\ntop_values\\x18\\x03 \\x03(\\x0b\\x32\\x30.featureStatistics.StringStatistics.FreqAndValue\\x12\\x12\\n\\navg_length\\x18\\x04 \\x01(\\x02\\x12\\x38\\n\\x0erank_histogram\\x18\\x05 \\x01(\\x0b\\x32 .featureStatistics.RankHistogram\\x12J\\n\\x15weighted_string_stats\\x18\\x06 \\x01(\\x0b\\x32+.featureStatistics.WeightedStringStatistics\\x1aM\\n\\x0c\\x46reqAndValue\\x12\\x1b\\n\\x0f\\x64\\x65precated_freq\\x18\\x01 \\x01(\\x04\\x42\\x02\\x18\\x01\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t\\x12\\x11\\n\\tfrequency\\x18\\x03 \\x01(\\x01\\"|\\n\\x19WeightedNumericStatistics\\x12\\x0c\\n\\x04mean\\x18\\x01 \\x01(\\x01\\x12\\x0f\\n\\x07std_dev\\x18\\x02 \\x01(\\x01\\x12\\x0e\\n\\x06median\\x18\\x03 \\x01(\\x01\\x12\\x30\\n\\nhistograms\\x18\\x04 \\x03(\\x0b\\x32\\x1c.featureStatistics.Histogram\\"\\x9a\\x01\\n\\x18WeightedStringStatistics\\x12\\x44\\n\\ntop_values\\x18\\x01 \\x03(\\x0b\\x32\\x30.featureStatistics.StringStatistics.FreqAndValue\\x12\\x38\\n\\x0erank_histogram\\x18\\x02 \\x01(\\x0b\\x32 .featureStatistics.RankHistogram\\"\\xa1\\x01\\n\\x0f\\x42ytesStatistics\\x12\\x39\\n\\x0c\\x63ommon_stats\\x18\\x01 \\x01(\\x0b\\x32#.featureStatistics.CommonStatistics\\x12\\x0e\\n\\x06unique\\x18\\x02 \\x01(\\x04\\x12\\x15\\n\\ravg_num_bytes\\x18\\x03 \\x01(\\x02\\x12\\x15\\n\\rmin_num_bytes\\x18\\x04 \\x01(\\x02\\x12\\x15\\n\\rmax_num_bytes\\x18\\x05 \\x01(\\x02\\"\\xed\\x02\\n\\x10\\x43ommonStatistics\\x12\\x17\\n\\x0fnum_non_missing\\x18\\x01 \\x01(\\x04\\x12\\x13\\n\\x0bnum_missing\\x18\\x02 \\x01(\\x04\\x12\\x16\\n\\x0emin_num_values\\x18\\x03 \\x01(\\x04\\x12\\x16\\n\\x0emax_num_values\\x18\\x04 \\x01(\\x04\\x12\\x16\\n\\x0e\\x61vg_num_values\\x18\\x05 \\x01(\\x02\\x12\\x16\\n\\x0etot_num_values\\x18\\x08 \\x01(\\x04\\x12:\\n\\x14num_values_histogram\\x18\\x06 \\x01(\\x0b\\x32\\x1c.featureStatistics.Histogram\\x12J\\n\\x15weighted_common_stats\\x18\\x07 \\x01(\\x0b\\x32+.featureStatistics.WeightedCommonStatistics\\x12\\x43\\n\\x1d\\x66\\x65\\x61ture_list_length_histogram\\x18\\t \\x01(\\x0b\\x32\\x1c.featureStatistics.Histogram\\"\\xc4\\x02\\n\\tHistogram\\x12\\x0f\\n\\x07num_nan\\x18\\x01 \\x01(\\x04\\x12\\x15\\n\\rnum_undefined\\x18\\x02 \\x01(\\x04\\x12\\x34\\n\\x07\\x62uckets\\x18\\x03 \\x03(\\x0b\\x32#.featureStatistics.Histogram.Bucket\\x12\\x38\\n\\x04type\\x18\\x04 \\x01(\\x0e\\x32*.featureStatistics.Histogram.HistogramType\\x12\\x0c\\n\\x04name\\x18\\x05 \\x01(\\t\\x1a\\x63\\n\\x06\\x42ucket\\x12\\x11\\n\\tlow_value\\x18\\x01 \\x01(\\x01\\x12\\x12\\n\\nhigh_value\\x18\\x02 \\x01(\\x01\\x12\\x1c\\n\\x10\\x64\\x65precated_count\\x18\\x03 \\x01(\\x04\\x42\\x02\\x18\\x01\\x12\\x14\\n\\x0csample_count\\x18\\x04 \\x01(\\x01\\",\\n\\rHistogramType\\x12\\x0c\\n\\x08STANDARD\\x10\\x00\\x12\\r\\n\\tQUANTILES\\x10\\x01\\"\\xc9\\x01\\n\\rRankHistogram\\x12\\x38\\n\\x07\\x62uckets\\x18\\x01 \\x03(\\x0b\\x32\\'.featureStatistics.RankHistogram.Bucket\\x12\\x0c\\n\\x04name\\x18\\x02 \\x01(\\t\\x1ap\\n\\x06\\x42ucket\\x12\\x10\\n\\x08low_rank\\x18\\x01 \\x01(\\x04\\x12\\x11\\n\\thigh_rank\\x18\\x02 \\x01(\\x04\\x12\\x1c\\n\\x10\\x64\\x65precated_count\\x18\\x03 \\x01(\\x04\\x42\\x02\\x18\\x01\\x12\\r\\n\\x05label\\x18\\x04 \\x01(\\t\\x12\\x14\\n\\x0csample_count\\x18\\x05 \\x01(\\x01\\x62\\x06proto3')
)
_FEATURENAMESTATISTICS_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='featureStatistics.FeatureNameStatistics.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BYTES', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=636,
serialized_end=685,
)
_sym_db.RegisterEnumDescriptor(_FEATURENAMESTATISTICS_TYPE)
_HISTOGRAM_HISTOGRAMTYPE = _descriptor.EnumDescriptor(
name='HistogramType',
full_name='featureStatistics.Histogram.HistogramType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STANDARD', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUANTILES', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2735,
serialized_end=2779,
)
_sym_db.RegisterEnumDescriptor(_HISTOGRAM_HISTOGRAMTYPE)
_DATASETFEATURESTATISTICSLIST = _descriptor.Descriptor(
name='DatasetFeatureStatisticsList',
full_name='featureStatistics.DatasetFeatureStatisticsList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='datasets', full_name='featureStatistics.DatasetFeatureStatisticsList.datasets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=47,
serialized_end=140,
)
_DATASETFEATURESTATISTICS = _descriptor.Descriptor(
name='DatasetFeatureStatistics',
full_name='featureStatistics.DatasetFeatureStatistics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='featureStatistics.DatasetFeatureStatistics.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_examples', full_name='featureStatistics.DatasetFeatureStatistics.num_examples', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weighted_num_examples', full_name='featureStatistics.DatasetFeatureStatistics.weighted_num_examples', index=2,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='features', full_name='featureStatistics.DatasetFeatureStatistics.features', index=3,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=296,
)
_FEATURENAMESTATISTICS = _descriptor.Descriptor(
name='FeatureNameStatistics',
full_name='featureStatistics.FeatureNameStatistics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='featureStatistics.FeatureNameStatistics.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='featureStatistics.FeatureNameStatistics.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_stats', full_name='featureStatistics.FeatureNameStatistics.num_stats', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='string_stats', full_name='featureStatistics.FeatureNameStatistics.string_stats', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bytes_stats', full_name='featureStatistics.FeatureNameStatistics.bytes_stats', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='custom_stats', full_name='featureStatistics.FeatureNameStatistics.custom_stats', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FEATURENAMESTATISTICS_TYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='stats', full_name='featureStatistics.FeatureNameStatistics.stats',
index=0, containing_type=None, fields=[]),
],
serialized_start=299,
serialized_end=694,
)
_WEIGHTEDCOMMONSTATISTICS = _descriptor.Descriptor(
name='WeightedCommonStatistics',
full_name='featureStatistics.WeightedCommonStatistics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_non_missing', full_name='featureStatistics.WeightedCommonStatistics.num_non_missing', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_missing', full_name='featureStatistics.WeightedCommonStatistics.num_missing', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='avg_num_values', full_name='featureStatistics.WeightedCommonStatistics.avg_num_values', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tot_num_values', full_name='featureStatistics.WeightedCommonStatistics.tot_num_values', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=696,
serialized_end=816,
)
_CUSTOMSTATISTIC = _descriptor.Descriptor(
name='CustomStatistic',
full_name='featureStatistics.CustomStatistic',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='featureStatistics.CustomStatistic.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num', full_name='featureStatistics.CustomStatistic.num', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='str', full_name='featureStatistics.CustomStatistic.str', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='histogram', full_name='featureStatistics.CustomStatistic.histogram', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='val', full_name='featureStatistics.CustomStatistic.val',
index=0, containing_type=None, fields=[]),
],
serialized_start=818,
serialized_end=937,
)
_NUMERICSTATISTICS = _descriptor.Descriptor(
name='NumericStatistics',
full_name='featureStatistics.NumericStatistics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='common_stats', full_name='featureStatistics.NumericStatistics.common_stats', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mean', full_name='featureStatistics.NumericStatistics.mean', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='std_dev', full_name='featureStatistics.NumericStatistics.std_dev', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_zeros', full_name='featureStatistics.NumericStatistics.num_zeros', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min', full_name='featureStatistics.NumericStatistics.min', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='median', full_name='featureStatistics.NumericStatistics.median', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max', full_name='featureStatistics.NumericStatistics.max', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='histograms', full_name='featureStatistics.NumericStatistics.histograms', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weighted_numeric_stats', full_name='featureStatistics.NumericStatistics.weighted_numeric_stats', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=940,
serialized_end=1238,
)
_STRINGSTATISTICS_FREQANDVALUE = _descriptor.Descriptor(
name='FreqAndValue',
full_name='featureStatistics.StringStatistics.FreqAndValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='deprecated_freq', full_name='featureStatistics.StringStatistics.FreqAndValue.deprecated_freq', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\\030\\001'))),
_descriptor.FieldDescriptor(
name='value', full_name='featureStatistics.StringStatistics.FreqAndValue.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='frequency', full_name='featureStatistics.StringStatistics.FreqAndValue.frequency', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1560,
serialized_end=1637,
)
_STRINGSTATISTICS = _descriptor.Descriptor(
name='StringStatistics',
full_name='featureStatistics.StringStatistics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='common_stats', full_name='featureStatistics.StringStatistics.common_stats', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='unique', full_name='featureStatistics.StringStatistics.unique', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='top_values', full_name='featureStatistics.StringStatistics.top_values', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='avg_length', full_name='featureStatistics.StringStatistics.avg_length', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rank_histogram', full_name='featureStatistics.StringStatistics.rank_histogram', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weighted_string_stats', full_name='featureStatistics.StringStatistics.weighted_string_stats', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_STRINGSTATISTICS_FREQANDVALUE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1241,
serialized_end=1637,
)
_WEIGHTEDNUMERICSTATISTICS = _descriptor.Descriptor(
name='WeightedNumericStatistics',
full_name='featureStatistics.WeightedNumericStatistics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mean', full_name='featureStatistics.WeightedNumericStatistics.mean', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='std_dev', full_name='featureStatistics.WeightedNumericStatistics.std_dev', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='median', full_name='featureStatistics.WeightedNumericStatistics.median', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='histograms', full_name='featureStatistics.WeightedNumericStatistics.histograms', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1639,
serialized_end=1763,
)
_WEIGHTEDSTRINGSTATISTICS = _descriptor.Descriptor(
name='WeightedStringStatistics',
full_name='featureStatistics.WeightedStringStatistics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='top_values', full_name='featureStatistics.WeightedStringStatistics.top_values', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rank_histogram', full_name='featureStatistics.WeightedStringStatistics.rank_histogram', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1766,
serialized_end=1920,
)
_BYTESSTATISTICS = _descriptor.Descriptor(
name='BytesStatistics',
full_name='featureStatistics.BytesStatistics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='common_stats', full_name='featureStatistics.BytesStatistics.common_stats', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='unique', full_name='featureStatistics.BytesStatistics.unique', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='avg_num_bytes', full_name='featureStatistics.BytesStatistics.avg_num_bytes', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_num_bytes', full_name='featureStatistics.BytesStatistics.min_num_bytes', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_num_bytes', full_name='featureStatistics.BytesStatistics.max_num_bytes', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1923,
serialized_end=2084,
)
_COMMONSTATISTICS = _descriptor.Descriptor(
name='CommonStatistics',
full_name='featureStatistics.CommonStatistics',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_non_missing', full_name='featureStatistics.CommonStatistics.num_non_missing', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_missing', full_name='featureStatistics.CommonStatistics.num_missing', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_num_values', full_name='featureStatistics.CommonStatistics.min_num_values', index=2,
      number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_num_values', full_name='featureStatistics.CommonStatistics.max_num_values', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='avg_num_values', full_name='featureStatistics.CommonStatistics.avg_num_values', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tot_num_values', full_name='featureStatistics.CommonStatistics.tot_num_values', index=5,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_values_histogram', full_name='featureStatistics.CommonStatistics.num_values_histogram', index=6,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weighted_common_stats', full_name='featureStatistics.CommonStatistics.weighted_common_stats', index=7,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='feature_list_length_histogram', full_name='featureStatistics.CommonStatistics.feature_list_length_histogram', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2087,
serialized_end=2452,
)
_HISTOGRAM_BUCKET = _descriptor.Descriptor(
name='Bucket',
full_name='featureStatistics.Histogram.Bucket',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='low_value', full_name='featureStatistics.Histogram.Bucket.low_value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='high_value', full_name='featureStatistics.Histogram.Bucket.high_value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deprecated_count', full_name='featureStatistics.Histogram.Bucket.deprecated_count', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))),
_descriptor.FieldDescriptor(
name='sample_count', full_name='featureStatistics.Histogram.Bucket.sample_count', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2634,
serialized_end=2733,
)
_HISTOGRAM = _descriptor.Descriptor(
name='Histogram',
full_name='featureStatistics.Histogram',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_nan', full_name='featureStatistics.Histogram.num_nan', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_undefined', full_name='featureStatistics.Histogram.num_undefined', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='buckets', full_name='featureStatistics.Histogram.buckets', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='featureStatistics.Histogram.type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='featureStatistics.Histogram.name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_HISTOGRAM_BUCKET, ],
enum_types=[
_HISTOGRAM_HISTOGRAMTYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2455,
serialized_end=2779,
)
_RANKHISTOGRAM_BUCKET = _descriptor.Descriptor(
name='Bucket',
full_name='featureStatistics.RankHistogram.Bucket',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='low_rank', full_name='featureStatistics.RankHistogram.Bucket.low_rank', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='high_rank', full_name='featureStatistics.RankHistogram.Bucket.high_rank', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deprecated_count', full_name='featureStatistics.RankHistogram.Bucket.deprecated_count', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))),
_descriptor.FieldDescriptor(
name='label', full_name='featureStatistics.RankHistogram.Bucket.label', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sample_count', full_name='featureStatistics.RankHistogram.Bucket.sample_count', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2871,
serialized_end=2983,
)
_RANKHISTOGRAM = _descriptor.Descriptor(
name='RankHistogram',
full_name='featureStatistics.RankHistogram',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='buckets', full_name='featureStatistics.RankHistogram.buckets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='featureStatistics.RankHistogram.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_RANKHISTOGRAM_BUCKET, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2782,
serialized_end=2983,
)
_DATASETFEATURESTATISTICSLIST.fields_by_name['datasets'].message_type = _DATASETFEATURESTATISTICS
_DATASETFEATURESTATISTICS.fields_by_name['features'].message_type = _FEATURENAMESTATISTICS
_FEATURENAMESTATISTICS.fields_by_name['type'].enum_type = _FEATURENAMESTATISTICS_TYPE
_FEATURENAMESTATISTICS.fields_by_name['num_stats'].message_type = _NUMERICSTATISTICS
_FEATURENAMESTATISTICS.fields_by_name['string_stats'].message_type = _STRINGSTATISTICS
_FEATURENAMESTATISTICS.fields_by_name['bytes_stats'].message_type = _BYTESSTATISTICS
_FEATURENAMESTATISTICS.fields_by_name['custom_stats'].message_type = _CUSTOMSTATISTIC
_FEATURENAMESTATISTICS_TYPE.containing_type = _FEATURENAMESTATISTICS
_FEATURENAMESTATISTICS.oneofs_by_name['stats'].fields.append(
_FEATURENAMESTATISTICS.fields_by_name['num_stats'])
_FEATURENAMESTATISTICS.fields_by_name['num_stats'].containing_oneof = _FEATURENAMESTATISTICS.oneofs_by_name['stats']
_FEATURENAMESTATISTICS.oneofs_by_name['stats'].fields.append(
_FEATURENAMESTATISTICS.fields_by_name['string_stats'])
_FEATURENAMESTATISTICS.fields_by_name['string_stats'].containing_oneof = _FEATURENAMESTATISTICS.oneofs_by_name['stats']
_FEATURENAMESTATISTICS.oneofs_by_name['stats'].fields.append(
_FEATURENAMESTATISTICS.fields_by_name['bytes_stats'])
_FEATURENAMESTATISTICS.fields_by_name['bytes_stats'].containing_oneof = _FEATURENAMESTATISTICS.oneofs_by_name['stats']
_CUSTOMSTATISTIC.fields_by_name['histogram'].message_type = _HISTOGRAM
_CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append(
_CUSTOMSTATISTIC.fields_by_name['num'])
_CUSTOMSTATISTIC.fields_by_name['num'].containing_oneof = _CUSTOMSTATISTIC.oneofs_by_name['val']
_CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append(
_CUSTOMSTATISTIC.fields_by_name['str'])
_CUSTOMSTATISTIC.fields_by_name['str'].containing_oneof = _CUSTOMSTATISTIC.oneofs_by_name['val']
_CUSTOMSTATISTIC.oneofs_by_name['val'].fields.append(
_CUSTOMSTATISTIC.fields_by_name['histogram'])
_CUSTOMSTATISTIC.fields_by_name['histogram'].containing_oneof = _CUSTOMSTATISTIC.oneofs_by_name['val']
_NUMERICSTATISTICS.fields_by_name['common_stats'].message_type = _COMMONSTATISTICS
_NUMERICSTATISTICS.fields_by_name['histograms'].message_type = _HISTOGRAM
_NUMERICSTATISTICS.fields_by_name['weighted_numeric_stats'].message_type = _WEIGHTEDNUMERICSTATISTICS
_STRINGSTATISTICS_FREQANDVALUE.containing_type = _STRINGSTATISTICS
_STRINGSTATISTICS.fields_by_name['common_stats'].message_type = _COMMONSTATISTICS
_STRINGSTATISTICS.fields_by_name['top_values'].message_type = _STRINGSTATISTICS_FREQANDVALUE
_STRINGSTATISTICS.fields_by_name['rank_histogram'].message_type = _RANKHISTOGRAM
_STRINGSTATISTICS.fields_by_name['weighted_string_stats'].message_type = _WEIGHTEDSTRINGSTATISTICS
_WEIGHTEDNUMERICSTATISTICS.fields_by_name['histograms'].message_type = _HISTOGRAM
_WEIGHTEDSTRINGSTATISTICS.fields_by_name['top_values'].message_type = _STRINGSTATISTICS_FREQANDVALUE
_WEIGHTEDSTRINGSTATISTICS.fields_by_name['rank_histogram'].message_type = _RANKHISTOGRAM
_BYTESSTATISTICS.fields_by_name['common_stats'].message_type = _COMMONSTATISTICS
_COMMONSTATISTICS.fields_by_name['num_values_histogram'].message_type = _HISTOGRAM
_COMMONSTATISTICS.fields_by_name['weighted_common_stats'].message_type = _WEIGHTEDCOMMONSTATISTICS
_COMMONSTATISTICS.fields_by_name['feature_list_length_histogram'].message_type = _HISTOGRAM
_HISTOGRAM_BUCKET.containing_type = _HISTOGRAM
_HISTOGRAM.fields_by_name['buckets'].message_type = _HISTOGRAM_BUCKET
_HISTOGRAM.fields_by_name['type'].enum_type = _HISTOGRAM_HISTOGRAMTYPE
_HISTOGRAM_HISTOGRAMTYPE.containing_type = _HISTOGRAM
_RANKHISTOGRAM_BUCKET.containing_type = _RANKHISTOGRAM
_RANKHISTOGRAM.fields_by_name['buckets'].message_type = _RANKHISTOGRAM_BUCKET
DESCRIPTOR.message_types_by_name['DatasetFeatureStatisticsList'] = _DATASETFEATURESTATISTICSLIST
DESCRIPTOR.message_types_by_name['DatasetFeatureStatistics'] = _DATASETFEATURESTATISTICS
DESCRIPTOR.message_types_by_name['FeatureNameStatistics'] = _FEATURENAMESTATISTICS
DESCRIPTOR.message_types_by_name['WeightedCommonStatistics'] = _WEIGHTEDCOMMONSTATISTICS
DESCRIPTOR.message_types_by_name['CustomStatistic'] = _CUSTOMSTATISTIC
DESCRIPTOR.message_types_by_name['NumericStatistics'] = _NUMERICSTATISTICS
DESCRIPTOR.message_types_by_name['StringStatistics'] = _STRINGSTATISTICS
DESCRIPTOR.message_types_by_name['WeightedNumericStatistics'] = _WEIGHTEDNUMERICSTATISTICS
DESCRIPTOR.message_types_by_name['WeightedStringStatistics'] = _WEIGHTEDSTRINGSTATISTICS
DESCRIPTOR.message_types_by_name['BytesStatistics'] = _BYTESSTATISTICS
DESCRIPTOR.message_types_by_name['CommonStatistics'] = _COMMONSTATISTICS
DESCRIPTOR.message_types_by_name['Histogram'] = _HISTOGRAM
DESCRIPTOR.message_types_by_name['RankHistogram'] = _RANKHISTOGRAM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DatasetFeatureStatisticsList = _reflection.GeneratedProtocolMessageType('DatasetFeatureStatisticsList', (_message.Message,), dict(
DESCRIPTOR = _DATASETFEATURESTATISTICSLIST,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.DatasetFeatureStatisticsList)
))
_sym_db.RegisterMessage(DatasetFeatureStatisticsList)
DatasetFeatureStatistics = _reflection.GeneratedProtocolMessageType('DatasetFeatureStatistics', (_message.Message,), dict(
DESCRIPTOR = _DATASETFEATURESTATISTICS,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.DatasetFeatureStatistics)
))
_sym_db.RegisterMessage(DatasetFeatureStatistics)
FeatureNameStatistics = _reflection.GeneratedProtocolMessageType('FeatureNameStatistics', (_message.Message,), dict(
DESCRIPTOR = _FEATURENAMESTATISTICS,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.FeatureNameStatistics)
))
_sym_db.RegisterMessage(FeatureNameStatistics)
WeightedCommonStatistics = _reflection.GeneratedProtocolMessageType('WeightedCommonStatistics', (_message.Message,), dict(
DESCRIPTOR = _WEIGHTEDCOMMONSTATISTICS,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.WeightedCommonStatistics)
))
_sym_db.RegisterMessage(WeightedCommonStatistics)
CustomStatistic = _reflection.GeneratedProtocolMessageType('CustomStatistic', (_message.Message,), dict(
DESCRIPTOR = _CUSTOMSTATISTIC,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.CustomStatistic)
))
_sym_db.RegisterMessage(CustomStatistic)
NumericStatistics = _reflection.GeneratedProtocolMessageType('NumericStatistics', (_message.Message,), dict(
DESCRIPTOR = _NUMERICSTATISTICS,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.NumericStatistics)
))
_sym_db.RegisterMessage(NumericStatistics)
StringStatistics = _reflection.GeneratedProtocolMessageType('StringStatistics', (_message.Message,), dict(
FreqAndValue = _reflection.GeneratedProtocolMessageType('FreqAndValue', (_message.Message,), dict(
DESCRIPTOR = _STRINGSTATISTICS_FREQANDVALUE,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.StringStatistics.FreqAndValue)
))
,
DESCRIPTOR = _STRINGSTATISTICS,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.StringStatistics)
))
_sym_db.RegisterMessage(StringStatistics)
_sym_db.RegisterMessage(StringStatistics.FreqAndValue)
WeightedNumericStatistics = _reflection.GeneratedProtocolMessageType('WeightedNumericStatistics', (_message.Message,), dict(
DESCRIPTOR = _WEIGHTEDNUMERICSTATISTICS,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.WeightedNumericStatistics)
))
_sym_db.RegisterMessage(WeightedNumericStatistics)
WeightedStringStatistics = _reflection.GeneratedProtocolMessageType('WeightedStringStatistics', (_message.Message,), dict(
DESCRIPTOR = _WEIGHTEDSTRINGSTATISTICS,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.WeightedStringStatistics)
))
_sym_db.RegisterMessage(WeightedStringStatistics)
BytesStatistics = _reflection.GeneratedProtocolMessageType('BytesStatistics', (_message.Message,), dict(
DESCRIPTOR = _BYTESSTATISTICS,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.BytesStatistics)
))
_sym_db.RegisterMessage(BytesStatistics)
CommonStatistics = _reflection.GeneratedProtocolMessageType('CommonStatistics', (_message.Message,), dict(
DESCRIPTOR = _COMMONSTATISTICS,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.CommonStatistics)
))
_sym_db.RegisterMessage(CommonStatistics)
Histogram = _reflection.GeneratedProtocolMessageType('Histogram', (_message.Message,), dict(
Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict(
DESCRIPTOR = _HISTOGRAM_BUCKET,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.Histogram.Bucket)
))
,
DESCRIPTOR = _HISTOGRAM,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.Histogram)
))
_sym_db.RegisterMessage(Histogram)
_sym_db.RegisterMessage(Histogram.Bucket)
RankHistogram = _reflection.GeneratedProtocolMessageType('RankHistogram', (_message.Message,), dict(
Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict(
DESCRIPTOR = _RANKHISTOGRAM_BUCKET,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.RankHistogram.Bucket)
))
,
DESCRIPTOR = _RANKHISTOGRAM,
__module__ = 'feature_statistics_pb2'
# @@protoc_insertion_point(class_scope:featureStatistics.RankHistogram)
))
_sym_db.RegisterMessage(RankHistogram)
_sym_db.RegisterMessage(RankHistogram.Bucket)
_STRINGSTATISTICS_FREQANDVALUE.fields_by_name['deprecated_freq'].has_options = True
_STRINGSTATISTICS_FREQANDVALUE.fields_by_name['deprecated_freq']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_HISTOGRAM_BUCKET.fields_by_name['deprecated_count'].has_options = True
_HISTOGRAM_BUCKET.fields_by_name['deprecated_count']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
_RANKHISTOGRAM_BUCKET.fields_by_name['deprecated_count'].has_options = True
_RANKHISTOGRAM_BUCKET.fields_by_name['deprecated_count']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
# @@protoc_insertion_point(module_scope)
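# A minimal usage sketch (not part of the generated file): the message
# classes registered above can be instantiated and populated directly,
# using the field names defined by the descriptors in this module.
#
#   stats_list = DatasetFeatureStatisticsList()
#   dataset = stats_list.datasets.add(name='train', num_examples=100)
#   feature = dataset.features.add(
#       name='age', type=FeatureNameStatistics.INT)
#   feature.num_stats.min = 0.0
#   feature.num_stats.max = 99.0
#   serialized = stats_list.SerializeToString()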
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for generating the feature_statistics proto.
The proto is used as input for the Overview visualization.
"""
from facets_overview.base_feature_statistics_generator import BaseFeatureStatisticsGenerator
import facets_overview.feature_statistics_pb2 as fs
class FeatureStatisticsGenerator(BaseFeatureStatisticsGenerator):
"""Generator of stats proto from TF data."""
def __init__(self):
BaseFeatureStatisticsGenerator.__init__(self, fs.FeatureNameStatistics,
fs.DatasetFeatureStatisticsList,
fs.Histogram)
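# A minimal usage sketch (assumption: the `entries` shape below mirrors the
# one produced by this generator's parsing helpers, as exercised by the
# tests in this repo):
#
#   gen = FeatureStatisticsGenerator()
#   entries = {'num': {'vals': [1, 2, 3], 'counts': [1, 1, 1],
#                      'missing': 0, 'type': gen.fs_proto.INT}}
#   datasets = [{'entries': entries, 'size': 3, 'name': 'train'}]
#   proto = gen.GetDatasetsProto(datasets)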
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for generating the feature_statistics proto from generic data.
The proto is used as input for the Overview visualization.
"""
import numpy as np
import pandas as pd
import sys
class BaseGenericFeatureStatisticsGenerator(object):
"""Base class for generator of stats proto from |
generic data."""
def __init__(self, fs_proto, datasets_proto, histogram_proto):
self.fs_proto = fs_proto
self.datasets_proto = datasets_proto
self.histogram_proto = histogram_proto
def ProtoFromDataFrames(self, dataframes,
histogram_categorical_levels_count=None):
"""Creates a feature statistics proto from a set of pandas dataframes.
Args:
      dataframes: A list of dicts describing tables for each dataset for the
        proto. Each entry contains a 'table' field with the dataframe of the
        data and a 'name' field to identify the dataset in the proto.
      histogram_categorical_levels_count: int, controls the maximum number of
        levels to display in histograms for categorical features. Useful to
        prevent code/ID features from bloating the stats object. Defaults to
        None.
Returns:
The feature statistics proto for the provided tables.
"""
datasets = []
for dataframe in dataframes:
table = dataframe['table']
table_entries = {}
for col in table:
table_entries[col] = self.NdarrayToEntry(table[col])
datasets.append({
'entries': table_entries,
'size': len(table),
'name': dataframe['name']
})
return self.GetDatasetsProto(
datasets,
histogram_categorical_levels_count=histogram_categorical_levels_count)
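  # Example of the expected input shape (mirroring this repo's tests; `gen`
  # is assumed to be an instance of a concrete subclass that supplies the
  # proto classes to the constructor above):
  #
  #   df = pd.DataFrame([[1, 'hi'], [2, 'hello']],
  #                     columns=['testFeatureInt', 'testFeatureString'])
  #   proto = gen.ProtoFromDataFrames([{'table': df, 'name': 'testDataset'}])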
def DtypeToType(self, dtype):
"""Converts a Numpy dtype to the FeatureNameStatistics.Type proto enum."""
if dtype.char in np.typecodes['AllFloat']:
return self.fs_proto.FLOAT
elif (dtype.char in np.typecodes['AllInteger'] or dtype == bool or
np.issubdtype(dtype, np.datetime64) or
np.issubdtype(dtype, np.timedelta64)):
return self.fs_proto.INT
else:
return self.fs_proto.STRING
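  # Illustration of the mapping (per the unit tests in this repo):
  #
  #   gen.DtypeToType(np.dtype(np.int32))          # -> fs_proto.INT
  #   gen.DtypeToType(np.dtype('datetime64[ns]'))  # -> fs_proto.INT
  #   gen.DtypeToType(np.dtype(np.float32))        # -> fs_proto.FLOAT
  #   gen.DtypeToType(np.dtype(np.str_))           # -> fs_proto.STRING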
def DtypeToNumberConverter(self, dtype):
"""Converts a Numpy dtype to a converter method if applicable.
    The converter method takes in a numpy array of objects of the provided
    dtype and returns a numpy array of the numbers backing that object for
    statistical analysis. Returns None if no converter is necessary.
Args:
dtype: The numpy dtype to make a converter for.
Returns:
The converter method or None.
"""
if np.issubdtype(dtype, np.datetime64):
def DatetimesToNumbers(dt_list):
return np.array([pd.Timestamp(dt).value for dt in dt_list])
return DatetimesToNumbers
elif np.issubdtype(dtype, np.timedelta64):
      def TimedeltasToNumbers(td_list):
        return np.array([pd.Timedelta(td).value for td in td_list])
      return TimedeltasToNumbers
else:
return None
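  # Example (per the time-type unit test): datetime64 values are mapped to
  # nanoseconds since the Unix epoch via pd.Timestamp(...).value:
  #
  #   conv = gen.DtypeToNumberConverter(np.dtype('datetime64[ns]'))
  #   conv(np.array([np.datetime64('2005-02-25')]))
  #   # -> array([1109289600000000000])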
def NdarrayToEntry(self, x):
"""Converts an ndarray to the Entry format."""
row_counts = []
for row in x:
try:
rc = np.count_nonzero(~np.isnan(row))
if rc != 0:
row_counts.append(rc)
except TypeError:
try:
row_counts.append(row.size)
except AttributeError:
row_counts.append(1)
data_type = self.DtypeToType(x.dtype)
converter = self.DtypeToNumberConverter(x.dtype)
flattened = x.ravel()
orig_size = len(flattened)
# Remove all None and nan values and count how many were removed.
flattened = flattened[flattened != np.array(None)]
if converter:
flattened = converter(flattened)
if data_type == self.fs_proto.STRING:
flattened_temp = []
for x in flattened:
try:
if str(x) != 'nan':
flattened_temp.append(x)
except UnicodeEncodeError:
if x.encode('utf-8') != 'nan':
flattened_temp.append(x)
flattened = flattened_temp
else:
flattened = flattened[~np.isnan(flattened)].tolist()
missing = orig_size - len(flattened)
return {
'vals': flattened,
'counts': row_counts,
'missing': missing,
'type': data_type
}
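  # Example of the returned entry (mirroring the unit tests): None and NaN
  # values count as missing and are dropped from 'vals':
  #
  #   arr = np.array([1.0, 2.0, None, float('nan'), 3.0], dtype=float)
  #   entry = gen.NdarrayToEntry(arr)
  #   # entry['vals'] == [1.0, 2.0, 3.0]; entry['missing'] == 2;
  #   # entry['type'] == fs_proto.FLOAT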
def GetDatasetsProto(self, datasets, features=None,
histogram_categorical_levels_count=None):
"""Generates the feature stats proto from dictionaries of feature values.
Args:
datasets: An array of dictionaries, one per dataset, each one containing:
- 'entries': The dictionary of features in the dataset from the parsed
examples.
- 'size': The number of examples parsed for the dataset.
- 'name': The name of the dataset.
      features: A list of strings that is a whitelist of feature names to
        create feature statistics for. If set to None then all features in
        the dataset are analyzed. Defaults to None.
      histogram_categorical_levels_count: int, controls the maximum number of
        levels to display in histograms for categorical features. Useful to
        prevent code/ID features from bloating the stats object. Defaults to
        None.
Returns:
The feature statistics proto for the provided datasets.
"""
features_seen = set()
whitelist_features = set(features) if features else None
all_datasets = self.datasets_proto()
# TODO(jwexler): Add ability to generate weighted feature stats
# if there is a specified weight feature in the dataset.
# Initialize each dataset
for dataset in datasets:
all_datasets.datasets.add(
name=dataset['name'], num_examples=dataset['size'])
# This outer loop ensures that for each feature seen in any of the provided
# datasets, we check the feature once against all datasets.
for outer_dataset in datasets:
for key, value in outer_dataset['entries'].items():
# If we have a feature whitelist and this feature is not in the
# whitelist then do not process it.
# If we have processed this feature already, no need to do it again.
if ((whitelist_features and key not in whitelist_features) or
key in features_seen):
continue
features_seen.add(key)
# Default to type int if no type is found, so that the fact that all
# values are missing from this feature can be displayed.
feature_type = value['type'] if 'type' in value else self.fs_proto.INT
# Process the found feature for each dataset.
for j, dataset in enumerate(datasets):
feat = all_datasets.datasets[j].features.add(
type=feature_type, name=key.encode('utf-8'))
value = dataset['entries'].get(key)
has_data = value is not None and (value['vals'].size != 0
if isinstance(
value['vals'], np.ndarray) else
value['vals'])
commonstats = None
# For numeric features, calculate numeric statistics.
if feat.type in (self.fs_proto.INT, self.fs_proto.FLOAT):
featstats = feat.num_stats
commonstats = featstats.common_stats
if has_data:
nums = value['vals']
featstats.std_dev = np.std(nums).item()
featstats.mean = np.mean(nums).item()
featstats.min = np.min(nums).item()
featstats.max = np.max(nums).item()
featstats.median = np.median(nums).item()
featstats.num_zeros = len(nums) - np.count_nonzero(nums)
nums = np.array(nums)
num_nan = len(nums[np.isnan(nums)])
num_posinf = len(nums[np.isposinf(nums)])
num_neginf = len(nums[np.isneginf(nums)])
# Remove all non-finite (including NaN) values from the numeric
# values in order to calculate histogram buckets/counts. The
# inf values will be added back to the first and last buckets.
nums = nums[np.isfinite(nums)]
counts, buckets = np.histogram(nums)
hist = featstats.histograms.add()
hist.type = self.histogram_proto.STANDARD
hist.num_nan = num_nan
for bucket_count in range(len(counts)):
bucket = hist.buckets.add(
low_value=buckets[bucket_count],
high_value=buckets[bucket_count + 1],
sample_count=counts[bucket_count].item())
# Add any negative or positive infinities to the first and last
# buckets in the histogram.
if bucket_count == 0 and num_neginf > 0:
bucket.low_value = float('-inf')
bucket.sample_count += num_neginf
elif bucket_count == len(counts) - 1 and num_posinf > 0:
bucket.high_value = float('inf')
bucket.sample_count += num_posinf
if not hist.buckets:
if num_neginf:
hist.buckets.add(
low_value=float('-inf'),
high_value=float('-inf'),
sample_count=num_neginf)
if num_posinf:
hist.buckets.add(
low_value=float('inf'),
high_value=float('inf'),
sample_count=num_posinf)
              self._PopulateQuantilesHistogram(
                  featstats.histograms.add(), nums.tolist())
elif feat.type == self.fs_proto.STRING:
featstats = feat.string_stats
commonstats = featstats.common_stats
if has_data:
strs = []
for item in value['vals']:
strs.append(item if hasattr(item, '__len__') else
item.encode('utf-8') if hasattr(item, 'encode') else str(
item))
featstats.avg_length = np.mean(np.vectorize(len)(strs))
vals, counts = np.unique(strs, return_counts=True)
featstats.unique = len(vals)
sorted_vals = sorted(zip(counts, vals), reverse=True)
sorted_vals = sorted_vals[:histogram_categorical_levels_count]
for val_index, val in enumerate(sorted_vals):
try:
if (sys.version_info.major < 3 or
isinstance(val[1], (bytes, bytearray))):
printable_val = val[1].decode('UTF-8', 'strict')
else:
printable_val = val[1]
except (UnicodeDecodeError, UnicodeEncodeError):
printable_val = '__BYTES_VALUE__'
bucket = featstats.rank_histogram.buckets.add(
low_rank=val_index,
high_rank=val_index,
sample_count=(val[0].item()),
label=printable_val)
if val_index < 2:
featstats.top_values.add(
value=bucket.label, frequency=bucket.sample_count)
# Add the common stats regardless of the feature type.
if has_data:
commonstats.num_missing = value['missing']
commonstats.num_non_missing = (all_datasets.datasets[j].num_examples
- featstats.common_stats.num_missing)
commonstats.min_num_values = int(np.min(value['counts']).astype(int))
commonstats.max_num_values = int(np.max(value['counts']).astype(int))
commonstats.avg_num_values = np.mean(value['counts'])
if 'feat_lens' in value and value['feat_lens']:
self._PopulateQuantilesHistogram(
commonstats.feature_list_length_histogram, value['feat_lens'])
self._PopulateQuantilesHistogram(commonstats.num_values_histogram,
value['counts'])
else:
commonstats.num_non_missing = 0
commonstats.num_missing = all_datasets.datasets[j].num_examples
return all_datasets
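  # Example (mirroring the whitelist unit test): passing `features` limits
  # which feature names receive statistics:
  #
  #   p = gen.GetDatasetsProto(datasets, features=['testFeature'])
  #   # Only 'testFeature' appears in p.datasets[0].features.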
def _PopulateQuantilesHistogram(self, hist, nums):
"""Fills in the histogram with quantile information from the provided array.
Args:
hist: A Histogram proto message to fill in.
nums: A list of numbers to create a quantiles histogram from.
"""
if not nums:
return
num_quantile_buckets = 10
quantiles_to_get = [
x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)
]
try:
quantiles = np.percentile(nums, quantiles_to_get)
    except Exception:
quantiles = [0.0]
hist.type = self.histogram_proto.QUANTILES
quantiles_sample_count = float(len(nums)) / num_quantile_buckets
for low, high in zip(quantiles, quantiles[1:]):
hist.buckets.add(
low_value=low, high_value=high, sample_count=quantiles_sample_count)
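# Worked example of the quantile bucketing above: with 10 quantile buckets,
# boundaries sit at the 0th, 10th, ..., 100th percentiles and every bucket
# gets sample_count = len(nums) / 10. (`hist` below is assumed to be an
# instance of the Histogram proto passed to the constructor.)
#
#   gen._PopulateQuantilesHistogram(hist, list(range(30)))
#   # len(hist.buckets) == 10; each bucket.sample_count == 3.0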
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator
import numpy as np
import pandas as pd
from tensorflow.python.platform import googletest
class GenericFeatureStatisticsGeneratorTest(googletest.TestCase):
def setUp(self):
self.gfsg = GenericFeatureStatisticsGenerator()
def testProtoFromDataFrames(self):
data = [[1, 'hi'], [2, 'hello'], [3, 'hi']]
df = pd.DataFrame(data, columns=['testFeatureInt', 'testFeatureString'])
dataframes = [{'table': df, 'name': 'testDataset'}]
p = self.gfsg.ProtoFromDataFrames(dataframes)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
    self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(2, len(test_data.features))
if test_data.features[0].name == 'testFeatureInt':
numfeat = test_data.features[0]
stringfeat = test_data.features[1]
else:
numfeat = test_data.features[1]
stringfeat = test_data.features[0]
self.assertEqual('testFeatureInt', numfeat.name)
self.assertEqual(self.gfsg.fs_proto.INT, numfeat.type)
self.assertEqual(1, numfeat.num_stats.min)
self.assertEqual(3, numfeat.num_stats.max)
self.assertEqual('testFeatureString', stringfeat.name)
self.assertEqual(self.gfsg.fs_proto.STRING, stringfeat.type)
self.assertEqual(2, stringfeat.string_stats.unique)
def testNdarrayToEntry(self):
arr = np.array([1.0, 2.0, None, float('nan'), 3.0], dtype=float)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual(2, entry['missing'])
arr = np.array(['a', 'b', float('nan'), 'c'], dtype=str)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual(1, entry['missing'])
def testNdarrayToEntryTimeTypes(self):
arr = np.array(
[np.datetime64('2005-02-25'),
np.datetime64('2006-02-25')],
dtype=np.datetime64)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual([1109289600000000000, 1140825600000000000], entry['vals'])
arr = np.array(
[np.datetime64('2009-01-01') - np.datetime64('2008-01-01')],
dtype=np.timedelta64)
entry = self.gfsg.NdarrayToEntry(arr)
self.assertEqual([31622400000000000], entry['vals'])
def testDTypeToType(self):
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.int32)))
# Boolean and time types treated as int
self.assertEqual(self.gfsg.fs_proto.INT,
                     self.gfsg.DtypeToType(np.dtype(np.bool_)))
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.datetime64)))
self.assertEqual(self.gfsg.fs_proto.INT,
self.gfsg.DtypeToType(np.dtype(np.timedelta64)))
self.assertEqual(self.gfsg.fs_proto.FLOAT,
self.gfsg.DtypeToType(np.dtype(np.float32)))
self.assertEqual(self.gfsg.fs_proto.STRING,
                     self.gfsg.DtypeToType(np.dtype(np.str_)))
# Unsupported types treated as string for now
self.assertEqual(self.gfsg.fs_proto.STRING,
self.gfsg.DtypeToType(np.dtype(np.void)))
def testGetDatasetsProtoFromEntriesLists(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 3],
'counts': [1, 1, 1],
'missing': 0,
'type': self.gfsg.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.gfsg.GetDatasetsProto(datasets)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(1, len(test_data.features))
numfeat = test_data.features[0]
self.assertEqual('testFeature', numfeat.name)
self.assertEqual(self.gfsg.fs_proto.INT, numfeat.type)
self.assertEqual(1, numfeat.num_stats.min)
self.assertEqual(3, numfeat.num_stats.max)
hist = numfeat.num_stats.common_stats.num_values_histogram
buckets = hist.buckets
self.assertEqual(self.gfsg.histogram_proto.QUANTILES, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(1, buckets[0].low_value)
self.assertEqual(1, buckets[0].high_value)
self.assertEqual(.3, buckets[0].sample_count)
self.assertEqual(1, buckets[9].low_value)
self.assertEqual(1, buckets[9].high_value)
self.assertEqual(.3, buckets[9].sample_count)
def testGetDatasetsProtoSequenceExampleHistogram(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 2, 3],
'counts': [1, 2, 1],
'feat_lens': [1, 2, 1],
'missing': 0,
'type': self.gfsg.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.gfsg.GetDatasetsProto(datasets)
hist = p.datasets[0].features[
0].num_stats.common_stats.feature_list_length_histogram
buckets = hist.buckets
self.assertEqual(self.gfsg.histogram_proto.QUANTILES, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(1, buckets[0].low_value)
self.assertEqual(1, buckets[0].high_value)
self.assertEqual(.3, buckets[0].sample_count)
self.assertEqual(1.8, buckets[9].low_value)
self.assertEqual(2, buckets[9].high_value)
self.assertEqual(.3, buckets[9].sample_count)
def testGetDatasetsProtoWithWhitelist(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 3],
'counts': [1, 1, 1],
'missing': 0,
'type': self.gfsg.fs_proto.INT
}
entries['ignoreFeature'] = {
'vals': [5, 6],
'counts': [1, 1],
'missing': 1,
'type': self.gfsg.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.gfsg.GetDatasetsProto(datasets, features=['testFeature'])
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(1, len(test_data.features))
numfeat = test_data.features[0]
self.assertEqual('testFeature', numfeat.name)
self.assertEqual(1, numfeat.num_stats.min)
  def testGetDatasetsProtoWithMaxHistogramLevelsCount(self):
# Selected entries' lengths make it easy to compute average length
data = [['hi'], ['good'], ['hi'], ['hi'], ['a'], ['a']]
df = pd.DataFrame(data, columns=['testFeatureString'])
dataframes = [{'table': df, 'name': 'testDataset'}]
# Getting proto from ProtoFromDataFrames instead of GetDatasetsProto
# directly to avoid any hand written values ex: size of dataset.
p = self.gfsg.ProtoFromDataFrames(dataframes,
histogram_categorical_levels_count=2)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(6, test_data.num_examples)
self.assertEqual(1, len(test_data.features))
numfeat = test_data.features[0]
self.assertEqual('testFeatureString', numfeat.name)
top_values = numfeat.string_stats.top_values
self.assertEqual(3, top_values[0].frequency)
self.assertEqual('hi', top_values[0].value)
self.assertEqual(3, numfeat.string_stats.unique)
self.assertEqual(2, numfeat.string_stats.avg_length)
rank_hist = numfeat.string_stats.rank_histogram
buckets = rank_hist.buckets
self.assertEqual(2, len(buckets))
self.assertEqual('hi', buckets[0].label)
self.assertEqual(3, buckets[0].sample_count)
self.assertEqual('a', buckets[1].label)
self.assertEqual(2, buckets[1].sample_count)
if __name__ == '__main__':
googletest.main()
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from facets_overview.feature_statistics_generator import FeatureStatisticsGenerator
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import googletest
class FeatureStatisticsGeneratorTest(googletest.TestCase):
def setUp(self):
self.fs = FeatureStatisticsGenerator()
def testParseExampleInt(self):
# Tests parsing examples of integers
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(i)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
self.assertEqual(1, len(entries))
self.assertIn('num', entries)
info = entries['num']
self.assertEqual(0, info['missing'])
self.assertEqual(self.fs.fs_proto.INT, info['type'])
for i in range(len(examples)):
self.assertEqual(1, info['counts'][i])
self.assertEqual(i, info['vals'][i])
def testParseExampleMissingValueList(self):
    # Tests parsing examples where a feature is present but has no values
examples = []
example = tf.train.Example()
# pylint: disable=pointless-statement
example.features.feature['str']
# pylint: enable=pointless-statement
examples.append(example)
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'test')
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
self.assertEqual(1, len(entries))
self.assertIn('str', entries)
info = entries['str']
self.assertEqual(1, info['missing'])
self.assertEqual(self.fs.fs_proto.STRING, info['type'])
self.assertEqual(0, info['counts'][0])
self.assertEqual(1, info['counts'][1])
def _check_sequence_example_entries(self,
entries,
n_examples,
n_features,
feat_len=None):
self.assertIn('num', entries)
info = entries['num']
self.assertEqual(0, info['missing'])
self.assertEqual(self.fs.fs_proto.INT, info['type'])
for i in range(n_examples):
self.assertEqual(n_features, info['counts'][i])
if feat_len is not None:
self.assertEqual(feat_len, info['feat_lens'][i])
for i in range(n_examples * n_features):
self.assertEqual(i, info['vals'][i])
if feat_len is None:
self.assertEqual(0, len(info['feat_lens']))
def testParseExampleSequenceContext(self):
# Tests parsing examples of integers in context field
examples = []
for i in range(50):
example = tf.train.SequenceExample()
example.context.feature['num'].int64_list.value.append(i)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.context.feature,
example.feature_lists.feature_list, entries, i)
self._check_sequence_example_entries(entries, 50, 1)
self.assertEqual(1, len(entries))
def testParseExampleSequenceFeatureList(self):
examples = []
for i in range(50):
example = tf.train.SequenceExample()
feat = example.feature_lists.feature_list['num'].feature.add()
feat.int64_list.value.append(i)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.context.feature,
example.feature_lists.feature_list, entries, i)
self._check_sequence_example_entries(entries, 50, 1, 1)
def testParseExampleSequenceFeatureListMultipleEntriesInner(self):
examples = []
for i in range(2):
example = tf.train.SequenceExample()
feat = example.feature_lists.feature_list['num'].feature.add()
for j in range(25):
feat.int64_list.value.append(i * 25 + j)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
      self.fs._ParseExample(example.context.feature,
example.feature_lists.feature_list, entries, i)
self._check_sequence_example_entries(entries, 2, 25, 1)
def testParseExampleSequenceFeatureListMultipleEntriesOuter(self):
    # Tests parsing sequence examples with multiple entries in the feature list
examples = []
for i in range(2):
example = tf.train.SequenceExample()
for j in range(25):
feat = example.feature_lists.feature_list['num'].feature.add()
feat.int64_list.value.append(i * 25 + j)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.context.feature,
example.feature_lists.feature_list, entries, i)
self._check_sequence_example_entries(entries, 2, 25, 25)
def testVaryingCountsAndMissing(self):
# Tests parsing examples of when some examples have missing features
examples = []
for i in range(5):
example = tf.train.Example()
example.features.feature['other'].int64_list.value.append(0)
for _ in range(i):
example.features.feature['num'].int64_list.value.append(i)
examples.append(example)
example = tf.train.Example()
example.features.feature['other'].int64_list.value.append(0)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
info = entries['num']
self.assertEqual(2, info['missing'])
self.assertEqual(4, len(info['counts']))
for i in range(4):
self.assertEqual(i + 1, info['counts'][i])
self.assertEqual(10, len(info['vals']))
def testParseExampleStringsAndFloats(self):
# Tests parsing examples of string and float features
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'hi')
example.features.feature['float'].float_list.value.append(i)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
self.assertEqual(2, len(entries))
self.assertEqual(self.fs.fs_proto.FLOAT, entries['float']['type'])
self.assertEqual(self.fs.fs_proto.STRING, entries['str']['type'])
for i in range(len(examples)):
self.assertEqual(1, entries['str']['counts'][i])
self.assertEqual(1, entries['float']['counts'][i])
self.assertEqual(i, entries['float']['vals'][i])
self.assertEqual('hi', entries['str']['vals'][i].decode(
'UTF-8', 'strict'))
def testParseExamplesTypeMismatch(self):
examples = []
example = tf.train.Example()
example.features.feature['feat'].int64_list.value.append(0)
examples.append(example)
example = tf.train.Example()
example.features.feature['feat'].bytes_list.value.append(b'str')
examples.append(example)
entries = {}
self.fs._ParseExample(examples[0].features.feature, [], entries, 0)
with self.assertRaises(TypeError):
self.fs._ParseExample(examples[1].features.feature, [], entries, 1)
def testGetDatasetsProtoFromEntriesLists(self):
entries = {}
entries['testFeature'] = {
'vals': [1, 2, 3],
'counts': [1, 1, 1],
'missing': 0,
'type': self.fs.fs_proto.INT
}
datasets = [{'entries': entries, 'size': 3, 'name': 'testDataset'}]
p = self.fs.GetDatasetsProto(datasets)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('testDataset', test_data.name)
self.assertEqual(3, test_data.num_examples)
self.assertEqual(1, len(test_data.features))
numfeat = test_data.features[0]
self.assertEqual('testFeature', numfeat.name)
self.assertEqual(self.fs.fs_proto.INT, numfeat.type)
self.assertEqual(1, numfeat.num_stats.min)
self.assertEqual(3, numfeat.num_stats.max)
def testGetProtoNums(self):
# Tests converting int examples into the feature stats proto
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(i)
examples.append(example)
example = tf.train.Example()
example.features.feature['other'].int64_list.value.append(0)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}]
p = self.fs.GetDatasetsProto(datasets)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('test', test_data.name)
self.assertEqual(51, test_data.num_examples)
numfeat = test_data.features[0] if (
test_data.features[0].name == 'num') else test_data.features[1]
self.assertEqual('num', numfeat.name)
self.assertEqual(self.fs.fs_proto.INT, numfeat.type)
self.assertEqual(0, numfeat.num_stats.min)
self.assertEqual(49, numfeat.num_stats.max)
self.assertEqual(24.5, numfeat.num_stats.mean)
self.assertEqual(24.5, numfeat.num_stats.median)
self.assertEqual(1, numfeat.num_stats.num_zeros)
self.assertAlmostEqual(14.430869689, numfeat.num_stats.std_dev, 4)
self.assertEqual(1, numfeat.num_stats.common_stats.num_missing)
self.assertEqual(50, numfeat.num_stats.common_stats.num_non_missing)
self.assertEqual(1, numfeat.num_stats.common_stats.min_num_values)
self.assertEqual(1, numfeat.num_stats.common_stats.max_num_values)
self.assertAlmostEqual(1, numfeat.num_stats.common_stats.avg_num_values, 4)
hist = numfeat.num_stats.common_stats.num_values_histogram
buckets = hist.buckets
self.assertEqual(self.fs.histogram_proto.QUANTILES, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(1, buckets[0].low_value)
self.assertEqual(1, buckets[0].high_value)
self.assertEqual(5, buckets[0].sample_count)
self.assertEqual(1, buckets[9].low_value)
self.assertEqual(1, buckets[9].high_value)
self.assertEqual(5, buckets[9].sample_count)
self.assertEqual(2, len(numfeat.num_stats.histograms))
buckets = numfeat.num_stats.histograms[0].buckets
self.assertEqual(self.fs.histogram_proto.STANDARD,
numfeat.num_stats.histograms[0].type)
self.assertEqual(10, len(buckets))
self.assertEqual(0, buckets[0].low_value)
self.assertEqual(4.9, buckets[0].high_value)
self.assertEqual(5, buckets[0].sample_count)
self.assertAlmostEqual(44.1, buckets[9].low_value)
self.assertEqual(49, buckets[9].high_value)
self.assertEqual(5, buckets[9].sample_count)
buckets = numfeat.num_stats.histograms[1].buckets
self.assertEqual(self.fs.histogram_proto.QUANTILES,
numfeat.num_stats.histograms[1].type)
self.assertEqual(10, len(buckets))
self.assertEqual(0, buckets[0].low_value)
self.assertEqual(4.9, buckets[0].high_value)
self.assertEqual(5, buckets[0].sample_count)
self.assertAlmostEqual(44.1, buckets[9].low_value)
self.assertEqual(49, buckets[9].high_value)
self.assertEqual(5, buckets[9].sample_count)
def testQuantiles(self):
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(i)
examples.append(example)
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(100)
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}]
p = self.fs.GetDatasetsProto(datasets)
numfeat = p.datasets[0].features[0]
self.assertEqual(2, len(numfeat.num_stats.histograms))
self.assertEqual(self.fs.histogram_proto.QUANTILES,
numfeat.num_stats.histograms[1].type)
buckets = numfeat.num_stats.histograms[1].buckets
self.assertEqual(10, len(buckets))
self.assertEqual(0, buckets[0].low_value)
self.assertEqual(9.9, buckets[0].high_value)
self.assertEqual(10, buckets[0].sample_count)
self.assertEqual(100, buckets[9].low_value)
self.assertEqual(100, buckets[9].high_value)
self.assertEqual(10, buckets[9].sample_count)
def testInfinityAndNan(self):
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(i)
examples.append(example)
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(float('inf'))
examples.append(example)
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(float('-inf'))
examples.append(example)
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(float('nan'))
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}]
p = self.fs.GetDatasetsProto(datasets)
numfeat = p.datasets[0].features[0]
self.assertEqual('num', numfeat.name)
self.assertEqual(self.fs.fs_proto.FLOAT, numfeat.type)
self.assertTrue(np.isnan(numfeat.num_stats.min))
self.assertTrue(np.isnan(numfeat.num_stats.max))
self.assertTrue(np.isnan(numfeat.num_stats.mean))
self.assertTrue(np.isnan(numfeat.num_stats.median))
self.assertEqual(1, numfeat.num_stats.num_zeros)
self.assertTrue(np.isnan(numfeat.num_stats.std_dev))
self.assertEqual(53, numfeat.num_stats.common_stats.num_non_missing)
hist = numfeat.num_stats.histograms[0]
buckets = hist.buckets
self.assertEqual(self.fs.histogram_proto.STANDARD, hist.type)
self.assertEqual(1, hist.num_nan)
self.assertEqual(10, len(buckets))
self.assertEqual(float('-inf'), buckets[0].low_value)
self.assertEqual(4.9, buckets[0].high_value)
self.assertEqual(6, buckets[0].sample_count)
self.assertEqual(44.1, buckets[9].low_value)
self.assertEqual(float('inf'), buckets[9].high_value)
self.assertEqual(6, buckets[9].sample_count)
def testInfinitysOnly(self):
examples = []
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(float('inf'))
examples.append(example)
example = tf.train.Example()
example.features.feature['num'].float_list.value.append(float('-inf'))
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}]
p = self.fs.GetDatasetsProto(datasets)
numfeat = p.datasets[0].features[0]
hist = numfeat.num_stats.histograms[0]
buckets = hist.buckets
self.assertEqual(self.fs.histogram_proto.STANDARD, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(float('-inf'), buckets[0].low_value)
self.assertEqual(0.1, buckets[0].high_value)
self.assertEqual(1, buckets[0].sample_count)
self.assertEqual(0.9, buckets[9].low_value)
self.assertEqual(float('inf'), buckets[9].high_value)
self.assertEqual(1, buckets[9].sample_count)
def testGetProtoStrings(self):
# Tests converting string examples into the feature stats proto
examples = []
for i in range(2):
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'hello')
examples.append(example)
for i in range(3):
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'hi')
examples.append(example)
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'hey')
examples.append(example)
entries = {}
for i, example in enumerate(examples):
self.fs._ParseExample(example.features.feature, [], entries, i)
datasets = [{'entries': entries, 'size': len(examples), 'name': 'test'}]
p = self.fs.GetDatasetsProto(datasets)
self.assertEqual(1, len(p.datasets))
test_data = p.datasets[0]
self.assertEqual('test', test_data.name)
self.assertEqual(6, test_data.num_examples)
strfeat = test_data.features[0]
self.assertEqual('str', strfeat.name)
self.assertEqual(self.fs.fs_proto.STRING, strfeat.type)
self.assertEqual(3, strfeat.string_stats.unique)
self.assertAlmostEqual(19 / 6.0, strfeat.string_stats.avg_length, 4)
self.assertEqual(0, strfeat.string_stats.common_stats.num_missing)
self.assertEqual(6, strfeat.string_stats.common_stats.num_non_missing)
self.assertEqual(1, strfeat.string_stats.common_stats.min_num_values)
self.assertEqual(1, strfeat.string_stats.common_stats.max_num_values)
self.assertEqual(1, strfeat.string_stats.common_stats.avg_num_values)
hist = strfeat.string_stats.common_stats.num_values_histogram
buckets = hist.buckets
self.assertEqual(self.fs.histogram_proto.QUANTILES, hist.type)
self.assertEqual(10, len(buckets))
self.assertEqual(1, buckets[0].low_value)
self.assertEqual(1, buckets[0].high_value)
self.assertEqual(.6, buckets[0].sample_count)
self.assertEqual(1, buckets[9].low_value)
self.assertEqual(1, buckets[9].high_value)
self.assertEqual(.6, buckets[9].sample_count)
self.assertEqual(2, len(strfeat.string_stats.top_values))
self.assertEqual(3, strfeat.string_stats.top_values[0].frequency)
self.assertEqual('hi', strfeat.string_stats.top_values[0].value)
self.assertEqual(2, strfeat.string_stats.top_values[1].frequency)
self.assertEqual('hello', strfeat.string_stats.top_values[1].value)
buckets = strfeat.string_stats.rank_histogram.buckets
self.assertEqual(3, len(buckets))
self.assertEqual(0, buckets[0].low_rank)
self.assertEqual(0, buckets[0].high_rank)
self.assertEqual(3, buckets[0].sample_count)
self.assertEqual('hi', buckets[0].label)
self.assertEqual(2, buckets[2].low_rank)
self.assertEqual(2, buckets[2].high_rank)
self.assertEqual(1, buckets[2].sample_count)
self.assertEqual('hey', buckets[2].label)
def testGetProtoMultipleDatasets(self):
# Tests converting multiple datasets into the feature stats proto
# including ensuring feature order is consistent in the protos.
examples1 = []
for i in range(2):
example = tf.train.Example()
example.features.feature['str'].bytes_list.value.append(b'one')
example.features.feature['num'].int64_list.value.append(0)
examples1.append(example)
examples2 = []
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(1)
example.features.feature['str'].bytes_list.value.append(b'two')
examples2.append(example)
entries1 = {}
for i, example1 in enumerate(examples1):
self.fs._ParseExample(example1.features.feature, [], entries1, i)
entries2 = {}
for i, example2 in enumerate(examples2):
self.fs._ParseExample(example2.features.feature, [], entries2, i)
datasets = [{
'entries': entries1,
'size': len(examples1),
'name': 'test1'
}, {
'entries': entries2,
'size': len(examples2),
'name': 'test2'
}]
p = self.fs.GetDatasetsProto(datasets)
self.assertEqual(2, len(p.datasets))
test_data_1 = p.datasets[0]
self.assertEqual('test1', test_data_1.name)
self.assertEqual(2, test_data_1.num_examples)
num_feat_index = 0 if test_data_1.features[0].name == 'num' else 1
self.assertEqual(0, test_data_1.features[num_feat_index].num_stats.max)
test_data_2 = p.datasets[1]
self.assertEqual('test2', test_data_2.name)
self.assertEqual(1, test_data_2.num_examples)
self.assertEqual(1, test_data_2.features[num_feat_index].num_stats.max)
def testGetEntriesNoFiles(self):
features, num_examples = self.fs._GetEntries(['test'], 10,
lambda unused_path: [])
self.assertEqual(0, num_examples)
self.assertEqual({}, features)
@staticmethod
def get_example_iter():
def ex_iter(unused_filename):
examples = []
for i in range(50):
example = tf.train.Example()
example.features.feature['num'].int64_list.value.append(i)
examples.append(example.SerializeToString())
return examples
return ex_iter
def testGetEntries_one(self):
features, num_examples = self.fs._GetEntries(['test'], 1,
self.get_example_iter())
self.assertEqual(1, num_examples)
self.assertTrue('num' in features)
def testGetEntries_oneFile(self):
unused_features, num_examples = self.fs._GetEntries(['test'], 1000,
self.get_example_iter())
self.assertEqual(50, num_examples)
def testGetEntries_twoFiles(self):
unused_features, num_examples = self.fs._GetEntries(['test0', 'test1'],
1000,
self.get_example_iter())
self.assertEqual(100, num_examples)
def testGetEntries_stopInSecondFile(self):
unused_features, num_examples = self.fs._GetEntries([
'test@0', 'test@1', 'test@2', 'test@3', 'test@4', 'test@5', 'test@6',
'test@7', 'test@8', 'test@9'
], 75, self.get_example_iter())
self.assertEqual(75, num_examples)
if __name__ == '__main__':
googletest.main()
<s> # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for generating the feature_statistics proto from TensorFlow data.
The proto is used as input for the Overview visualization.
"""
from functools import partial
from facets_overview.base_generic_feature_statistics_generator import BaseGenericFeatureStatisticsGenerator
import tensorflow as tf
# The feature name used to track sequence length when analyzing
# tf.SequenceExamples.
SEQUENCE_LENGTH_FEATURE_NAME = 'sequence length (derived feature)'
class BaseFeatureStatisticsGenerator(BaseGenericFeatureStatisticsGenerator):
"""Base class for generator of stats proto from TF data."""
def __init__(self, fs_proto, datasets_proto, histogram_proto):
BaseGenericFeatureStatisticsGenerator.__init__(
self, fs_proto, datasets_proto, histogram_proto)
def ProtoFromTfRecordFiles(self,
files,
max_entries=10000,
features=None,
is_sequence=False,
iterator_options=None,
histogram_categorical_levels_count=None):
"""Creates a feature statistics proto from a set of TFRecord files.
Args:
files: A list of dicts describing files for each dataset for the proto. Each
entry contains a 'path' field with the path to the TFRecord file on disk
and a 'name' field to identify the dataset in the proto.
max_entries: The maximum number of examples to load from each dataset
in order to create the proto. Defaults to 10000.
features: A list of strings that is a whitelist of feature names to create
feature statistics for. If set to None then all features in the dataset
are analyzed. Defaults to None.
is_sequence: True if the input data from 'files' are tf.SequenceExamples,
False if tf.Examples. Defaults to False.
iterator_options: Options to pass to the iterator that reads the examples.
Defaults to None.
histogram_categorical_levels_count: int, controls the maximum number of
levels to display in histograms for categorical features.
Useful to prevent codes/IDs features from bloating the stats object.
Defaults to None.
Returns:
The feature statistics proto for the provided files.
"""
datasets = []
for entry in files:
entries, size = self._GetTfRecordEntries(entry['path'], max_entries,
is_sequence, iterator_options)
datasets.append({'entries': entries, 'size': size, 'name': entry['name']})
return self.GetDatasetsProto(
datasets,
features,
histogram_categorical_levels_count)
def _ParseExample(self, example_features, example_feature_lists, entries,
index):
"""Parses data from an example, populating a dictionary of feature values.
Args:
example_features: A map of strings to tf.Features from the example.
example_feature_lists: A map of strings to tf.FeatureLists from the
example.
entries: A dictionary of all features parsed thus far and arrays of their
values. This is mutated by the function.
index: The index of the example to parse from a list of examples.
Raises:
TypeError: Raises an exception when a feature has inconsistent types
across examples.
"""
features_seen = set()
for feature_list, is_feature in zip(
[example_features, example_feature_lists], [True, False]):
sequence_length = None
for feature_name in feature_list:
# If this feature has not been seen in previous examples, then
# initialize its entry into the entries dictionary.
if feature_name not in entries:
entries[feature_name] = {
'vals': [],
'counts': [],
'feat_lens': [],
'missing': index
}
feature_entry = entries[feature_name]
feature = feature_list[feature_name]
value_type = None
value_list = []
if is_feature:
# If parsing a tf.Feature, extract the type and values simply.
if feature.HasField('float_list'):
value_list = feature.float_list.value
value_type = self.fs_proto.FLOAT
elif feature.HasField('bytes_list'):
value_list = feature.bytes_list.value
value_type = self.fs_proto.STRING
elif feature.HasField('int64_list'):
value_list = feature.int64_list.value
value_type = self.fs_proto.INT
else:
# If parsing a tf.FeatureList, get the type and values by iterating
# over all Features in the FeatureList.
sequence_length = len(feature.feature)
if sequence_length != 0 and feature.feature[0].HasField('float_list'):
for feat in feature.feature:
for value in feat.float_list.value:
value_list.append(value)
value_type = self.fs_proto.FLOAT
elif sequence_length != 0 and feature.feature[0].HasField(
'bytes_list'):
for feat in feature.feature:
for value in feat.bytes_list.value:
value_list.append(value)
value_type = self.fs_proto.STRING
elif sequence_length != 0 and feature.feature[0].HasField(
'int64_list'):
for feat in feature.feature:
for value in feat.int64_list.value:
value_list.append(value)
value_type = self.fs_proto.INT
if value_type is not None:
if 'type' not in feature_entry:
feature_entry['type'] = value_type
elif feature_entry['type'] != value_type:
raise TypeError('type mismatch for feature ' + feature_name)
feature_entry['counts'].append(len(value_list))
feature_entry['vals'].extend(value_list)
if sequence_length is not None:
feature_entry['feat_lens'].append(sequence_length)
if value_list:
features_seen.add(feature_name)
# For all previously-seen features not found in this example, update the
# feature's missing value.
for f in entries:
fv = entries[f]
if f not in features_seen:
fv['missing'] += 1
def _GetEntries(self,
paths,
max_entries,
iterator_from_file,
is_sequence=False):
"""Extracts examples into a dictionary of feature values.
Args:
paths: A list of the paths to the files to parse.
max_entries: The maximum number of examples to load.
iterator_from_file: A method that takes a file path string and returns an
iterator to the examples in that file.
is_sequence: True if the input data from 'iterator_from_file' are
tf.SequenceExamples, False if tf.Examples. Defaults to false.
Returns:
A tuple with two elements:
- A dictionary of all features parsed thus far and arrays of their
values.
- The number of examples parsed.
"""
entries = {}
index = 0
for filepath in paths:
reader = iterator_from_file(filepath)
for record in reader:
if is_sequence:
sequence_example = tf.train.SequenceExample.FromString(record)
self._ParseExample(sequence_example.context.feature,
sequence_example.feature_lists.feature_list,
entries, index)
else:
self._ParseExample(
tf.train.Example.FromString(record).features.feature, [], entries,
index)
index += 1
if index == max_entries:
return entries, index
return entries, index
def _GetTfRecordEntries(self, path, max_entries, is_sequence,
iterator_options):
"""Extracts TFRecord examples into a dictionary of feature values.
Args:
path: The path to the TFRecord file(s).
max_entries: The maximum number of examples to load.
is_sequence: True if the input data from 'path' are tf.SequenceExamples,
False if tf.Examples. Defaults to false.
iterator_options: Options to pass to the iterator that reads the examples.
Defaults to None.
Returns:
A tuple with two elements:
- A dictionary of all features parsed thus far and arrays of their
values.
- The number of examples parsed.
"""
return self._GetEntries([path], max_entries,
partial(
tf.compat.v1.io.tf_record_iterator,
options=iterator_options), is_sequence)
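# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal example of how this base class is typically driven. It assumes a
# concrete subclass that supplies the three proto classes to __init__ (in the
# facets_overview package that subclass is typically FeatureStatisticsGenerator),
# and the TFRecord paths below are placeholders, not part of this module:
#
#   from facets_overview.feature_statistics_generator import FeatureStatisticsGenerator
#
#   gen = FeatureStatisticsGenerator()
#   proto = gen.ProtoFromTfRecordFiles(
#       files=[{'path': '/path/to/train.tfrecord', 'name': 'train'},
#              {'path': '/path/to/test.tfrecord', 'name': 'test'}],
#       max_entries=10000)
#   # proto.datasets then holds one DatasetFeatureStatistics entry per input file.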
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
from importlib.metadata import version
import sys
import os
def requirementfile(deploy_path,model,textFeatures,learner_type):
modules = ['pandas','numpy','alibi','matplotlib','joblib','shap','ipython','category_encoders','scikit-learn','word2number','flask_restful','evidently','Flask-Cors']
requires = ''
for mod in modules:
requires += f"{mod}=={version(mod)}\\n"
if len(textFeatures) > 0:
tmodules = ['spacy','nltk','textblob','demoji','beautifulsoup4','text-unidecode','pyspellchecker','contractions','protobuf']
for mod in tmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Extreme Gradient Boosting (XGBoost)':
mmodules = ['xgboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Light Gradient Boosting (LightGBM)':
mmodules = ['lightgbm']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model == 'Categorical Boosting (CatBoost)':
mmodules = ['catboost']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'arima':
mmodules = ['pmdarima']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'fbprophet':
mmodules = ['prophet']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'lstm' or model.lower() == 'mlp' or learner_type =='DL':
mmodules = ['tensorflow']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() in ['cox', 'kaplanmeierfitter']: #bug 12833
mmodules = ['lifelines']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
if model.lower() == 'sentencetransformer': #bug 12833
mmodules = ['sentence_transformers']
for mod in mmodules:
requires += f"{mod}=={version(mod)}\\n"
filename = os.path.join(deploy_path,'requirements.txt')
f = open(filename, "wb")
f.write(str(requires).encode('utf8'))
f.close()
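# Usage sketch (illustrative; the deploy path and model name below are
# placeholders, not part of this module). A call like the following writes a
# version-pinned requirements.txt into the deployment folder, adding xgboost
# because of the chosen model:
#
#   requirementfile('/tmp/deploy', 'Extreme Gradient Boosting (XGBoost)', [], 'ML')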
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import json
import shutil
import logging
import sys
from AionConfigManager import AionConfigManager
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23+
class edgeformats:
def __init__(self,deploy_path):
self.deploy_path = deploy_path
self.edge_deploy_path = os.path.join(deploy_path,"edge")
os.mkdir(self.edge_deploy_path)
def converttoedgedeployment(self,saved_model,edge_format,xtrain,model_type,iterName,iterVersion,features,profiled_data_file):
if edge_format == 'onnx':
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
xtrain = xtrain[features]
initial_type = [('float_input', FloatTensorType([None, xtrain.shape[1]]))]
filename = os.path.join(self.deploy_path,saved_model)
loaded_model = joblib.load(filename)
onx = convert_sklearn(loaded_model, initial_types=initial_type)
onnx_filename = os.path.join(self.edge_deploy_path, model_type + '_' + iterName + '_' + iterVersion + '.onnx')
with open(onnx_filename, "wb") as f:
f.write(onx.SerializeToString())
self.createedgeruntimeFile(onnx_filename,profiled_data_file,features)
def createedgeruntimeFile(self,onnx_filename,datafilepath,features):
runtimefilecontent = ''
runtimefilecontent += 'import pandas'
runtimefilecontent += '\\n'
runtimefilecontent += 'import numpy'
runtimefilecontent += '\\n'
runtimefilecontent += 'import sys'
runtimefilecontent += '\\n'
runtimefilecontent += 'import onnxruntime as rt'
runtimefilecontent += '\\n'
runtimefilecontent += 'def onnx_runtime_validation():'
runtimefilecontent += '\\n'
runtimefilecontent += ' modelfile = r"'+str(onnx_filename)+'"'
runtimefilecontent += '\\n'
runtimefilecontent += ' datafile = r"'+str(datafilepath)+'"'
runtimefilecontent += '\\n'
runtimefilecontent += ' dataframe = pandas.read_csv(datafile)'
runtimefilecontent += '\\n'
runtimefilecontent += ' dataframe = dataframe['+str(features)+']'
runtimefilecontent += '\\n'
runtimefilecontent += ' df = dataframe.head(8)'
runtimefilecontent += '\\n'
runtimefilecontent += ' dataset = df.values'
runtimefilecontent += '\\n'
runtimefilecontent += ' sess = rt.InferenceSession(modelfile)'
runtimefilecontent += '\\n'
runtimefilecontent += ' input_name = sess.get_inputs()[0].name'
runtimefilecontent += '\\n'
runtimefilecontent += ' label_name = sess.get_outputs()[0].name'
runtimefilecontent += '\\n'
runtimefilecontent += ' inputsize=sess.get_inputs()[0].shape'
runtimefilecontent += '\\n'
runtimefilecontent += ' XYZ = dataset[:,0:inputsize[1]].astype(float)'
runtimefilecontent += '\\n'
runtimefilecontent += ' pred_onx = sess.run([label_name], {input_name: XYZ.astype(numpy.float32)[0:8]})[0]'
runtimefilecontent += '\\n'
runtimefilecontent += ' df[\\'predictions\\'] = pred_onx'
runtimefilecontent += '\\n'
runtimefilecontent += ' result = df.to_json(orient="records")'
runtimefilecontent += '\\n'
runtimefilecontent += ' return(result)'
runtimefilecontent += '\\n'
runtimefilecontent += 'if __name__ == "__main__":'
runtimefilecontent += '\\n'
runtimefilecontent += ' output = onnx_runtime_validation()'
runtimefilecontent += '\\n'
runtimefilecontent += ' print("predictions:",output)'
filename = os.path.join(self.edge_deploy_path,'onnxvalidation.py')
f = open(filename, "w")
f.write(str(runtimefilecontent))
f.close()
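# Usage sketch (illustrative; the paths, names and the training dataframe are
# placeholders, not part of this module). Converting a trained sklearn model
# to ONNX and emitting the onnxvalidation.py runtime script:
#
#   ef = edgeformats('/tmp/deploy')
#   ef.converttoedgedeployment('model.pkl', 'onnx', xtrain, 'classification',
#                              'MyIteration', '1', ['f1', 'f2'],
#                              '/tmp/profiled_data.csv')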
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import platform
import json
import shutil
import logging
class outputFormatter:
def __init__(self):
self.log = logging.getLogger('eion')
self.log.info('========> Inside Output Formatter')
def crate_output_format_file(self,deploy_path,learner_type,modelType,model,output_label,threshold,trained_data_file,dictDiffCount,targetFeature,features,datetimeFeature):
self.output_formatfile = 'import json'
self.output_formatfile += '\\n'
self.output_formatfile += 'import numpy as np'
self.output_formatfile += '\\n'
self.output_formatfile += 'import pandas as pd'
self.output_formatfile += '\\n'
self.output_formatfile += 'import os'
self.output_formatfile += '\\n'
self.output_formatfile += 'from pathlib import Path'
self.output_formatfile += '\\n'
if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"):
self.output_formatfile += 'from script.aion_granularity import aion_gettimegranularity'
self.output_formatfile += '\\n'
self.output_formatfile += 'class output_format(object):'
self.output_formatfile += '\\n'
if(model == 'VAR'):
self.output_formatfile += ' def invertTransformation(self,predictions):'
self.output_formatfile += '\\n'
self.output_formatfile += ' datasetdf = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)),"..","data","trainingdata.csv"))'
self.output_formatfile += '\\n'
self.output_formatfile += ' dictDiffCount = '+str(dictDiffCount)
self.output_formatfile += '\\n'
self.output_formatfile += ' targetFeature = "'+str(targetFeature)+'"'
self.output_formatfile += '\\n'
self.output_formatfile += ' columns = targetFeature.split(",")'
self.output_formatfile += '\\n'
self.output_formatfile += ' pred = pd.DataFrame(index=range(0,len(predictions)),columns=columns)'
self.output_formatfile += '\\n'
self.output_formatfile += ' for j in range(0,len(columns)):'
self.output_formatfile += '\\n'
self.output_formatfile += ' for i in range(0, len(predictions)):'
self.output_formatfile += '\\n'
self.output_formatfile += ' pred.iloc[i][j] = round(predictions[i][j],2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' prediction = pred'
self.output_formatfile += '\\n'
self.output_formatfile += ' for col in columns:'
self.output_formatfile += '\\n'
self.output_formatfile += ' if col in dictDiffCount:'
self.output_formatfile += '\\n'
self.output_formatfile += ' if dictDiffCount[col]==2:'
self.output_formatfile += '\\n'
self.output_formatfile += ' prediction[col] = (datasetdf[col].iloc[-1]-datasetdf[col].iloc[-2]) + prediction[col].cumsum()'
self.output_formatfile += '\\n'
self.output_formatfile += ' prediction[col] = datasetdf[col].iloc[-1] + prediction[col].cumsum()'
self.output_formatfile += '\\n'
self.output_formatfile += ' prediction = pred'
self.output_formatfile += '\\n'
self.output_formatfile += ' return(prediction)'
self.output_formatfile += '\\n'
self.log.info("op:modelType: \\n"+str(modelType))
if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"):
# if modelType == 'anomaly_detection':
self.output_formatfile += ' def find_point_subsequence_anomalies(self,datetime_column,dataframe=None):'
self.output_formatfile += '\\n'
self.output_formatfile += ' try:'
self.output_formatfile += '\\n'
self.output_formatfile += ' dataframe[datetime_column] = pd.to_datetime(dataframe[datetime_column]) '
self.output_formatfile += '\\n'
self.output_formatfile += ' aion_gettimegranularity_obj=aion_gettimegranularity(dataframe,datetime_column) '
self.output_formatfile += '\\n'
self.output_formatfile += ' anomaly_info_df=aion_gettimegranularity_obj.get_granularity() '
self.output_formatfile += '\\n'
self.output_formatfile += ' except Exception as e:'
self.output_formatfile += '\\n'
self.output_formatfile += ' print(f"find_point_subsequence_anomalies,: aion_gettimegranularity err msg:{e} ")\\n'
self.output_formatfile += ' return anomaly_info_df'
self.output_formatfile += '\\n'
if((model.lower() in ['autoencoder','dbscan']) and modelType.lower()=="anomaly_detection"):
if (datetimeFeature!='' and datetimeFeature!='NA'):
self.output_formatfile += ' def apply_output_format(self,df,modeloutput,datetimeFeature):'
self.output_formatfile += '\\n'
else:
self.output_formatfile += ' def apply_output_format(self,df,modeloutput):'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput.idxmax(axis=1)'
self.output_formatfile += '\\n'
if learner_type != 'DL':
self.output_formatfile += ' df[\\'probability\\'] = modeloutput.max(axis=1).round(2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'remarks\\'] = modeloutput.apply(lambda x: x.to_json(double_precision=2), axis=1)'
self.output_formatfile += '\\n'
else:
if model == 'COX':
self.output_formatfile += '\\n'
self.output_formatfile += ' modeloutput[0] = modeloutput[0].round(2)'
self.output_formatfile += '\\n'
#self.output_formatfile += ' modeloutput = modeloutput[0].to_json(orient=\\'records\\',double_precision=2)'
#self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput'
self.output_formatfile += '\\n'
else:
self.output_formatfile += ' df[\\'prediction\\'] = modeloutput[0]'
if(learner_type == 'objectDetection'):
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = df[\\'prediction\\']'
else:
self.output_formatfile += '\\n'
self.output_formatfile += ' df[\\'prediction\\'] = df[\\'prediction\\'].round(2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = df.to_json(orient=\\'records\\',double_precision=2)'
self.output_formatfile += '\\n'
self.output_formatfile += ' outputjson = {"status":"SUCCESS","data":json.loads(outputjson)}'
self.output_formatfile += '\\n'
self.output_formatfile += ' return(json.dumps(outputjson))'
filename = os.path.join(deploy_path,'script','output_format.py')
#print(deploy_path)
f = open(filename, "wb")
self.log.info('-------> Output Mapping File Location :'+filename)
f.write(str(self.output_formatfile).encode('utf8'))
f.close()
<s> #task 11190: Item based Recommender system---Usnish
import os
def generate_recommender_code(deployPath):
code = """
import pandas as pd
import numpy as np
import os
ITEMID = 'itemId'
DATA_FOLDER = 'data'
USER_ITEM_MATRIX = 'user_item_matrix.csv'
ITEM_SIMILARITY_MATRIX = 'item_similarity_matrix.csv'
RATING = 'rating'
SIMILARITY_SCORE = 'similarity_score'
class collaborative_filter(object):
def __init__(self):
self.matrix = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, USER_ITEM_MATRIX),index_col=0)
self.matrix.index.name = ITEMID
self.item_similarity_cosine = pd.read_csv(os.path.join(os.path.dirname(__file__), '..', DATA_FOLDER, ITEM_SIMILARITY_MATRIX))
self.item_similarity_cosine.index.name = ITEMID
self.item_similarity_cosine.columns.name = ITEMID
def item_based_rec(self,picked_userid, number_of_recommendations,number_of_similar_items=5):
import operator
if not isinstance(picked_userid,str):
picked_userid = str(picked_userid)
if picked_userid not in self.matrix.columns:
raise KeyError("UserID Does Not Exist")
# Movies that the target user has not watched
try:
picked_userid_unwatched = pd.DataFrame(self.matrix[picked_userid].isna()).reset_index()
picked_userid_unwatched = picked_userid_unwatched[picked_userid_unwatched[picked_userid] == True][ITEMID].values.tolist()
# Movies that the target user has watched
picked_userid_watched = pd.DataFrame(self.matrix[picked_userid].dropna(axis=0, how='all') \\
.sort_values(ascending=False)) \\
.reset_index() \\
.rename(columns={picked_userid: 'rating'})
# Dictionary to save the unwatched movie and predicted rating pair
rating_prediction = {}
# Loop through unwatched movies
for picked_movie in picked_userid_unwatched:
if not isinstance(picked_movie,str):
picked_movie = str(picked_movie)
# Calculate the similarity score of the picked movie with other movies
try:
picked_movie_similarity_score = self.item_similarity_cosine[[picked_movie]].reset_index().rename(
columns={picked_movie: SIMILARITY_SCORE})
# Rank the similarities between the picked user watched movie and the picked unwatched movie.
picked_userid_watched_similarity = pd.merge(left=picked_userid_watched,
right=picked_movie_similarity_score,
on=ITEMID,
how='inner') \\
.sort_values(SIMILARITY_SCORE, ascending=False)[
:number_of_similar_items]
# Calculate the predicted rating using weighted average of similarity scores and the ratings from picked user
try:
predicted_rating = round(np.average(picked_userid_watched_similarity[RATING],weights=picked_userid_watched_similarity[SIMILARITY_SCORE]), 6)
except Exception as e:
predicted_rating = 0
# Save the predicted rating in the dictionary
rating_prediction[picked_movie] = predicted_rating
except Exception as e:
rating_prediction[picked_movie] = 0
# Return the top recommended movies
return sorted(rating_prediction.items(), key=operator.itemgetter(1), reverse=True)[:number_of_recommendations]
except Exception as e:
print(e)
raise KeyError(str(e))
def predict(self,X):
predictions = []
for index,row in X.iterrows():
score = self.item_based_rec(int(row["uid"]),int(row["numberOfRecommendation"]))
df = pd.DataFrame(score,columns=['ItemId','Ratings'])
predictions.append(df)
return predictions"""
filename = os.path.join(deployPath, 'script', 'item_recommendation.py')
# print(deploy_path)
f = open(filename, "wb")
f.write(str(code).encode('utf8'))
f.close()
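# Standalone sketch of the rating-prediction step embedded in the generated
# code above: the predicted rating for an unseen item is the similarity-
# weighted average of the user's ratings on its most similar rated items.
# The numbers below are made up purely for illustration.
if __name__ == '__main__':
    import numpy as np
    ratings = np.array([4.0, 3.5, 5.0])       # user's ratings on similar items
    similarity = np.array([0.9, 0.7, 0.4])    # cosine similarity to the unseen item
    predicted = round(np.average(ratings, weights=similarity), 6)
    print(predicted)  # 4.025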
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
from pathlib import Path
from AION.prediction_package.imports import importModule
from AION.prediction_package import utility
from AION.prediction_package.utility import TAB_CHAR
from importlib.metadata import version
"""
This file provides the functionality that is common to most of the
problem-type deployments.
"""
def main_code():
return """
class predict():
def __init__(self):
self.profiler = inputprofiler()
self.selector = selector()
self.trainer = trainer()
self.formatter = output_format()
def run(self, data):
try:
df = self._parse_data(data)
raw_df = df.copy()
df = self.profiler.run(df)
df = self.selector.run(df)
df = self.trainer.run(df)
output = self.formatter.run(raw_df, df)
print("predictions:",output)
return (output)
except Exception as e:
output = {"status":"FAIL","message":str(e).strip('"')}
print("predictions:",json.dumps(output))
return (json.dumps(output))
def _parse_data(self, data):
file_path = Path(data)
if file_path.suffix == ".tsv":
df = pd.read_csv(data,encoding='utf-8',sep='\\\\t',skipinitialspace = True,na_values=['-','?'])
elif file_path.suffix in [".csv", ".dat"]:
df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?'])
elif file_path.suffix in [".gz"] and file_path.stem.endswith('.csv'):
df=pd.read_csv(data,encoding='utf-8',skipinitialspace = True,na_values=['-','?'])
elif file_path.suffix == ".json":
with open(data,'r',encoding='utf-8') as f:
jsonData = json.load(f)
df = pd.json_normalize(jsonData)
else:
jsonData = json.loads(data)
df = pd.json_normalize(jsonData)
return df
import sys
if __name__ == "__main__":
output = predict().run(sys.argv[1])
"""
def profiler_code(params, indent=0):
"""
This will create the profiler file based on the config file.
A separate file is created because the profiler is also required for input drift detection.
"""
imported_modules = [
{'module': 'json', 'mod_from': None, 'mod_as': None},
{'module': 'scipy', 'mod_from': None, 'mod_as': None},
{'module': 'joblib', 'mod_from': None, 'mod_as': None},
{'module': 'numpy', 'mod_from': None, 'mod_as': 'np'},
{'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
{'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None}
]
importer = importModule()
utility.import_modules(importer, imported_modules)
code = """
class inputprofiler():
"""
init_code = """
def __init__(self):
"""
if params.get('text_features'):
imported_modules.append({'module':'importlib.util'})
init_code += """
# preprocessing
preprocess_path = Path(__file__).parent.parent/'model'/'preprocess_pipe.pkl'
if not preprocess_path.exists():
raise ValueError(f'Preprocess model file not found: {preprocess_path}')
self.profiler = joblib.load(preprocess_path)
"""
run_code = """
def run(self,df):
df = df.replace(r'^\\s*$', np.NaN, regex=True)
"""
if params.get('input_features_type'):
imported_modules.append({'module':'dtype','mod_from':'numpy'})
run_code += f"""
df = df.astype({params.get('input_features_type')})
"""
if params.get('word2num_features'):
imported_modules.append({'module':'w2n','mod_from':'word2number'})
run_code += f"""
def s2n(value):
try:
x=eval(value)
return x
except:
try:
return w2n.word_to_num(value)
except:
return np.nan
df[{params['word2num_features']}] = df[{params['word2num_features']}].apply(lambda x: s2n(x))"""
if params.get('unpreprocessed_columns'):
run_code += f"""
unpreprocessed_data = df['{params['unpreprocessed_columns'][0]}']
df.drop(['{params['unpreprocessed_columns'][0]}'], axis=1,inplace=True)
"""
if params.get('force_numeric_conv'):
run_code += f"""
df[{params['force_numeric_conv']}] = df[{params['force_numeric_conv']}].apply(pd.to_numeric,errors='coerce')"""
if params.get('conversion_method','').lower() == 'glove':
code_text, modules = __profiler_glove_code(params)
imported_modules.extend( modules)
init_code += code_text
elif params.get('conversion_method','').lower() == 'fasttext':
init_code += __profiler_fasttext_code(params)
run_code += __profiler_main_code(params)
if params.get('unpreprocessed_columns'):
run_code += f"""
df['{params.get('unpreprocessed_columns')[0]}'] = unpreprocessed_data
"""
utility.import_modules(importer, imported_modules)
import_code = importer.getCode()
return import_code + code + init_code + run_code
def __profiler_glove_code(params, indent=2):
modules = []
modules.append({'module':'load_pretrained','mod_from':'text.Embedding'})
modules.append({'module':'TextProcessing','mod_from':'text'})
code = """
model_path = TextProcessing.checkAndDownloadPretrainedModel('glove')
embed_size, pretrained_model = load_pretrained(model_path)
self.profiler.set_params(text_process__vectorizer__external_model = pretrained_model)
"""
return code.replace('\\n', '\\n'+(indent * TAB_CHAR)), modules
def __profiler_fasttext_code(params, indent=2):
code = """
def get_pretrained_model_path():
try:
from AION.appbe.dataPath import DATA_DIR
modelsPath = Path(DATA_DIR)/'PreTrainedModels'/'TextProcessing'
except:
modelsPath = Path('aion')/'PreTrainedModels'/'TextProcessing'
if not modelsPath.exists():
modelsPath.mkdir(parents=True, exist_ok=True)
return modelsPath
if not importlib.util.find_spec('fasttext'):
raise ValueError('fastText not installed')
else:
import os
import fasttext
import fasttext.util
cwd = os.getcwd()
os.chdir(get_pretrained_model_path())
fasttext.util.download_ |