import pandas as pd
import numpy as np
import datetime as dt
import time
from dateutil.relativedelta import relativedelta
from scipy.stats import norm
from zeno_etl_libs.utils.ipc2.engine.data_load import LoadData
from zeno_etl_libs.utils.ipc2.engine.forecast import Forecast
from zeno_etl_libs.utils.ipc2.engine.feat_engg import Feature_Engg
from zeno_etl_libs.utils.ipc_pmf.ipc_drug_fcst.engine.data_pre_process import PreprocessData
from zeno_etl_libs.utils.ipc_pmf.ipc_drug_fcst.engine.segmentation import Segmentation
from zeno_etl_libs.utils.ipc_pmf.ipc_drug_fcst.engine.ts_fcst import TS_forecast
from zeno_etl_libs.utils.ipc_pmf.config_ipc_drug import *
def ipc_drug_forecast(store_id, reset_date, type_list, schema, db, logger):
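# High-level flow: load sales history, CFR/PR loss, calendar and first-bill
# dates; preprocess into weekly and 4-week-aggregated series; segment every
# store_drug ts_id into ABC-XYZ buckets; validate the TS and LGBM models on
# the last 4 weeks and pick the lowest-WMAPE model per bucket; then re-train
# on full history and keep each bucket's best-model forecast.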
store_id_list = ("({})").format(store_id) # for sql pass
last_date = dt.date(day=1, month=4, year=2019) # max history
# define empty variables in case of fail
weekly_fcst = pd.DataFrame()
ts_fcst = pd.DataFrame()
ts_fcst_cols = []
logger.info("Data Loading Started...")
data_load_obj = LoadData()
(
drug_list,
sales_history,
cfr_pr,
calendar,
first_bill_date
) = data_load_obj.load_all_input(
type_list=type_list,
store_id_list=store_id_list,
last_date=last_date,
reset_date=reset_date,
schema=schema,
db=db
)
logger.info("Data Pre Processing Started...")
data_prep_obj = PreprocessData()
(
drug_sales_4w_wtd,
drug_sales_latest_4w,
drug_sales_latest_12w,
train_4w_agg_vald_max_date,
sales_pred_4w_agg_vald,
train_vald_max_date,
sales_pred_vald,
sales_4w_agg,
sales_pred_4w_agg,
sales,
sales_pred,
cal_sales,
sales_daily
) = data_prep_obj.preprocess_all(
sales=sales_history,
drug_list=drug_list,
cfr_pr=cfr_pr,
calendar=calendar,
first_bill_date=first_bill_date,
last_date=last_date
)
train_max_date = sales[date_col].max()
end_date = sales_pred[date_col].max()
logger.info("Segmentation Started...")
seg_obj = Segmentation()
seg_df = seg_obj.get_weekly_segmentation(
df=sales.copy(deep=True),
df_sales_daily=sales_daily.copy(deep=True),
train_max_date=train_max_date,
end_date=end_date
)
seg_df['reset_date'] = str(reset_date)
# ========================================================================
# VALIDATION AND BEST MODEL FOR BUCKET SELECTION
# ========================================================================
# Find validation period actual demand
valid_start_date = train_max_date - relativedelta(weeks=4)
valid_period_demand = sales.loc[sales[date_col] > valid_start_date]
valid_period_demand = valid_period_demand.groupby(key_col,
as_index=False).agg(
{target_col: "sum"})
min_history_date_validation = valid_start_date - relativedelta(weeks=4)
df_min_date = sales_pred_vald.groupby(key_col, as_index=False).agg(
{date_col: 'min'})
df_min_date['min_allowed_date'] = min_history_date_validation
ts_ids_to_drop = \
df_min_date.loc[df_min_date['min_allowed_date'] < df_min_date[date_col]][
key_col].tolist()
# Perform Un-Aggregated TS Forecast
merged_df1 = pd.merge(sales_pred_vald, seg_df, how='left', on=['ts_id'])
merged_df1 = merged_df1[
merged_df1['PLC Status L1'].isin(['Mature', 'New Product'])]
merged_df1 = merged_df1[~merged_df1['ts_id'].isin(ts_ids_to_drop)]
# calculate bucket wise wmape
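# (WMAPE per bucket = sum(|forecast - actual|) / sum(actual) over all ts_ids in
# that ABC-XYZ bucket; e.g. forecasts of 10 and 5 against actuals of 8 and 10
# give (2 + 5) / 18 ~= 0.39)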
valid_wmape = {'Model': []}
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
valid_wmape[bucket] = []
if runs_ts_flag == 1:
ts_fcst_obj = TS_forecast()
ts_fcst, ts_fcst_cols = ts_fcst_obj.apply_ts_forecast(
df=merged_df1.copy(),
train_max_date=train_vald_max_date,
forecast_start=train_vald_max_date + relativedelta(weeks=1))
for model in ts_fcst_cols:
df_model = ts_fcst[[key_col, 'Mixed', model]]
df_model = df_model.groupby(key_col, as_index=False).agg(
{'Mixed': 'first', model: 'sum'})
df_model = df_model.merge(valid_period_demand, on=key_col,
how='left')
df_model['error'] = df_model[model] - df_model[target_col]
df_model['abs_error'] = abs(df_model['error'])
df_bucket_wmape = df_model.groupby('Mixed', as_index=False).agg(
{'abs_error': 'sum', target_col: 'sum'})
df_bucket_wmape['wmape'] = df_bucket_wmape['abs_error'] / \
df_bucket_wmape[target_col]
valid_wmape['Model'].append(model)
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
try:
wmape = \
df_bucket_wmape.loc[df_bucket_wmape['Mixed'] == bucket][
'wmape'].values[0]
except Exception:
wmape = np.inf
valid_wmape[bucket].append(wmape)
if run_ml_flag == 1:
forecast_start = train_vald_max_date + relativedelta(weeks=1)
weekly_fcst = run_LGBM(merged_df1, train_vald_max_date, forecast_start,
logger, is_validation=True)
lgbm_fcst = weekly_fcst.groupby(key_col, as_index=False).agg(
{'preds_lgb': 'sum'})
df_model = lgbm_fcst.merge(valid_period_demand, on=key_col, how='left')
df_model = df_model.merge(seg_df[[key_col, 'Mixed']], how='left',
on='ts_id')
df_model['error'] = df_model['preds_lgb'] - df_model[target_col]
df_model['abs_error'] = abs(df_model['error'])
df_bucket_wmape = df_model.groupby('Mixed', as_index=False).agg(
{'abs_error': 'sum', target_col: 'sum'})
df_bucket_wmape['wmape'] = df_bucket_wmape['abs_error'] / \
df_bucket_wmape[target_col]
valid_wmape['Model'].append('LGBM')
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
try:
wmape = df_bucket_wmape.loc[df_bucket_wmape['Mixed'] == bucket][
'wmape'].values[0]
except Exception:
wmape = np.inf
valid_wmape[bucket].append(wmape)
# Perform Aggregated TS Forecast
merged_df1 = pd.merge(sales_pred_4w_agg_vald, seg_df, how='left',
on=['ts_id'])
merged_df1 = merged_df1[
merged_df1['PLC Status L1'].isin(['Mature', 'New Product'])]
merged_df1 = merged_df1[~merged_df1['ts_id'].isin(ts_ids_to_drop)]
if run_ts_4w_agg_flag == 1:
ts_fcst_obj = TS_forecast()
ts_fcst, ts_fcst_cols = ts_fcst_obj.apply_ts_forecast_agg(
df=merged_df1.copy(),
train_max_date=train_4w_agg_vald_max_date,
forecast_start=train_4w_agg_vald_max_date + relativedelta(weeks=1))
for model in ts_fcst_cols:
df_model = ts_fcst[[key_col, 'Mixed', model]]
df_model = df_model.groupby(key_col, as_index=False).agg(
{'Mixed': 'first', model: 'sum'})
df_model = df_model.merge(valid_period_demand, on=key_col,
how='left')
df_model['error'] = df_model[model] - df_model[target_col]
df_model['abs_error'] = abs(df_model['error'])
df_bucket_wmape = df_model.groupby('Mixed', as_index=False).agg(
{'abs_error': 'sum', target_col: 'sum'})
df_bucket_wmape['wmape'] = df_bucket_wmape['abs_error'] / \
df_bucket_wmape[target_col]
valid_wmape['Model'].append(model)
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
try:
wmape = \
df_bucket_wmape.loc[df_bucket_wmape['Mixed'] == bucket][
'wmape'].values[0]
except Exception:
wmape = np.inf
valid_wmape[bucket].append(wmape)
if run_ml_4w_agg_flag == 1:
forecast_start = train_4w_agg_vald_max_date + relativedelta(weeks=4)
weekly_fcst = run_LGBM(merged_df1, train_4w_agg_vald_max_date,
forecast_start,
logger, is_validation=True, agg_4w=True)
lgbm_fcst = weekly_fcst.groupby(key_col, as_index=False).agg(
{'preds_lgb': 'sum'})
df_model = lgbm_fcst.merge(valid_period_demand, on=key_col, how='left')
df_model = df_model.merge(seg_df[[key_col, 'Mixed']], how='left',
on='ts_id')
df_model['error'] = df_model['preds_lgb'] - df_model[target_col]
df_model['abs_error'] = abs(df_model['error'])
df_bucket_wmape = df_model.groupby('Mixed', as_index=False).agg(
{'abs_error': 'sum', target_col: 'sum'})
df_bucket_wmape['wmape'] = df_bucket_wmape['abs_error'] / \
df_bucket_wmape[target_col]
valid_wmape['Model'].append('LGBM_4w_agg')
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
try:
wmape = df_bucket_wmape.loc[df_bucket_wmape['Mixed'] == bucket][
'wmape'].values[0]
except Exception:
wmape = np.inf
valid_wmape[bucket].append(wmape)
# ========================================================================
# Choose best model based on lowest wmape
# ========================================================================
best_bucket_model = {}
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
min_wmape = min(valid_wmape[bucket])
if min_wmape != np.inf:
best_bucket_model[bucket] = valid_wmape['Model'][
valid_wmape[bucket].index(min_wmape)]
else:
best_bucket_model[bucket] = default_model # default
# FIXED BUCKETS
# best_bucket_model['CY'] = 'LGBM'
# best_bucket_model['CZ'] = 'LGBM'
# best_bucket_model['DY'] = 'LGBM'
# best_bucket_model['DZ'] = 'LGBM'
# ========================================================================
# TRAINING AND FINAL FORECAST
# ========================================================================
# Perform Un-Aggregated TS Forecast
merged_df1 = pd.merge(sales_pred, seg_df, how='left', on=['ts_id'])
merged_df1 = merged_df1[
merged_df1['PLC Status L1'].isin(['Mature', 'New Product'])]
if runs_ts_flag == 1:
ts_fcst_obj = TS_forecast()
ts_fcst, ts_fcst_cols = ts_fcst_obj.apply_ts_forecast(
df=merged_df1.copy(),
train_max_date=train_max_date,
forecast_start=train_max_date + relativedelta(weeks=2))
final_fcst = pd.DataFrame()
for model_fcst in ts_fcst_cols:
df_model_fcst = ts_fcst.groupby(key_col, as_index=False).agg(
{model_fcst: 'sum'})
df_model_fcst.rename({model_fcst: 'fcst'}, axis=1, inplace=True)
df_model_fcst['model'] = model_fcst
final_fcst = final_fcst.append(df_model_fcst)
if run_ml_flag == 1:
forecast_start = train_max_date + relativedelta(weeks=2)
weekly_fcst = run_LGBM(merged_df1, train_max_date, forecast_start,
logger, is_validation=False)
lgbm_fcst = weekly_fcst.groupby(key_col, as_index=False).agg(
{'preds_lgb': 'sum'})
lgbm_fcst.rename({'preds_lgb': 'fcst'}, axis=1, inplace=True)
lgbm_fcst['model'] = 'LGBM'
final_fcst = final_fcst.append(lgbm_fcst)
# Perform Aggregated TS Forecast
merged_df1 = pd.merge(sales_pred_4w_agg, seg_df, how='left', on=['ts_id'])
merged_df1 = merged_df1[
merged_df1['PLC Status L1'].isin(['Mature', 'New Product'])]
if run_ts_4w_agg_flag == 1:
ts_fcst_obj = TS_forecast()
ts_fcst, ts_fcst_cols = ts_fcst_obj.apply_ts_forecast_agg(
df=merged_df1.copy(),
train_max_date=train_max_date,
forecast_start=train_max_date + relativedelta(weeks=2))
for model_fcst in ts_fcst_cols:
df_model_fcst = ts_fcst.groupby(key_col, as_index=False).agg(
{model_fcst: 'sum'})
df_model_fcst.rename({model_fcst: 'fcst'}, axis=1, inplace=True)
df_model_fcst['model'] = model_fcst
final_fcst = final_fcst.append(df_model_fcst)
if run_ml_4w_agg_flag == 1:
forecast_start = train_max_date + relativedelta(weeks=2)
weekly_fcst = run_LGBM(merged_df1, train_max_date, forecast_start,
logger, is_validation=False, agg_4w=True)
lgbm_fcst = weekly_fcst.groupby(key_col, as_index=False).agg(
{'preds_lgb': 'sum'})
lgbm_fcst.rename({'preds_lgb': 'fcst'}, axis=1, inplace=True)
lgbm_fcst['model'] = 'LGBM_4w_agg'
final_fcst = final_fcst.append(lgbm_fcst)
final_fcst = final_fcst.merge(seg_df[[key_col, 'Mixed']], on=key_col,
how='left')
final_fcst.rename({'Mixed': 'bucket'}, axis=1, inplace=True)
# Choose buckets forecast of best models as final forecast
final_selected_fcst = pd.DataFrame()
for bucket in best_bucket_model.keys():
df_selected = final_fcst.loc[(final_fcst['bucket'] == bucket) &
(final_fcst['model'] == best_bucket_model[
bucket])]
final_selected_fcst = final_selected_fcst.append(df_selected)
# add comb rejected due to recent sales
list_all_comb = final_fcst[key_col].unique().tolist()
list_all_final_comb = final_selected_fcst[key_col].unique().tolist()
list_comb_rejects = list(set(list_all_comb) - set(list_all_final_comb))
comb_fcst_to_add = final_fcst.loc[
(final_fcst[key_col].isin(list_comb_rejects))
& (final_fcst["model"] == default_model)]
final_selected_fcst = final_selected_fcst.append(comb_fcst_to_add)
final_selected_fcst = final_selected_fcst.merge(seg_df[[key_col, 'std']],
on=key_col, how='left')
final_selected_fcst[[store_col, drug_col]] = final_selected_fcst[
'ts_id'].str.split('_', expand=True)
final_selected_fcst[store_col] = final_selected_fcst[store_col].astype(int)
model_name_map = {'preds_ETS_12w': 'ETS_12w', 'preds_ma': 'MA',
'preds_ETS_auto': 'ETS_auto', 'preds_croston': 'Croston',
'preds_ETS_4w_auto': 'ETS_4w_auto', 'preds_AE_ts': 'AvgTS',
'preds_prophet': 'Prophet'}
final_selected_fcst["model"] = final_selected_fcst["model"].map(
model_name_map).fillna(final_selected_fcst["model"])
return final_selected_fcst, seg_df, drug_sales_latest_12w, drug_sales_latest_4w, drug_sales_4w_wtd
def run_LGBM(merged_df1, train_max_date, forecast_start, logger, is_validation=False, agg_4w=False):
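# Rolling weekly ML forecast: for each step ahead (4 weekly steps, or a single
# 4-week-aggregated step when agg_4w=True), lag features are rebuilt with an
# extra shift via Feature_Engg.feat_agg and a model is fit through
# Forecast.get_STM_forecast; the per-step predictions are stacked into forecast_df.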
start_time = time.time()
merged_df1['All'] = 'All'
slice_col = 'All'
forecast_volume = merged_df1[merged_df1[date_col] > train_max_date][
target_col].sum()
assert forecast_volume == 0
logger.info(
"forecast start {} total volume: {}".format(forecast_start,
forecast_volume)
)
forecast_df = pd.DataFrame()
validation_df = pd.DataFrame()
weekly_fcst = pd.DataFrame()
if agg_4w:
end_range = 2
else:
end_range = 5
for i in range(1, end_range):
if is_validation:
num_shift_lags = i
else:
num_shift_lags = i + 1
# for group_name in merged_df1[slice_col].dropna.unique():
# slice_col = 'Mixed'
# for groups in [['AW', 'BW', 'CW', 'DW'], ['AX', 'BX', 'CX', 'DX'], ['AY', 'BY', 'CY', 'DY'], ['AZ', 'BZ', 'CZ', 'DZ']]:
for groups in ['All']:
logger.info('Group: {}'.format(groups))
logger.info("Feature Engineering Started...")
feat_df = pd.DataFrame()
for one_df in [merged_df1]:
feat_engg_obj = Feature_Engg()
one_feat_df = feat_engg_obj.feat_agg(
one_df[
one_df[slice_col] == groups
].drop(slice_col, axis=1).copy(deep=True),
train_max_date=train_max_date,
num_shift_lag=num_shift_lags
)
feat_df = pd.concat([one_feat_df, feat_df])
if pd.DataFrame(feat_df).empty:
continue
logger.info(
"Forecasting Started for {}...".format(forecast_start))
forecast_obj = Forecast()
fcst_df, val_df, Feature_Imp_all = forecast_obj.get_STM_forecast(
feat_df.copy(deep=True),
forecast_start=forecast_start,
num_shift_lags=num_shift_lags
)
forecast_df = pd.concat([forecast_df, fcst_df], axis=0)
validation_df = pd.concat([validation_df, val_df])
ml_fc_cols = [i for i in forecast_df.columns if
i.startswith('preds_')]
# forecast_df['AE'] = forecast_df[ml_fc_cols].mean(axis=1)
end_time = time.time()
logger.info(
"total time for {} forecast: {}"
.format(forecast_start, end_time - start_time)
)
forecast_start = forecast_start + relativedelta(weeks=1)
# weekly_fcst = pd.concat([weekly_fcst, forecast_df])
return forecast_df

# ---- end of file: zeno_etl_libs/utils/ipc_pmf/ipc_drug_fcst/forecast_main.py (package zeno-etl-libs 1.0.126) ----
import numpy as np
np.random.seed(0)
import pandas as pd
# import time
# import re
# from datetime import date
# from dateutil.relativedelta import relativedelta
# from statsmodels.tsa.exponential_smoothing.ets import ETSModel
from prophet import Prophet
from statsmodels.tsa.api import ExponentialSmoothing
# import sktime
from zeno_etl_libs.utils.ipc2.helpers.helper_functions import \
applyParallel_croston
# from boruta import BorutaPy
from zeno_etl_libs.utils.ipc_pmf.config_ipc_drug import (
date_col,
target_col,
models_un_agg,
models_agg
)
import logging
logger = logging.getLogger("_logger")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
class TS_forecast:
def train_test_split(self, df, train_max_date, forecast_start):
df.rename(columns={date_col: 'ds', target_col: 'y'}, inplace=True)
df.sort_values(by=['ds'], inplace=True)
train = df[df['ds'] <= train_max_date]
test = df[df['ds'] >= forecast_start]
return train, test
def Croston_TSB(self, ts, extra_periods=4, alpha=0.4, beta=0.4):
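# Croston TSB (Teunter-Syntetos-Babai) variant for intermittent demand:
# the demand level `a` (alpha) and the demand probability `p` (beta) are
# smoothed separately and the forecast is f = p * a, so the forecast decays
# in zero-demand periods instead of staying flat as in classic Croston.
# The future forecast over `extra_periods` is flat at the last p * a.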
d = np.array(ts) # Transform the input into a numpy array
cols = len(d) # Historical period length
d = np.append(d, [
np.nan] * extra_periods) # Append np.nan into the demand array to cover future periods
# level (a), probability(p) and forecast (f)
a, p, f = np.full((3, cols + extra_periods), np.nan)
# Initialization
first_occurence = np.argmax(d[:cols] > 0)
a[0] = d[first_occurence]
p[0] = 1 / (1 + first_occurence)
f[0] = p[0] * a[0]
# Create all the t+1 forecasts
for t in range(0, cols):
if d[t] > 0:
a[t + 1] = alpha * d[t] + (1 - alpha) * a[t]
p[t + 1] = beta * (1) + (1 - beta) * p[t]
else:
a[t + 1] = a[t]
p[t + 1] = (1 - beta) * p[t]
f[t + 1] = p[t + 1] * a[t + 1]
# Future Forecast
a[cols + 1:cols + extra_periods] = a[cols]
p[cols + 1:cols + extra_periods] = p[cols]
f[cols + 1:cols + extra_periods] = f[cols]
df = pd.DataFrame.from_dict(
{"Demand": d, "Forecast": f, "Period": p, "Level": a,
"Error": d - f})
return df[-extra_periods:]
def ETS_forecast(self, train, test, latest_12w_fit=False):
try:
train.set_index(['ds'], inplace=True)
test.set_index(['ds'], inplace=True)
train.index.freq = train.index.inferred_freq
test.index.freq = test.index.inferred_freq
if latest_12w_fit:
train = train[-12:] # use only latest 3 months
fit = ExponentialSmoothing(train['y']).fit()
preds = fit.forecast(len(test) + 1)
preds = preds[-len(test):]
except Exception as e:
logger.info("error in ETS fcst")
logger.info(str(e))
preds = 0
return preds
def ma_forecast(self, data):
"""
Purpose: Compute MA forecast for the for the forecast horizon specified
Inputs: time series to create forecast
Output: series with forecasted values
"""
sma_df = data.copy(deep=True)
yhat = []
if len(data) >= 8:
for i in range(5):
sma_val = sma_df.rolling(8).mean().iloc[-1]
sma_df.loc[sma_df.index.max() + 1] = sma_val
yhat.append(sma_val)
else:
for i in range(5):
sma_val = sma_df.rolling(len(data)).mean().iloc[-1]
sma_df.loc[sma_df.index.max() + 1] = sma_val
yhat.append(sma_val)
logger.info(yhat)
return yhat[-4:]
def prophet_fcst(self, train, test, params=None):
# reg_list = []
try:
if params is None:
pro = Prophet()
else:
pro = Prophet(n_changepoints=params)
# for j in train.columns:
# if j not in col_list:
# pro.add_regressor(j)
# reg_list.append(j)
pro.fit(train[['ds', 'y']])
pred_f = pro.predict(test)
test = test[["ds", "y"]]
test = pd.merge(test, pred_f, on="ds", how="left")
except Exception as e:
logger.info("error in prophet fcst")
logger.info(str(e))
test['yhat'] = 0
return test
def ts_forecast_un_agg(self, df, train_max_date, forecast_start):
train, test = self.train_test_split(df, train_max_date=train_max_date,
forecast_start=forecast_start)
test = test.sort_values(by=['ds'])
if 'croston' in models_un_agg:
preds_croston = self.Croston_TSB(train['y'])
test['preds_croston'] = preds_croston['Forecast'].values
if 'ETS_Auto' in models_un_agg:
preds_ETS = self.ETS_forecast(train.copy(), test.copy())
try:
test['preds_ETS_auto'] = preds_ETS.values
except Exception:
test['preds_ETS_auto'] = 0
if 'ETS_12w' in models_un_agg:
preds_ETS = self.ETS_forecast(train.copy(), test.copy(),
latest_12w_fit=True)
try:
test['preds_ETS_12w'] = preds_ETS.values
except Exception:
test['preds_ETS_12w'] = 0
if 'MA' in models_un_agg:
preds_ma = self.ma_forecast(train['y'])
test['preds_ma'] = preds_ma
if 'prophet' in models_un_agg:
preds_prophet = self.prophet_fcst(train.copy(), test.copy())
test['preds_prophet'] = preds_prophet['yhat'].values
return test
def ts_forecast_agg(self, df, train_max_date, forecast_start):
train, test = self.train_test_split(df, train_max_date=train_max_date,
forecast_start=forecast_start)
test = test.sort_values(by=['ds'])
if 'ETS_4w_agg' in models_agg:
preds_ETS = self.ETS_forecast(train.copy(), test.copy(), latest_12w_fit=True)
try:
test['preds_ETS_4w_auto'] = preds_ETS.values
except Exception:
test['preds_ETS_4w_auto'] = 0
return test
def apply_ts_forecast(self, df, train_max_date, forecast_start):
# global train_date
# train_date = train_max_date
# global forecast_start_date
# forecast_start_date = forecast_start
preds = applyParallel_croston(
df.groupby('ts_id'),
func=self.ts_forecast_un_agg, train_max_date=train_max_date,
forecast_start=forecast_start
)
preds.rename(columns={'ds': date_col, 'y': target_col}, inplace=True)
ts_fcst_cols = [i for i in preds.columns if i.startswith('preds_')]
for col in ts_fcst_cols:
preds[col].fillna(0, inplace=True)
preds[col] = np.where(preds[col] < 0, 0, preds[col])
# preds['preds_AE_ts'] = preds[ts_fcst_cols].mean(axis=1)
# ts_fcst_cols = ts_fcst_cols + ['preds_AE_ts']
return preds, ts_fcst_cols
def apply_ts_forecast_agg(self, df, train_max_date, forecast_start):
# global train_date
# train_date = train_max_date
# global forecast_start_date
# forecast_start_date = forecast_start
preds = applyParallel_croston(
df.groupby('ts_id'),
func=self.ts_forecast_agg, train_max_date=train_max_date,
forecast_start=forecast_start
)
preds.rename(columns={'ds': date_col, 'y': target_col}, inplace=True)
ts_fcst_cols = [i for i in preds.columns if i.startswith('preds_')]
for col in ts_fcst_cols:
preds[col].fillna(0, inplace=True)
preds[col] = np.where(preds[col] < 0, 0, preds[col])
# preds['preds_AE_ts'] = preds[ts_fcst_cols].mean(axis=1)
return preds, ts_fcst_cols

# ---- end of file: zeno_etl_libs/utils/ipc_pmf/ipc_drug_fcst/engine/ts_fcst.py (package zeno-etl-libs 1.0.126) ----
import pandas as pd
import datetime
import numpy as np
from zeno_etl_libs.utils.ipc_pmf.config_ipc_drug import date_col, store_col, \
drug_col, target_col, key_col, local_testing
class PreprocessData:
def add_ts_id(self, df):
df = df[~df[drug_col].isnull()].reset_index(drop=True)
df['ts_id'] = (
df[store_col].astype(int).astype(str)
+ '_'
+ df[drug_col].astype(int).astype(str)
)
return df
def preprocess_sales(self, df, drug_list):
df.rename(columns={
'net_sales_quantity': target_col
}, inplace=True)
df.rename(columns={
'sales_date': date_col
}, inplace=True)
set_dtypes = {
store_col: int,
drug_col: int,
date_col: str,
target_col: float
}
df = df.astype(set_dtypes)
df[target_col] = df[target_col].round()
df[date_col] = pd.to_datetime(df[date_col])
df = df.groupby(
[store_col, drug_col, key_col, date_col]
)[target_col].sum().reset_index()
df = df[df[drug_col].isin(drug_list[drug_col].unique().tolist())]
return df
def get_formatted_data(self, df):
df_start = df.groupby([key_col])[date_col].min().reset_index().rename(
columns={date_col: 'sales_start'})
df = df[[key_col, date_col, target_col]]
min_date = df[date_col].dropna().min()
end_date = df[date_col].dropna().max()
date_range = []
date_range = pd.date_range(
start=min_date,
end=end_date,
freq='d'
)
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = np.nan
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = pd.merge(df, df_start, how='left', on=key_col)
df = df[df[date_col] >= df['sales_start']]
df[[store_col, drug_col]] = df[key_col].str.split('_', expand=True)
df[[store_col, drug_col]] = df[[store_col, drug_col]]
df[store_col] = df[store_col].astype(int)
df[drug_col] = df[drug_col].astype(int)
return df
def preprocess_cfr_pr(self, df):
set_dtypes = {
store_col: int,
drug_col: int,
'loss_quantity': int
}
df = df.astype(set_dtypes)
df['shortbook_date'] = pd.to_datetime(df['shortbook_date'])
return df
def merge_cfr_pr(self, sales, cfr_pr):
df = sales.merge(cfr_pr,
left_on=[store_col, drug_col, date_col],
right_on=[store_col, drug_col, 'shortbook_date'],
how='left')
df[date_col] = df[date_col].combine_first(df['shortbook_date'])
df[target_col].fillna(0, inplace=True)
df['loss_quantity'].fillna(0, inplace=True)
df[target_col] += df['loss_quantity']
df.drop(['shortbook_date', 'loss_quantity'], axis=1, inplace=True)
return df
def preprocess_calendar(self, df, last_date):
df.rename(columns={'date': date_col}, inplace=True)
df[date_col] = pd.to_datetime(df[date_col])
cal_sales = df.copy()
cal_sales['week_begin_dt'] = cal_sales.apply(
lambda x: x[date_col] - datetime.timedelta(x['day_of_week']),
axis=1)
cal_sales['month_begin_dt'] = cal_sales.apply(
lambda x: x['date'] - datetime.timedelta(x['date'].day - 1), axis=1)
cal_sales['key'] = 1
ld = pd.to_datetime(last_date)
cal_sales = cal_sales[cal_sales[date_col] > ld]
return df, cal_sales
def merge_calendar(self, sales, calendar):
df = sales.merge(calendar,
how='left',
on=date_col
)
# df_week_days_count = df.groupby([key_col, 'year', 'week_of_year'])[date_col].count().reset_index().rename(columns = {date_col:'week_days_count'})
# df['week_days_count'] = 1
df['week_begin_dt'] = df.apply(
lambda x: x[date_col] - datetime.timedelta(x['day_of_week']),
axis=1)
df_week_days_count = df.groupby(['ts_id', 'week_begin_dt'])[
date_col].count().reset_index().rename(
columns={date_col: 'week_days_count'})
# df = df.groupby(['ts_id', store_col, drug_col, ]).resample('W-Mon', on =date_col )[target_col].sum().reset_index()
df = df.groupby(['ts_id', store_col, drug_col, 'week_begin_dt'])[
target_col].sum().reset_index()
df = pd.merge(df, df_week_days_count, how='left',
on=[key_col, 'week_begin_dt'])
df = df[df['week_days_count'] == 7].reset_index(drop=True)
df.drop(columns=['week_days_count'], inplace=True)
df.rename(columns={'week_begin_dt': date_col}, inplace=True)
return df
def preprocess_bill_date(self, df):
df.rename(columns={'store-id': store_col}, inplace=True)
df['bill_date'] = pd.to_datetime(df['bill_date'])
return df
def merge_first_bill_date(self, sales, first_bill_date):
df = pd.merge(sales, first_bill_date, on=[store_col])
df = df[df[date_col] >= df['bill_date']].reset_index(drop=True)
df.drop(columns=['bill_date'], inplace=True)
return df
def make_future_df(self, df):
start_date_df = (
df
.groupby(key_col)[date_col]
.min()
.reset_index()
.rename(columns={date_col: 'start_date'})
)
df = df[[key_col, date_col, target_col]]
end_date = df[date_col].max() + datetime.timedelta(weeks=5)
min_date = df[date_col].min()
date_range = pd.date_range(
start=min_date,
end=end_date,
freq="W-MON"
)
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = 0
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = df.merge(start_date_df, on=key_col, how='left')
df = df[
df[date_col] >= df['start_date']
]
df.drop('start_date', axis=1, inplace=True)
df[[store_col, drug_col]] = df[key_col].str.split('_', expand=True)
return df
def make_future_df_4w_agg(self, df):
start_date_df = (
df
.groupby(key_col)[date_col]
.min()
.reset_index()
.rename(columns={date_col: 'start_date'})
)
df = df[[key_col, date_col, target_col]]
fcst_week_start = df[date_col].max() + datetime.timedelta(weeks=5)
date_range = [fcst_week_start]
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = 0
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = df.merge(start_date_df, on=key_col, how='left')
df = df[
df[date_col] >= df['start_date']
]
df.drop('start_date', axis=1, inplace=True)
df[[store_col, drug_col]] = df[key_col].str.split('_', expand=True)
return df
def sales_pred_vald_df(
self,
df
):
vald_max_date = df[date_col].max() - datetime.timedelta(weeks=4)
df_vald_train = df.loc[df[date_col] <= vald_max_date]
df_vald_future = df.loc[~(df[date_col] <= vald_max_date)]
df_vald_future[target_col] = 0
df_final = df_vald_train.append(df_vald_future)
train_vald_max_date = df_vald_train[date_col].max()
return df_final, train_vald_max_date
def sales_4w_agg(
self,
df
):
# =====================================================================
# Combine 4 weeks into an arbitrary group
# =====================================================================
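# Illustrative example: with 14 weeks of history, the latest 12 weeks are kept
# (3 full groups of 4), demand is summed within each group and the group is
# stamped with its first week's date; series shorter than 12 weeks are skipped.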
unique_ts_ids = df[key_col].unique().tolist()
sales_4w_agg = pd.DataFrame()
for ts_id in unique_ts_ids:
week_gp_size = 4
sales_temp = df.loc[df[key_col] == ts_id]
available_week_count = sales_temp.shape[0]
if available_week_count >= (3 * week_gp_size):
allowable_week_count = int(
week_gp_size * np.fix(available_week_count / week_gp_size))
sales_temp = sales_temp.sort_values(by=["date"], ascending=True)
sales_temp = sales_temp[-allowable_week_count:]
week_gps_count = int(allowable_week_count / week_gp_size)
week_gps_list = np.arange(1, week_gps_count + 1, 1)
week_gps_id_list = np.repeat(week_gps_list, week_gp_size)
sales_temp["week_gps_id"] = week_gps_id_list
sales_temp = sales_temp.groupby(
[key_col, store_col, drug_col, "week_gps_id"],
as_index=False).agg(
{"date": "first", "actual_demand": "sum"})
sales_4w_agg = sales_4w_agg.append(sales_temp)
sales_4w_agg = sales_4w_agg.drop("week_gps_id", axis=1)
sales_pred_4w_agg = self.make_future_df_4w_agg(sales_4w_agg.copy())
return sales_4w_agg, sales_pred_4w_agg
def drug_sales_12w_4w(
self,
df
):
date_12w_back = df[date_col].max() - datetime.timedelta(weeks=12)
df_12w = df.loc[df[date_col] > date_12w_back]
date_4w_back = df[date_col].max() - datetime.timedelta(weeks=4)
df_4w = df.loc[df[date_col] > date_4w_back]
df_count = df_12w.groupby([store_col, drug_col], as_index=False).agg(
{target_col: 'count'})
df_count.rename({target_col: 'week_count'}, axis=1, inplace=True)
drugs_with_12w_data = df_count.loc[df_count['week_count'] == 12][drug_col].tolist()
drugs_with_4w_data = df_count.loc[(df_count['week_count'] >= 4) & (df_count['week_count'] != 12)][drug_col].tolist()
df_12w = df_12w.groupby([store_col, drug_col], as_index=False).agg(
{target_col: 'sum'})
df_12w = df_12w.loc[df_12w[drug_col].isin(drugs_with_12w_data)]
df_4w = df_4w.groupby([store_col, drug_col], as_index=False).agg(
{target_col: 'sum'})
df_4w = df_4w.loc[df_4w[drug_col].isin(drugs_with_4w_data)]
return df_12w, df_4w
def drug_sales_4w_wtd(
self,
df
):
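# Weighted recent demand: drugs sold in all of the latest 4 weeks get weekly
# weights 0.1/0.2/0.3/0.4 (oldest to newest), 3-week sellers get 0.2/0.3/0.5
# and 2-week sellers 0.4/0.6; the weighted sum is then scaled by 4 and rounded
# to give a 4-week demand figure.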
date_4w_back = df[date_col].max() - datetime.timedelta(weeks=4)
df_4w = df.loc[df[date_col] > date_4w_back]
# drugs with sales > 0 in all of the latest 4 weeks
df_4w_1 = df_4w[df_4w[target_col] > 0]
df_4w_cnt = df_4w_1.groupby([store_col, drug_col], as_index=False).agg(
{target_col: 'count'})
df_4w_cnt.rename({target_col: 'week_count'}, axis=1, inplace=True)
list_4w_drugs = df_4w_cnt.loc[df_4w_cnt['week_count'] == 4][drug_col].tolist()
df_4w_1 = df_4w_1.loc[df_4w_1[drug_col].isin(list_4w_drugs)]
dates_list = list(df_4w.date.unique())
df_4w_1['weights'] = np.where(df_4w_1[date_col] == dates_list[3], 0.4, 0)
df_4w_1['weights'] = np.where(df_4w_1[date_col] == dates_list[2], 0.3, df_4w_1['weights'])
df_4w_1['weights'] = np.where(df_4w_1[date_col] == dates_list[1], 0.2, df_4w_1['weights'])
df_4w_1['weights'] = np.where(df_4w_1[date_col] == dates_list[0], 0.1, df_4w_1['weights'])
df_4w_1['wtd_demand'] = df_4w_1[target_col] * df_4w_1['weights']
df_4w_1 = df_4w_1.groupby([store_col, drug_col], as_index=False).agg(
{'wtd_demand': 'sum'})
# drugs with sales > 0 in only 3 of the latest 4 weeks
df_4w_2 = df_4w[df_4w[target_col] > 0]
df_4w_cnt = df_4w_2.groupby([store_col, drug_col], as_index=False).agg(
{target_col: 'count'})
df_4w_cnt.rename({target_col: 'week_count'}, axis=1, inplace=True)
list_4w_drugs = df_4w_cnt.loc[df_4w_cnt['week_count'] == 3][drug_col].tolist()
df_4w_2 = df_4w_2.loc[df_4w_2[drug_col].isin(list_4w_drugs)]
df_4w_2['w_count'] = np.tile(np.arange(1, 4), len(df_4w_2))[:len(df_4w_2)]
df_4w_2['weights'] = np.where(df_4w_2['w_count'] == 3, 0.5, 0)
df_4w_2['weights'] = np.where(df_4w_2['w_count'] == 2, 0.3, df_4w_2['weights'])
df_4w_2['weights'] = np.where(df_4w_2['w_count'] == 1, 0.2, df_4w_2['weights'])
df_4w_2['wtd_demand'] = df_4w_2[target_col] * df_4w_2['weights']
df_4w_2 = df_4w_2.groupby([store_col, drug_col], as_index=False).agg(
{'wtd_demand': 'sum'})
# drugs with sales > 0 in only 2 of the latest 4 weeks
df_4w_3 = df_4w[df_4w[target_col] > 0]
df_4w_cnt = df_4w_3.groupby([store_col, drug_col], as_index=False).agg(
{target_col: 'count'})
df_4w_cnt.rename({target_col: 'week_count'}, axis=1, inplace=True)
list_4w_drugs = df_4w_cnt.loc[df_4w_cnt['week_count'] == 2][drug_col].tolist()
df_4w_3 = df_4w_3.loc[df_4w_3[drug_col].isin(list_4w_drugs)]
df_4w_3['w_count'] = np.tile(np.arange(1, 3), len(df_4w_3))[:len(df_4w_3)]
df_4w_3['weights'] = np.where(df_4w_3['w_count'] == 2, 0.6, 0)
df_4w_3['weights'] = np.where(df_4w_3['w_count'] == 1, 0.4, df_4w_3['weights'])
df_4w_3['wtd_demand'] = df_4w_3[target_col] * df_4w_3['weights']
df_4w_3 = df_4w_3.groupby([store_col, drug_col], as_index=False).agg(
{'wtd_demand': 'sum'})
df_4w = pd.concat([df_4w_1, df_4w_2, df_4w_3], axis=0)
df_4w['wtd_demand'] = np.round(df_4w['wtd_demand'] * 4)
return df_4w
def preprocess_all(
self,
sales=None,
cfr_pr=None,
drug_list=None,
calendar=None,
first_bill_date=None,
last_date=None,
):
sales = self.add_ts_id(sales)
# filter
#################################################
if local_testing == 1:
tsid_list = \
sales.sort_values(by=['net_sales_quantity'], ascending=False)[
key_col].unique().tolist()[:20]
sales = sales[sales[key_col].isin(tsid_list)]
#################################################
sales = self.preprocess_sales(sales, drug_list)
sales = self.get_formatted_data(sales)
cfr_pr = self.preprocess_cfr_pr(cfr_pr)
sales_daily = self.merge_cfr_pr(sales, cfr_pr)
calendar, cal_sales = self.preprocess_calendar(calendar, last_date)
sales = self.merge_calendar(sales_daily, calendar)
first_bill_date = self.preprocess_bill_date(first_bill_date)
sales = self.merge_first_bill_date(sales, first_bill_date)
sales_pred = self.make_future_df(sales.copy())
sales_pred_vald, train_vald_max_date = self.sales_pred_vald_df(sales)
sales_4w_agg, sales_pred_4w_agg = self.sales_4w_agg(sales)
sales_pred_4w_agg_vald, train_4w_agg_vald_max_date = self.sales_pred_vald_df(sales_4w_agg)
drug_sales_latest_12w, drug_sales_latest_4w = self.drug_sales_12w_4w(sales)
drug_sales_4w_wtd = self.drug_sales_4w_wtd(sales)
return (
drug_sales_4w_wtd,
drug_sales_latest_4w,
drug_sales_latest_12w,
train_4w_agg_vald_max_date,
sales_pred_4w_agg_vald,
train_vald_max_date,
sales_pred_vald,
sales_4w_agg,
sales_pred_4w_agg,
sales,
sales_pred,
cal_sales,
sales_daily
)

# ---- end of file: zeno_etl_libs/utils/ipc_pmf/ipc_drug_fcst/engine/data_pre_process.py (package zeno-etl-libs 1.0.126) ----
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
from tqdm import tqdm
import logging
import warnings
warnings.filterwarnings("ignore")
logger = logging.getLogger("_logger")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
from zeno_etl_libs.utils.ipc_pmf.config_ipc_drug import (
date_col,
target_col,
store_col,
drug_col,
eol_cutoff
)
class Segmentation:
def add_ts_id(self, df):
df['ts_id'] = (
df[store_col].astype(int).astype(str)
+ '_'
+ df[drug_col].astype(int).astype(str)
)
return df
def _calc_abc(self, df52):
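# ABC classification on the cumulative share of last-year sales per ts_id:
# A = top ~50% of volume, B = next 30% (up to 80%), C = up to 95%, D = the tail.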
B_cutoff = 0.5
C_cutoff = 0.8
D_cutoff = 0.95
tot_sales = (
df52.groupby([
'ts_id'
])[target_col].sum().reset_index()
)
tot_sales.rename(columns={target_col: 'total_LY_sales'}, inplace=True)
tot_sales.sort_values('total_LY_sales', ascending=False, inplace=True)
tot_sales["perc_sales"] = (
tot_sales['total_LY_sales'] / tot_sales['total_LY_sales'].sum()
)
tot_sales["cum_perc_sales"] = tot_sales.perc_sales.cumsum()
tot_sales["ABC"] = "A"
tot_sales.loc[tot_sales.cum_perc_sales > B_cutoff, "ABC"] = "B"
tot_sales.loc[tot_sales.cum_perc_sales > C_cutoff, "ABC"] = "C"
tot_sales.loc[tot_sales.cum_perc_sales > D_cutoff, "ABC"] = "D"
# tot_sales = self.add_ts_id(tot_sales)
return tot_sales[['ts_id', 'ABC', 'total_LY_sales']]
# TODO: lower COV cutoffs
def get_abc_classification(self, df52):
province_abc = df52.groupby(
[store_col]
).apply(self._calc_abc)
province_abc = province_abc[['ts_id', "ABC"]].reset_index(drop=True)
# one
tot_sales = (
df52
.groupby(['ts_id'])[target_col]
.agg(['sum', 'mean'])
.reset_index()
)
tot_sales.rename(
columns={'sum': 'total_LY_sales', 'mean': 'avg_ly_sales'},
inplace=True)
tot_sales = tot_sales.merge(
province_abc,
on=['ts_id'],
how='left'
)
tot_sales = tot_sales.drop_duplicates()
# tot_sales = self.add_ts_id(tot_sales)
tot_sales = tot_sales[['ts_id', 'ABC']]
return tot_sales
def get_xyzw_classification(self, df1):
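# WXYZ classification on the coefficient of variation (std/mean) of non-zero
# demand: W <= 0.5, X <= 0.8, Y <= 1.2, Z otherwise (or too little history).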
input_ts_id = df1['ts_id'].unique()
df1 = df1[df1[target_col] > 0]
cov_df = df1.groupby(['ts_id'])[target_col].agg(
["mean", "std", "count", "sum"])
cov_df.reset_index(drop=False, inplace=True)
cov_df['cov'] = np.where(
((cov_df["count"] > 2) & (cov_df["sum"] > 0)),
(cov_df["std"]) / (cov_df["mean"]),
np.nan
)
cov_df['WXYZ'] = 'Z'
cov_df.loc[cov_df['cov'] <= 1.2, 'WXYZ'] = 'Y'
cov_df.loc[cov_df['cov'] <= 0.8, 'WXYZ'] = 'X'
cov_df.loc[cov_df['cov'] <= 0.5, 'WXYZ'] = 'W'
# cov_df = self.add_ts_id(cov_df)
cov_df = cov_df[['ts_id', 'cov', 'WXYZ']]
non_mapped_ts_ids = list(
set(input_ts_id) - set(cov_df['ts_id'].unique())
)
non_mapped_cov = pd.DataFrame({
'ts_id': non_mapped_ts_ids,
'cov': [np.nan] * len(non_mapped_ts_ids),
'WXYZ': ['Z'] * len(non_mapped_ts_ids)
})
cov_df = pd.concat([cov_df, non_mapped_cov], axis=0)
cov_df = cov_df.reset_index(drop=True)
return cov_df
def get_std(self, df1):
input_ts_id = df1['ts_id'].unique()
# df1 = df1[df1[target_col]>0]
std_df = df1.groupby(['ts_id'])[target_col].agg(["std"])
return std_df
def calc_interval_mean(self, x, key):
df = pd.DataFrame({"X": x, "ts_id": key}).reset_index(
drop=True).reset_index()
df = df[df.X > 0]
df["index_shift"] = df["index"].shift(-1)
df["interval"] = df["index_shift"] - df["index"]
df = df.dropna(subset=["interval"])
df['ADI'] = np.mean(df["interval"])
return df[['ts_id', 'ADI']]
def calc_adi(self, df):
# df = self.add_ts_id(df)
logger.info(
'Combinations entering adi: {}'.format(df['ts_id'].nunique()))
dict_of = dict(iter(df.groupby(['ts_id'])))
logger.info("Total tsids in df: {}".format(df.ts_id.nunique()))
logger.info("Total dictionary length: {}".format(len(dict_of)))
list_dict = [
self.calc_interval_mean(dict_of[x][target_col], x) for x in
tqdm(dict_of.keys())
]
data = (
pd.concat(list_dict)
.reset_index(drop=True)
.drop_duplicates()
.reset_index(drop=True)
)
logger.info('Combinations exiting adi: {}'.format(data.ts_id.nunique()))
return data
def get_PLC_segmentation(self, df, mature_cutoff_date, eol_cutoff_date):
df1 = df[df[target_col] > 0]
df1 = df1.groupby(['ts_id']).agg({date_col: [min, max]})
df1.reset_index(drop=False, inplace=True)
df1.columns = [' '.join(col).strip() for col in df1.columns.values]
df1['PLC Status L1'] = 'Mature'
df1.loc[
(df1[date_col + ' min'] > mature_cutoff_date), 'PLC Status L1'
] = 'New Product'
df1.loc[
(df1[date_col + ' max'] <= eol_cutoff_date), 'PLC Status L1'
] = 'EOL'
# df1 = self.add_ts_id(df1)
df1 = df1[['ts_id', 'PLC Status L1']]
return df1
def get_group_mapping(self, seg_df):
seg_df['Mixed'] = seg_df['ABC'].astype(str) + seg_df['WXYZ'].astype(str)
seg_df['Group'] = 'Group3'
group1_mask = seg_df['Mixed'].isin(['AW', 'AX', 'BW', 'BX'])
seg_df.loc[group1_mask, 'Group'] = 'Group1'
group2_mask = seg_df['Mixed'].isin(['AY', 'AZ', 'BY', 'BZ'])
seg_df.loc[group2_mask, 'Group'] = 'Group2'
return seg_df
def calc_dem_pat(self, cov_df, adi_df):
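# Syntetos-Boylan style demand-pattern classification using the average demand
# interval (ADI) and squared CoV: Smooth (ADI < 1.32, CoV^2 < 0.49),
# Intermittent (high ADI, low CoV^2), Erratic (low ADI, high CoV^2),
# Lumpy (both high).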
logger.info('Combinations entering calc_dem_pat: {}'.format(
cov_df.ts_id.nunique()))
logger.info('Combinations entering calc_dem_pat: {}'.format(
adi_df.ts_id.nunique()))
df = pd.merge(cov_df, adi_df, how='left', on='ts_id')
df["cov2"] = np.power(df["cov"], 2)
df["classification"] = "Lumpy"
df.loc[
(df.ADI >= 1.32) & (df.cov2 < 0.49), "classification"
] = "Intermittent"
df.loc[
(df.ADI < 1.32) & (df.cov2 >= 0.49), "classification"
] = "Erratic"
df.loc[
(df.ADI < 1.32) & (df.cov2 < 0.49), "classification"
] = "Smooth"
logger.info(
'Combinations exiting calc_dem_pat: {}'.format(df.ts_id.nunique()))
return df[['ts_id', 'classification']]
def get_start_end_dates_df(self, df, key_col, date_col, target_col,
train_max_date, end_date):
start_end_date_df = (
df[df[target_col] > 0]
.groupby(key_col)[date_col]
.agg({'min', 'max'})
.reset_index()
.rename(columns={'min': 'start_date', 'max': 'end_date'})
)
start_end_date_df.loc[
(
start_end_date_df['end_date'] > (
train_max_date - relativedelta(weeks=eol_cutoff)
)
), 'end_date'
] = end_date
return start_end_date_df
def get_weekly_segmentation(self, df, df_sales_daily, train_max_date,
end_date):
df = df[df[date_col] <= train_max_date]
df1 = df[
df[date_col] > (train_max_date - relativedelta(weeks=52))
].copy(deep=True)
df_std = df_sales_daily[
df_sales_daily[date_col] > (train_max_date - relativedelta(days=90))
].copy(deep=True)
df1 = self.add_ts_id(df1)
abc_df = self._calc_abc(df1)
xyzw_df = self.get_xyzw_classification(df1)
std_df = self.get_std(df_std)
adi_df = self.calc_adi(df1)
demand_pattern_df = self.calc_dem_pat(xyzw_df[['ts_id', 'cov']], adi_df)
mature_cutoff_date = train_max_date - relativedelta(weeks=52)
eol_cutoff_date = train_max_date - relativedelta(weeks=13)
plc_df = self.get_PLC_segmentation(df, mature_cutoff_date,
eol_cutoff_date)
start_end_date_df = self.get_start_end_dates_df(
df, key_col='ts_id',
date_col=date_col,
target_col=target_col,
train_max_date=train_max_date,
end_date=end_date
)
seg_df = plc_df.merge(abc_df, on='ts_id', how='outer')
seg_df = seg_df.merge(xyzw_df, on='ts_id', how='outer')
seg_df = seg_df.merge(adi_df, on='ts_id', how='outer')
seg_df = seg_df.merge(demand_pattern_df, on='ts_id', how='outer')
seg_df = seg_df.merge(start_end_date_df, on='ts_id', how='outer')
seg_df = seg_df.merge(std_df, on='ts_id', how='outer')
seg_df = self.get_group_mapping(seg_df)
seg_df['Mixed'] = np.where(seg_df['Mixed']=='nannan', np.nan, seg_df['Mixed'])
return seg_df

# ---- end of file: zeno_etl_libs/utils/ipc_pmf/ipc_drug_fcst/engine/segmentation.py (package zeno-etl-libs 1.0.126) ----
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from datetime import timedelta
import pandas as pd
# Common utility functions for CRM campaigns are listed in this class.
# Connections are also initiated in the class and should be closed when the script finishes.
class CrmCampaigns:
"""
# Sequence
# Data prep
# no_bill_in_last_n_days(data, run_date, last_n_days_param = 15)
# no_call_in_last_n_days(data, run_date, last_n_days_param = 30)
# patient_latest_store(data)
# remove_dnd(data)
# db_write()
"""
def __init__(self):
self.logger = get_logger()
self.ms_connection_read = MySQL()
self.ms_connection_read.open_connection()
# ALERT: read_only=False is required for a connection that can write
self.ms_connection_write = MySQL(read_only=False)
self.ms_connection_write.open_connection()
self.rs_db = DB()
self.rs_db.open_connection()
##############################
# Utility functions
##############################
def patient_latest_store(self, data_pass):
data_base_c_grp = data_pass.copy()
patients = tuple(data_base_c_grp['patient_id'].to_list())
self.logger.info("Length of patients tuple is - {}".format(len(patients)))
##########################################
# Latest store-id
##########################################
store_q = """
select
`patient-id`,
`store-id`,
`created-at`
from
(
select
`patient-id`,
`store-id`,
`created-at`,
ROW_NUMBER() OVER (partition by `patient-id`
order by
`created-at` desc) as bill_rank_desc
from
`bills-1` b
where `patient-id` in {}
) sub
where
bill_rank_desc = 1
""".format(patients)
data_store = pd.read_sql_query(store_q, self.ms_connection_read.connection)
data_store.columns = [c.replace('-', '_') for c in data_store.columns]
self.logger.info("Length of data store is {}".format(len(data_store)))
# Already unique, but still check
data_store['created_at'] = pd.to_datetime(data_store['created_at'])
data_store = data_store.sort_values(by=['patient_id', 'created_at'],
ascending=[True, False])
# Keep latest store-id
data_store = data_store.drop_duplicates(subset='patient_id')
data_store = data_store[['patient_id', 'store_id']].copy()
self.logger.info("Length of data store after dropping duplicates - is "
"{}".format(len(data_store)))
return data_store
def no_bill_in_last_n_days(self, data_pass, run_date, last_n_days_param=15):
##########################################
# No bills in last 15 days
##########################################
data_base_c_grp = data_pass.copy()
patients = tuple(data_base_c_grp['patient_id'].to_list())
self.logger.info("Length of patients tuple is - {}".format(len(patients)))
# Take parameter input, default is 15
last_n_days_cutoff = last_n_days_param
self.logger.info("Last n days cutoff is {}".format(last_n_days_cutoff))
run_date_minus_n_days = (pd.to_datetime(run_date) - timedelta(days=last_n_days_cutoff)).strftime("%Y-%m-%d")
self.logger.info("Run date minus n days is {}".format(run_date_minus_n_days))
lb_q = """
SELECT
`patient-id`
FROM
`bills-1`
WHERE
`created-at` >= '{0} 00:00:00'
and `patient-id` in {1}
GROUP BY
`patient-id`
""".format(run_date_minus_n_days, patients)
already_billed = pd.read_sql_query(lb_q, self.ms_connection_read.connection)
already_billed.columns = [c.replace('-', '_') for c in already_billed.columns]
already_billed_list = already_billed['patient_id'].to_list()
self.logger.info("Length of Already billed last 15 days (List)- "
"fetched is {}".format(len(already_billed_list)))
data_base_c_grp = data_base_c_grp.query("patient_id not in @already_billed_list")
self.logger.info("Length of data base after filtering already billed - "
"length is {}".format(len(data_base_c_grp)))
return data_base_c_grp
def no_call_in_last_n_days(self, data_pass, run_date, last_n_days_param=30):
##########################################
# No calls in last 30 days period
##########################################
data_base_c_grp = data_pass.copy()
patients = tuple(data_base_c_grp['patient_id'].to_list())
self.logger.info("Length of patients tuple is - {}".format(len(patients)))
# Take parameter input, default is 15
last_n_days_cutoff = last_n_days_param
self.logger.info("Last n days cutoff is {}".format(last_n_days_cutoff))
run_date_minus_n_days = (pd.to_datetime(run_date) -
timedelta(days=last_n_days_cutoff)).strftime("%Y-%m-%d")
self.logger.info("Run date minus n days is {}".format(run_date_minus_n_days))
calling_q = """
SELECT
`patient-id`
FROM
`calling-dashboard`
WHERE
(`list-date` >= '{0}'
OR `call-date` >= '{0}')
and `patient-id` in {1}
GROUP BY
`patient-id`
""".format(run_date_minus_n_days, patients)
data_c = pd.read_sql_query(calling_q, self.ms_connection_read.connection)
data_c.columns = [c.replace('-', '_') for c in data_c.columns]
already_p = data_c['patient_id'].drop_duplicates().to_list()
self.logger.info("Length of Calling last {0} days (List)- "
"fetched is {1}".format(last_n_days_cutoff, len(already_p)))
data_base_c_grp = data_base_c_grp.query("patient_id not in @already_p")
self.logger.info("Length of data base after filtering already called - "
"length is {}".format(len(data_base_c_grp)))
return data_base_c_grp
def remove_dnd(self, data_pass):
data_base_c_grp = data_pass.copy()
read_schema = 'prod2-generico'
self.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
# Read DND list
dnd_q = """
select
(case
when a."patient-id" is not null then a."patient-id"
else b."id"
end) as "patient-id"
from
"dnd-list" a
left join "prod2-generico"."patients" b on
a."phone" = b."phone"
where a."call-dnd" = 1
"""
self.logger.info(dnd_q)
self.rs_db.execute(dnd_q, params=None)
dnd: pd.DataFrame = self.rs_db.cursor.fetch_dataframe()
if dnd is None:
dnd = pd.DataFrame(columns=['patient_id'])
dnd.columns = [c.replace('-', '_') for c in dnd.columns]
self.logger.info("dnd data length is : {}".format(len(dnd)))
dnd_list = dnd['patient_id'].drop_duplicates().to_list()
# Remove those already covered
data_base_c_grp = data_base_c_grp.query("patient_id not in @dnd_list")
self.logger.info("Net list after removing DND - length is : {}".format(len(data_base_c_grp)))
return data_base_c_grp
def db_write(self, data_pass, run_date_str,
campaign_id_param, callback_reason_str_param,
store_daily_limit_param=5, default_sort_needed=True):
self.logger.info("Running for run date {}".format(run_date_str))
data_base_c_grp = data_pass.copy()
# If default_sort then default sort on ABV descending, for each store
if default_sort_needed:
data_base_c_grp = data_base_c_grp.sort_values(by=['store_id', 'average_bill_value'],
ascending=[True, False])
data_base_c_grp['priority'] = data_base_c_grp.groupby(['store_id']).cumcount() + 1
else:
# assumes that sorting is already done, with priority column present
pass
##########################################
# Filter on Ranking
##########################################
# Take parameter input, default is 5
store_daily_limit = store_daily_limit_param
self.logger.info("Store level daily call limit is {}".format(store_daily_limit))
read_schema = 'prod2-generico'
self.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
store_limit_q = f"""select
"store-id" as "store_id",
"store-daily-limit"
from
"{read_schema}"."store-calling-exceptions" sce
where
"campaign-id" = {campaign_id_param}
and current_date between "start-date" and "end-date";
"""
store_limit = self.rs_db.get_df(query=store_limit_q)
data_base_c_grp = pd.merge(data_base_c_grp, store_limit, on='store_id', how='left')
data_base_c_grp["store-daily-limit"] = data_base_c_grp["store-daily-limit"].fillna(store_daily_limit_param)
data_base_c_grp = data_base_c_grp[data_base_c_grp['priority'] <= data_base_c_grp["store-daily-limit"]]
data_base_c_grp = data_base_c_grp.drop(columns=["store-daily-limit"])
self.logger.info("Length of data base after Rank filtering - "
"length is {}".format(len(data_base_c_grp)))
##########################################
# WRITE to calling dashboard
##########################################
data_export = data_base_c_grp[['store_id', 'patient_id', 'priority']].copy()
data_export['list_date'] = run_date_str
data_export['call_date'] = data_export['list_date']
data_export['campaign_id'] = campaign_id_param # integer
data_export['callback_reason'] = callback_reason_str_param # string
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
##########################################
# DANGER ZONE
##########################################
self.logger.info("Insert started for length {}".format(len(data_export)))
data_export.to_sql(name='calling-dashboard', con=self.ms_connection_write.engine,
if_exists='append', index=False,
chunksize=500, method='multi')
self.logger.info("Insert done")
def close_connections(self):
# Closing the DB Connection
self.ms_connection_read.close()
self.ms_connection_write.close()
self.rs_db.close_connection()

# ---- end of file: zeno_etl_libs/utils/consumer/crm_campaigns.py (package zeno-etl-libs 1.0.126) ----
import pandas as pd
import numpy as np
import datetime as dt
import time
from dateutil.relativedelta import relativedelta
from scipy.stats import norm
from zeno_etl_libs.utils.ipc2.config_ipc import *
from zeno_etl_libs.utils.ipc2.engine.data_load import LoadData
from zeno_etl_libs.utils.ipc2.engine.data_pre_process import PreprocessData
from zeno_etl_libs.utils.ipc2.engine.segmentation import Segmentation
from zeno_etl_libs.utils.ipc2.engine.ts_fcst import TS_forecast
from zeno_etl_libs.utils.ipc2.engine.forecast import Forecast
from zeno_etl_libs.utils.ipc2.engine.feat_engg import Feature_Engg
def ipc_forecast(store_id, reset_date, type_list, schema, db, logger):
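# High-level flow: load and preprocess weekly sales, segment ts_ids into
# ABC-XYZ buckets, produce LGBM and/or classical TS forecasts (per run_ml_flag /
# runs_ts_flag in the config), add a bucket-specific service-level uplift of
# norm.ppf(percentile) * std, and aggregate to a 4-week forecast per
# store_id-drug_id.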
store_id_list = ("({})").format(store_id) # for sql pass
last_date = dt.date(day=1, month=4, year=2019) # max history #baseline
# last_date = pd.to_datetime(reset_date).date() - dt.timedelta(weeks=26) # capping sales history to 6 months
# last_date = pd.to_datetime(reset_date).date() - dt.timedelta(weeks=52) # capping sales history to 12 months
load_max_date = pd.to_datetime(reset_date).date() - dt.timedelta(days = pd.to_datetime(reset_date).dayofweek+1)
# define empty variables in case of fail
weekly_fcst = pd.DataFrame()
ts_fcst = pd.DataFrame()
ts_fcst_cols = []
logger.info("Data Loading Started...")
data_load_obj = LoadData()
(
drug_list,
sales_history,
cfr_pr,
calendar,
first_bill_date
) = data_load_obj.load_all_input(
type_list=type_list,
store_id_list=store_id_list,
last_date=last_date,
reset_date=reset_date,
load_max_date=load_max_date,
schema=schema,
db=db
)
logger.info("Data Pre Processing Started...")
data_prep_obj = PreprocessData()
(
sales,
sales_pred,
cal_sales,
sales_daily
) = data_prep_obj.preprocess_all(
sales=sales_history,
drug_list=drug_list,
cfr_pr=cfr_pr,
calendar=calendar,
first_bill_date=first_bill_date,
last_date=last_date
)
train_max_date = sales[date_col].max()
end_date = sales_pred[date_col].max()
logger.info("Segmentation Started...")
seg_obj = Segmentation()
seg_df, drug_class = seg_obj.get_weekly_segmentation(
df=sales.copy(deep=True),
df_sales_daily=sales_daily.copy(deep=True),
train_max_date=train_max_date,
end_date=end_date
)
seg_df['reset_date'] = str(reset_date)
seg_df['PLC Status L1'].fillna('NPI',inplace=True) #correction for missed combinations in fcst
merged_df1 = pd.merge(sales_pred, seg_df, how='left', on=['ts_id'])
merged_df1 = merged_df1[merged_df1['PLC Status L1'].isin(['Mature', 'NPI'])]
if runs_ts_flag == 1:
ts_fcst_obj = TS_forecast()
# df_ts_fcst = applyParallel(
# merged_df1.groupby('ts_id'),
# func=TS_forecast.ts_forecast(
# df=merged_df1.copy(), train_max_date = train_max_date,
# forecast_start = train_max_date + relativedelta(weeks=2)))
ts_fcst, ts_fcst_cols = ts_fcst_obj.apply_ts_forecast(
df=merged_df1.copy(),
train_max_date=train_max_date,
forecast_start=train_max_date + relativedelta(weeks=2))
# ========================= Forecast for 1-4 weeks =========================
if run_ml_flag == 1:
start_time = time.time()
forecast_start = train_max_date + relativedelta(weeks=2)
merged_df1['All'] = 'All'
slice_col = 'All'
forecast_volume = merged_df1[merged_df1[date_col] > train_max_date][
target_col].sum()
assert forecast_volume == 0
logger.info(
"forecast start {} total volume: {}".format(forecast_start,
forecast_volume)
)
forecast_df = pd.DataFrame()
validation_df = pd.DataFrame()
for i in range(1, 5):
num_shift_lags = i + 1
# for group_name in merged_df1[slice_col].dropna.unique():
for group_name in ['All']:
logger.info('Group: {}'.format(group_name))
logger.info("Feature Engineering Started...")
feat_df = pd.DataFrame()
for one_df in [merged_df1]:
feat_engg_obj = Feature_Engg()
one_feat_df = feat_engg_obj.feat_agg(
one_df[
one_df[slice_col] == group_name
].drop(slice_col, axis=1).copy(deep=True),
train_max_date=train_max_date,
num_shift_lag=num_shift_lags
)
feat_df = pd.concat([one_feat_df, feat_df])
if pd.DataFrame(feat_df).empty:
continue
logger.info(
"Forecasting Started for {}...".format(forecast_start))
forecast_obj = Forecast()
fcst_df, val_df, Feature_Imp_all = forecast_obj.get_STM_forecast(
feat_df.copy(deep=True),
forecast_start=forecast_start,
num_shift_lags=num_shift_lags
)
forecast_df = pd.concat([forecast_df, fcst_df], axis=0)
validation_df = pd.concat([validation_df, val_df])
ml_fc_cols = [i for i in forecast_df.columns if
i.startswith('preds_')]
forecast_df['AE'] = forecast_df[ml_fc_cols].mean(axis=1)
end_time = time.time()
logger.info(
"total time for {} forecast: {}"
.format(forecast_start, end_time - start_time)
)
forecast_start = forecast_start + relativedelta(weeks=1)
weekly_fcst = pd.concat([weekly_fcst, forecast_df])
weekly_fcst['reset_date'] = reset_date
if runs_ts_flag == 0:
weekly_fcst = weekly_fcst.copy(deep=True)
if run_ml_flag == 0:
weekly_fcst = ts_fcst.copy(deep=True)
weekly_fcst['reset_date'] = reset_date
if (run_ml_flag == 1 & runs_ts_flag == 1):
weekly_fcst = pd.merge(weekly_fcst,
ts_fcst[[key_col, date_col] + ts_fcst_cols],
how='left', on=[key_col, date_col])
weekly_fcst.drop_duplicates(inplace=True)
weekly_fcst['model'] = 'LGBM'
weekly_fcst[[store_col, drug_col]] = weekly_fcst[key_col].str.split('_',
expand=True)
weekly_fcst.rename(columns={'preds_lgb': 'fcst'}, inplace=True)
# weekly_fcst.rename(columns={'preds_xgb_rf_target':'fcst'},inplace=True)
weekly_fcst = pd.merge(weekly_fcst, seg_df[['ts_id', 'std', 'Mixed']],
how='left', on=['ts_id'])
weekly_fcst.rename(columns={'Mixed': 'bucket'}, inplace=True)
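# Safety uplift per bucket: fcst = round(fcst + z * std), where z is the normal
# quantile of the bucket's service-level percentile from percentile_bucket_dict
# (e.g. a 0.80 percentile gives z ~= 0.84).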
for key in percentile_bucket_dict.keys():
print(key, percentile_bucket_dict[key])
indexs = weekly_fcst[weekly_fcst.bucket == key].index
weekly_fcst.loc[indexs, 'percentile'] = percentile_bucket_dict[key]
weekly_fcst.loc[indexs, 'fcst'] = np.round(
weekly_fcst.loc[indexs, 'fcst'] +
norm.ppf(percentile_bucket_dict[key]) *
weekly_fcst.loc[indexs, 'std'])
weekly_fcst = weekly_fcst[
['store_id', 'drug_id', 'model', 'date', 'fcst', 'std', 'bucket',
'percentile']]
fc_cols = [i for i in weekly_fcst.columns if i.startswith('preds_')]
weekly_fcst['std'].fillna(seg_df['std'].mean(), inplace=True)
# agg_fcst = weekly_fcst.groupby(
# ['model', 'store_id', 'drug_id', 'bucket', 'percentile']).\
# agg({'fcst': 'sum', 'std': sum_std}).reset_index()
agg_fcst = weekly_fcst.groupby(
['model', 'store_id', 'drug_id', 'bucket', 'percentile']). \
agg({'fcst': 'sum', 'std': 'mean'}).reset_index()
agg_fcst['store_id'] = agg_fcst['store_id'].astype(int)
agg_fcst['drug_id'] = agg_fcst['drug_id'].astype(int)
return agg_fcst, cal_sales, weekly_fcst, seg_df, drug_class

# ---- end of file: zeno_etl_libs/utils/ipc2/forecast_main.py (package zeno-etl-libs 1.0.126) ----
import pandas as pd
import numpy as np
def post_processing(safety_stock_df, weekly_fcst, seg_df, store_id, schema,
db, logger):
# get drug_name, type and grade
seg_df[['store_id', 'drug_id']] = seg_df['ts_id'].str.split('_', expand=True)
seg_df['store_id'] = seg_df['store_id'].astype(int)
seg_df['drug_id'] = seg_df['drug_id'].astype(int)
drug_list1 = list(safety_stock_df["drug_id"].unique())
drug_list2 = list(seg_df["drug_id"].unique())
    drug_list = tuple(set(drug_list1+drug_list2))  # union of drugs from either table
q_drug_info = f"""
select d.id as drug_id, "drug-name" as drug_name, type,
coalesce(doi."drug-grade", 'NA') as drug_grade
from "{schema}".drugs d
left join "{schema}"."drug-order-info" doi
on d.id = doi."drug-id"
where d.id in {drug_list}
and doi."store-id" = {store_id}
"""
df_drug_info = db.get_df(q_drug_info)
# get store name
q_store_name = f""" select name from "{schema}".stores where id = {store_id} """
store_name = db.get_df(q_store_name)['name'][0]
# get current inventory and avg_ptr info
q_inv = f"""
select "drug-id" as drug_id, sum("locked-quantity"+quantity+
"locked-for-audit"+"locked-for-transfer"+"locked-for-check"+
"locked-for-return") as curr_inventory
from "{schema}"."inventory-1" i
where "store-id" = {store_id}
and "drug-id" in {drug_list}
group by "drug-id"
"""
df_inv = db.get_df(q_inv)
q_avg_ptr_store = f"""
select "drug-id" as drug_id, avg(ptr) as avg_ptr
from "{schema}"."inventory-1" i
where "store-id" = {store_id}
and "drug-id" in {drug_list}
and DATEDIFF(day, date("created-at"), current_date) < 365
group by "drug-id"
"""
df_avg_ptr_store = db.get_df(q_avg_ptr_store)
q_avg_ptr_sys = f"""
select "drug-id" as drug_id, "avg-ptr" as avg_ptr_sys
from "{schema}"."drug-std-info" dsi
"""
df_avg_ptr_sys = db.get_df(q_avg_ptr_sys)
# add all to ss table
safety_stock_df['store_id'] = store_id
safety_stock_df['store_name'] = store_name
safety_stock_df = safety_stock_df.merge(
df_drug_info, on='drug_id', how='left')
safety_stock_df['drug_grade'].fillna('NA', inplace=True)
safety_stock_df = safety_stock_df.merge(
df_inv, on='drug_id', how='left')
safety_stock_df = safety_stock_df.merge(
df_avg_ptr_store, on='drug_id', how='left')
safety_stock_df = safety_stock_df.merge(
df_avg_ptr_sys, on='drug_id', how='left')
# replace NA in avg_ptr with system-avg_ptr
safety_stock_df["avg_ptr"] = np.where(safety_stock_df["avg_ptr"].isna(),
safety_stock_df["avg_ptr_sys"],
safety_stock_df["avg_ptr"])
safety_stock_df.drop("avg_ptr_sys", axis=1, inplace=True)
safety_stock_df["avg_ptr"] = safety_stock_df["avg_ptr"].astype(float)
# calculate DOH
safety_stock_df['safety_stock_days'] = np.where(
(safety_stock_df['fcst'] == 0) | (safety_stock_df['safety_stock'] == 0),
0, safety_stock_df['safety_stock'] / (safety_stock_df['fcst'] / 28))
safety_stock_df['reorder_days'] = np.where(
(safety_stock_df['fcst'] == 0) | (safety_stock_df['reorder_point'] == 0),
0, safety_stock_df['reorder_point'] / (safety_stock_df['fcst'] / 28))
safety_stock_df['order_upto_days'] = np.where(
(safety_stock_df['fcst'] == 0) | (safety_stock_df['order_upto_point'] == 0),
0, safety_stock_df['order_upto_point'] / (safety_stock_df['fcst'] / 28))
# calculate max-value, to-order-qty and to-order-val
safety_stock_df["max_value"] = safety_stock_df['order_upto_point'] * \
safety_stock_df['avg_ptr']
safety_stock_df['to_order_quantity'] = np.where(
safety_stock_df['curr_inventory'] <= safety_stock_df['reorder_point'],
safety_stock_df['order_upto_point'] - safety_stock_df['curr_inventory'],
0)
safety_stock_df['to_order_value'] = safety_stock_df['to_order_quantity'] * \
safety_stock_df['avg_ptr']
# formatting weekly_fcst table
weekly_fcst['store_name'] = store_name
weekly_fcst.rename(
columns={'date': 'week_begin_dt', 'fcst': 'weekly_fcst',
'std': 'fcst_std'}, inplace=True)
# formatting segmentation table
seg_df.rename(columns={'std': 'sales_std', 'cov': 'sales_cov',
'Mixed': 'bucket', 'Group': 'group',
'PLC Status L1': 'plc_status', 'ADI': 'adi',
'total_LY_sales': 'total_ly_sales',
'start_date': 'sale_start_date'}, inplace=True)
seg_df['plc_status'] = np.where(seg_df['plc_status'] == 'NPI',
'New Product', seg_df['plc_status'])
seg_df['sale_start_date'] = seg_df['sale_start_date'].dt.date
seg_df['store_name'] = store_name
seg_df = seg_df.merge(df_drug_info, on='drug_id', how='left')
seg_df['drug_grade'].fillna('NA', inplace=True)
    # get order_value summary for email attachment
order_value = safety_stock_df.pivot_table(
index=['store_id', 'store_name', 'type'],
values=['to_order_quantity', 'to_order_value'], aggfunc='sum',
margins=True, margins_name='Total').reset_index()
    return safety_stock_df, order_value, weekly_fcst, seg_df

# ==== end of file: zeno_etl_libs/utils/ipc2/post_processing.py (package: zeno-etl-libs) ====
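# --------------------------------------------------------------------------
# Hedged sketch (not part of the original module): the days-on-hand and
# to-order arithmetic used in post_processing above, on one toy row.
# All numbers are assumed; real values come from safety_stock_df.
def _demo_doh_and_order_qty(fcst=28, safety_stock=6, reorder_point=10,
                            order_upto_point=18, curr_inventory=8):
    daily_fcst = fcst / 28  # fcst is a 28-day (4-week) quantity
    safety_stock_days = safety_stock / daily_fcst if fcst else 0   # 6 days
    reorder_days = reorder_point / daily_fcst if fcst else 0       # 10 days
    # order only when inventory has fallen to/below the reorder point
    to_order_qty = (order_upto_point - curr_inventory
                    if curr_inventory <= reorder_point else 0)     # 10 units
    return safety_stock_days, reorder_days, to_order_qty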
import pandas as pd
import numpy as np
def wh_consolidation(safety_stock_df, db, schema, logger):
"""
replace for drugs which has a substitute available in WH
(corrected logic)
"""
# getting list of SKUs to be rejected and substituted
wh_subs_query = f"""
select "drug-id" , "drug-id-replaced" , "same-release"
from "{schema}"."wh-sku-subs-master" wssm
left join "{schema}".drugs d on wssm."drug-id" = d.id
where "add-wh" = 'No'
and d."type" not in ('ethical')
"""
df_wh_subs = db.get_df(wh_subs_query)
df_wh_subs.columns = [c.replace('-', '_') for c in df_wh_subs.columns]
all_assort_drugs = list(safety_stock_df.loc[
safety_stock_df["order_upto_point"] > 0]["drug_id"].unique())
# reject cases
reject_cases_1 = df_wh_subs.loc[
df_wh_subs["drug_id"] == df_wh_subs["drug_id_replaced"]]
reject_drugs_lst_1 = list(reject_cases_1["drug_id"].unique())
reject_cases_2 = df_wh_subs.loc[
(df_wh_subs["drug_id"] != df_wh_subs["drug_id_replaced"]) &
(df_wh_subs["same_release"] == 'NO')]
reject_drugs_lst_2 = list(reject_cases_2["drug_id"].unique())
# replace cases
replace_cases = df_wh_subs.loc[
(df_wh_subs["drug_id"] != df_wh_subs["drug_id_replaced"]) &
(df_wh_subs["same_release"] == 'YES')]
reject_drugs_lst_3 = list(replace_cases["drug_id"].unique())
replace_merge_df = safety_stock_df.merge(
replace_cases, on="drug_id", how="inner").drop("same_release", axis=1)[
["drug_id", "drug_id_replaced", "safety_stock", "reorder_point", "order_upto_point"]]
# get preferred entry in case of multiple drug_id with same drug_id_replaced
# choosing the case with highest OUP
replace_merge_df = replace_merge_df.sort_values(
by=['drug_id_replaced', 'order_upto_point'], ascending=False)
preferred_drug_replace_map = replace_merge_df.groupby(
"drug_id_replaced").agg({"drug_id": "first"}) # first will have highest OUP
preferred_drug_replace_df = replace_merge_df.merge(
preferred_drug_replace_map, on=["drug_id", "drug_id_replaced"], how="inner")
substitute_drugs_add_df = preferred_drug_replace_df.copy()
substitute_drugs_add_df = substitute_drugs_add_df.drop("drug_id", axis=1)
substitute_drugs_add_df.rename(columns={"drug_id_replaced": "drug_id"}, inplace=True)
# only need to add the substitute if below condition satisfy
substitute_drugs_add_df = substitute_drugs_add_df.loc[
(substitute_drugs_add_df["order_upto_point"] > 0) &
(~substitute_drugs_add_df["drug_id"].isin(all_assort_drugs))]
# remove previous entry with 0 OUP for substitute drug if present (avoids duplicate)
substitute_drugs = list(substitute_drugs_add_df["drug_id"].unique())
safety_stock_df = safety_stock_df.loc[~(
(safety_stock_df["order_upto_point"] == 0) &
(safety_stock_df["drug_id"].isin(substitute_drugs)))]
# filling the relevant columns
substitute_drugs_add_df['model'] = 'NA'
substitute_drugs_add_df['bucket'] = 'NA'
substitute_drugs_add_df['fcst'] = 0
substitute_drugs_add_df['std'] = 0
substitute_drugs_add_df['lead_time_mean'] = 0
substitute_drugs_add_df['lead_time_std'] = 0
reject_drugs_lst = list(set(reject_drugs_lst_1 + reject_drugs_lst_2 + reject_drugs_lst_3))
logger.info(f"Drugs to reject: {len(reject_drugs_lst)}")
logger.info(f"Drugs to add as substitute: {substitute_drugs_add_df.shape[0]}")
    ss_zero_cases = safety_stock_df.loc[
        safety_stock_df["drug_id"].isin(reject_drugs_lst)].copy()
    ss_rest_cases = safety_stock_df.loc[
        ~safety_stock_df["drug_id"].isin(reject_drugs_lst)].copy()
    # zero out SS/ROP/OUP for rejected drugs
    ss_zero_cases["safety_stock"] = 0
    ss_zero_cases["reorder_point"] = 0
    ss_zero_cases["order_upto_point"] = 0
safety_stock_df_final = pd.concat(
[ss_rest_cases, ss_zero_cases, substitute_drugs_add_df])
return safety_stock_df_final
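# --------------------------------------------------------------------------
# Hedged sketch (illustration only, never called by the pipeline): how the
# reject/replace split in wh_consolidation behaves on a toy wh-sku-subs
# extract. Column names mirror df_wh_subs; the drug ids are made up.
def _demo_wh_subs_split():
    import pandas as pd

    df = pd.DataFrame({
        "drug_id":          [1, 2, 3],
        "drug_id_replaced": [1, 9, 7],
        "same_release":     ["YES", "NO", "YES"],
    })
    reject_same_drug = df[df["drug_id"] == df["drug_id_replaced"]]           # drug 1
    reject_diff_release = df[(df["drug_id"] != df["drug_id_replaced"]) &
                             (df["same_release"] == "NO")]                   # drug 2
    replace_cases = df[(df["drug_id"] != df["drug_id_replaced"]) &
                       (df["same_release"] == "YES")]                        # drug 3 -> 7
    return reject_same_drug, reject_diff_release, replace_cases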
def goodaid_consolidation(safety_stock_df, db, schema, logger,
substition_type=None):
"""
for goodaid compositions, only keep goodaid and those drugs in same
composition which are part of WH portfolio.
reject all other drugs in that composition
"""
if substition_type is None:
substition_type = ['generic']
# Good Aid SKU list
ga_sku_query = """
select wh."drug-id" , d.composition
from "{schema}"."wh-sku-subs-master" wh
left join "{schema}".drugs d
on d.id = wh."drug-id"
where wh."add-wh" = 'Yes'
and d."company-id" = 6984
and d.type in {0}
""".format(
str(substition_type).replace('[', '(').replace(']', ')'),
schema=schema)
ga_sku = db.get_df(ga_sku_query)
ga_sku.columns = [c.replace('-', '_') for c in ga_sku.columns]
logger.info('GoodAid SKU list ' + str(ga_sku.shape[0]))
# Generic Top SKU
ga_active_composition = tuple(ga_sku['composition'].values)
top_sku_query = """
select wh."drug-id" , d.composition
from "{schema}"."wh-sku-subs-master" wh
left join "{schema}".drugs d
on d.id = wh."drug-id"
where wh."add-wh" = 'Yes'
and d."company-id" != 6984
and d.type in {0}
and d.composition in {1}
""".format(
str(substition_type).replace('[', '(').replace(']', ')'),
str(ga_active_composition), schema=schema)
top_sku = db.get_df(top_sku_query)
top_sku.columns = [c.replace('-', '_') for c in top_sku.columns]
logger.info('GoodAid comp Top SKU list ' + str(top_sku.shape[0]))
# SS substition for other drugs
rest_sku_query = """
select id as drug_id, composition
from "{schema}".drugs
where composition in {0}
and id not in {1}
and "company-id" != 6984
and type in {2}
""".format(str(ga_active_composition),
str(tuple(top_sku['drug_id'].values)),
str(substition_type).replace('[', '(').replace(']', ')'),
schema=schema)
rest_sku = db.get_df(rest_sku_query)
logger.info('GoodAid comp rest SKU list ' + str(rest_sku.shape[0]))
# substitution logic starts
gaid_drug_list = list(ga_sku["drug_id"].unique())
top_drug_list = list(top_sku["drug_id"].unique())
reject_drug_list = list(rest_sku["drug_id"].unique())
    ss_zero_cases = safety_stock_df.loc[
        safety_stock_df["drug_id"].isin(reject_drug_list)].copy()
    ss_rest_cases = safety_stock_df.loc[
        ~safety_stock_df["drug_id"].isin(reject_drug_list)].copy()
logger.info('Setting rest sku SS, ROP, OUP to zero')
ss_zero_cases["safety_stock"] = 0
ss_zero_cases["reorder_point"] = 0
ss_zero_cases["order_upto_point"] = 0
safety_stock_df_final = pd.concat([ss_rest_cases, ss_zero_cases])
return safety_stock_df_final
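# --------------------------------------------------------------------------
# Hedged sketch (illustration only, never called by the pipeline): the net
# effect of goodaid_consolidation is to zero out SS/ROP/OUP for non-GoodAid,
# non-WH drugs sharing a GoodAid composition. Drug ids below are made up.
def _demo_goodaid_zeroing():
    import pandas as pd

    ss = pd.DataFrame({"drug_id": [10, 11, 12],
                       "safety_stock": [3, 2, 5],
                       "reorder_point": [5, 4, 8],
                       "order_upto_point": [9, 7, 12]})
    reject_drug_list = [12]  # same composition as a GoodAid SKU, not in WH portfolio
    mask = ss["drug_id"].isin(reject_drug_list)
    ss.loc[mask, ["safety_stock", "reorder_point", "order_upto_point"]] = 0
    return ss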
def D_class_consolidation(safety_stock_df, store_id, db, schema, logger):
"""
for D class drugs, discard drugs from assortment for which
same composition is present in A,B or C class
"""
drugs_list = tuple(safety_stock_df["drug_id"].unique())
comp = f"""
select id as drug_id, composition
from "{schema}".drugs d
where "id" in {drugs_list}
"""
df_comp = db.get_df(comp)
df = pd.merge(safety_stock_df, df_comp, how='left', on=['drug_id'])
df['store_comp'] = str(store_id) + "_" + df['composition'].astype(str)
df_blank_comp = df[df['composition'] == ""]
df = df[df['composition'] != ""].reset_index(drop=True)
df['ABC Class'] = np.where(df['bucket'].isin(['AW', 'AX', 'AY', 'AZ']), 'A', np.nan)
df['ABC Class'] = np.where(df['bucket'].isin(['BW', 'BX', 'BY', 'BZ']), 'B', df['ABC Class'])
df['ABC Class'] = np.where(df['bucket'].isin(['CW', 'CX', 'CY', 'CZ']), 'C', df['ABC Class'])
df['ABC Class'] = np.where(df['bucket'].isin(['DW', 'DX', 'DY', 'DZ']), 'D', df['ABC Class'])
df['ABC Class'].fillna('None', inplace=True)
list_ABC = df[(df['ABC Class'].isin(['A', 'B', 'C'])) & (df['order_upto_point'] > 0)][
'store_comp'].unique().tolist()
list_D = df[(df['ABC Class'].isin(['D'])) & (df['order_upto_point'] > 0)][
'store_comp'].unique().tolist()
common_comp_D = [value for value in list_D if value in list_ABC]
D_exc_comp = [value for value in list_D if value not in list_ABC]
df_D = df[df['ABC Class'] == 'D']
df_D_new = df_D.copy()
df_D_new['order_upto_point'] = np.where(
df_D_new['store_comp'].isin(common_comp_D), 0, df_D_new['order_upto_point'])
df_D_new['reorder_point'] = np.where(
df_D_new['store_comp'].isin(common_comp_D), 0, df_D_new['reorder_point'])
df_D_new['safety_stock'] = np.where(
df_D_new['store_comp'].isin(common_comp_D), 0, df_D_new['safety_stock'])
df_remove_D = df[df['ABC Class'] != 'D']
df_add_new_D = pd.concat([df_remove_D, df_D_new])
df_add_blank_comp = pd.concat([df_add_new_D, df_blank_comp])
safety_stock_df_final = df_add_blank_comp.drop(
columns=['store_comp', 'composition', 'ABC Class'])
    return safety_stock_df_final

# ==== end of file: zeno_etl_libs/utils/ipc2/portfolio_consolidation.py (package: zeno-etl-libs) ====
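# --------------------------------------------------------------------------
# Hedged sketch (appended for illustration, not part of the original file):
# the D-class rule keeps a D-bucket drug only when no A/B/C drug of the same
# composition is already stocked at the store. Compositions are made up.
def _demo_d_class_rule():
    import pandas as pd

    df = pd.DataFrame({
        "drug_id": [1, 2, 3],
        "composition": ["paracetamol 500mg", "paracetamol 500mg", "rare-comp"],
        "bucket": ["AX", "DZ", "DZ"],
        "order_upto_point": [10, 4, 4],
    })
    abc_comps = set(df.loc[df["bucket"].str[0].isin(list("ABC")) &
                           (df["order_upto_point"] > 0), "composition"])
    d_mask = df["bucket"].str.startswith("D") & df["composition"].isin(abc_comps)
    df.loc[d_mask, "order_upto_point"] = 0   # drug 2 is dropped, drug 3 survives
    return df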
import numpy as np
from zeno_etl_libs.utils.ipc.lead_time import lead_time
from zeno_etl_libs.utils.ipc2.helpers.correction_flag import compare_df, \
add_correction_flag
from zeno_etl_libs.utils.ipc2.heuristics.sl_heuristics import calculate_ss
from zeno_etl_libs.utils.ipc2.heuristics.doh_heuristics import ss_doh_wh_cap, \
ss_doh_non_wh_cap
from zeno_etl_libs.utils.ipc2.heuristics.ipcv3_heuristics import v3_corrections
from zeno_etl_libs.utils.ipc2.heuristics.ipcv4_heuristics import v4_corrections
from zeno_etl_libs.utils.ipc2.heuristics.ipcv5_heuristics import v5_corrections
from zeno_etl_libs.utils.ipc2.heuristics.ipcv3N_heuristics import v3N_corrections
def safety_stock_calc(agg_fcst, cal_sales, store_id, reset_date, v3_active_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
v4_active_flag, drug_type_list_v4, v5_active_flag,
open_po_turbhe_active, schema, db, logger):
fcst_weeks = 4
order_freq = 4
# ================== TEMPORARY FOR OPEN-PO TURBHE STORES ===================
if open_po_turbhe_active == 'Y':
q_turbhe_stores = f"""
select distinct "store-id"
from "{schema}"."store-dc-mapping" sdm
left join "{schema}".stores s on s.id =sdm."store-id"
where "forward-dc-id" = 169
and s."franchisee-id" = 1
and name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
"""
df_turbhe_stores = db.get_df(q_turbhe_stores)
turbhe_stores = df_turbhe_stores["store-id"].tolist()
# add store_id 2 & 264 (mulund east and mulund west sarvodaya)
turbhe_stores += [2, 264]
if store_id in turbhe_stores:
order_freq = 3
# ========================= LEAD TIME CALCULATIONS =========================
lt_drug, lt_store_mean, lt_store_std = lead_time(
store_id, cal_sales, reset_date, db, schema, logger)
safety_stock_df = agg_fcst.merge(
lt_drug[['drug_id', 'lead_time_mean', 'lead_time_std']],
how='left', on='drug_id')
safety_stock_df['lead_time_mean'].fillna(lt_store_mean, inplace=True)
safety_stock_df['lead_time_std'].fillna(lt_store_std, inplace=True)
# ==================== SS, ROP, OUP CALCULATION BEGINS =====================
# impute store_std for cases where store-drug std<1
safety_stock_df['lead_time_std'] = np.where(
safety_stock_df['lead_time_std'] < 1,
lt_store_std, safety_stock_df['lead_time_std'])
# calculate SS
safety_stock_df = calculate_ss(safety_stock_df, fcst_weeks, logger)
# SS-DOH CAPPING #1
logger.info(f"DOH1 (SS-WH-DOH) Correction starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = ss_doh_wh_cap(safety_stock_df, schema, db)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum SS before: {df_pre_corr['safety_stock'].sum()}")
logger.info(f"Sum SS after: {df_post_corr['safety_stock'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger,
cols_to_compare=['safety_stock'])
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'DOH1')
# SS-DOH CAPPING #2
logger.info(f"DOH2 (SS-Non-WH-DOH) Correction starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = ss_doh_non_wh_cap(safety_stock_df, schema, db)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum SS before: {df_pre_corr['safety_stock'].sum()}")
logger.info(f"Sum SS after: {df_post_corr['safety_stock'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger,
cols_to_compare=['safety_stock'])
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'DOH2')
# calculate ROP - add lead time demand to SS
safety_stock_df['reorder_point'] = safety_stock_df.apply(
lambda row: np.round(
row['lead_time_mean'] * row['fcst'] / fcst_weeks / 7),
axis=1) + safety_stock_df['safety_stock']
# calculate OUP - add order_freq demand to ROP
safety_stock_df['order_upto_point'] = (
safety_stock_df['reorder_point'] +
np.round(
np.where(
# if rounding off give 0, increase it to 4-week forecast
(safety_stock_df['reorder_point'] +
safety_stock_df[
'fcst'] * order_freq / fcst_weeks / 7 < 0.5) &
(safety_stock_df['fcst'] > 0),
safety_stock_df['fcst'],
safety_stock_df['fcst'] * order_freq / fcst_weeks / 7))
)
# correction for negative forecast
safety_stock_df['safety_stock'] = np.where(
safety_stock_df['safety_stock'] < 0,
0, safety_stock_df['safety_stock'])
safety_stock_df['reorder_point'] = np.where(
safety_stock_df['reorder_point'] < 0,
0, safety_stock_df['reorder_point'])
safety_stock_df['order_upto_point'] = np.where(
safety_stock_df['order_upto_point'] < 0,
0, safety_stock_df['order_upto_point'])
# ========== CORRECTION PLUGINS (REWORK SS,ROP,OUP BASED ON REQ) ===========
final_ss_df = safety_stock_df.copy()
# if v3_active_flag == 'Y':
# logger.info("IPC V3 Correction starts")
# df_pre_corr = final_ss_df.copy()
# final_ss_df = v3_corrections(final_ss_df, store_id,
# corrections_selling_probability_cutoff,
# corrections_cumulative_probability_cutoff,
# schema, db, logger)
# df_post_corr = final_ss_df.copy()
# logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
# logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
# corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
# final_ss_df = add_correction_flag(final_ss_df, corr_drug_lst, 'V3')
if v3_active_flag == 'Y':
logger.info("IPC V3N Correction starts")
df_pre_corr = final_ss_df.copy()
final_ss_df = v3N_corrections(final_ss_df, store_id,
reset_date,
schema, db, logger)
df_post_corr = final_ss_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
final_ss_df = add_correction_flag(final_ss_df, corr_drug_lst, 'V3N')
if v4_active_flag == 'Y':
logger.info("IPC V4 Correction starts")
df_pre_corr = final_ss_df.copy()
final_ss_df = v4_corrections(final_ss_df, drug_type_list_v4, db, schema)
df_post_corr = final_ss_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
final_ss_df = add_correction_flag(final_ss_df, corr_drug_lst, 'V4')
# currently v5 only active for pilot stores
if (v5_active_flag == 'Y') & (store_id in [51, 134, 83]):
logger.info("IPC V5 STD-Qty Correction starts")
df_pre_corr = final_ss_df.copy()
final_ss_df = v5_corrections(final_ss_df, db, schema, logger)
df_post_corr = final_ss_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
final_ss_df = add_correction_flag(final_ss_df, corr_drug_lst, 'V5')
# correct cases where ROP=OUP
df_pre_corr = final_ss_df.copy()
final_ss_df['order_upto_point'] = np.where(
((final_ss_df['order_upto_point'] > 0) &
(final_ss_df['reorder_point'] == final_ss_df['order_upto_point'])),
final_ss_df['reorder_point'] + 1, final_ss_df['order_upto_point'])
df_post_corr = final_ss_df.copy()
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
final_ss_df = add_correction_flag(final_ss_df, corr_drug_lst, 'OUP_CORR')
    return final_ss_df

# ==== end of file: zeno_etl_libs/utils/ipc2/safety_stock.py (package: zeno-etl-libs) ====
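# --------------------------------------------------------------------------
# Hedged worked example (not part of the original module): the ROP/OUP
# arithmetic in safety_stock_calc for one toy drug, assuming fcst_weeks=4
# and order_freq=4 as in the code above.
def _demo_rop_oup(fcst=28.0, safety_stock=5, lead_time_mean=3.0,
                  fcst_weeks=4, order_freq=4):
    import numpy as np

    daily_demand = fcst / fcst_weeks / 7                                      # 1 unit/day
    reorder_point = np.round(lead_time_mean * daily_demand) + safety_stock    # 3 + 5 = 8
    review_period_demand = fcst * order_freq / fcst_weeks / 7                 # 4 units
    if (reorder_point + review_period_demand < 0.5) and (fcst > 0):
        review_period_demand = fcst     # fallback used above for tiny forecasts
    order_upto_point = reorder_point + np.round(review_period_demand)         # 8 + 4 = 12
    return reorder_point, order_upto_point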
import numpy as np
import pandas as pd
import datetime as dt
def check_oup_outlier(safety_stock_df, store_id, reset_date, db, schema):
"""
Find cases where OUP > Past 90 days net-sales + pr_loss
"""
safety_stock_df.columns = [c.replace('-', '_') for c in
safety_stock_df.columns]
# check only for cases where fcst & oup > 0
df_check_cases = safety_stock_df.loc[
(safety_stock_df["fcst"] > 0) & (safety_stock_df["order_upto_point"] > 0)]
df_check_cases = df_check_cases[[
"store_id", "store_name", "drug_id", "drug_name", "type", "model", "fcst",
"safety_stock", "reorder_point", "order_upto_point", "correction_flags",
"curr_inventory", "to_order_quantity", "to_order_value"]]
drug_list = list(df_check_cases["drug_id"].unique())
drug_list_str = str(drug_list).replace('[', '(').replace(']', ')')
p90d_begin_date = dt.datetime.strptime(reset_date, '%Y-%m-%d').date() - \
dt.timedelta(days=90)
p90d_begin_date = p90d_begin_date.strftime("%Y-%m-%d")
q_p90d_sales = f"""
select "drug-id" as drug_id, sum("net-quantity") as net_sales_p90d
from "{schema}".sales s
where "store-id" = {store_id}
and "drug-id" in {drug_list_str}
and date("created-at") >= '{p90d_begin_date}'
and date("created-at") < '{reset_date}'
group by "drug-id"
"""
df_p90d_sales = db.get_df(q_p90d_sales)
q_p90d_pr_loss = f"""
select "drug-id" as drug_id, sum("loss-quantity") as pr_loss_p90d
from "{schema}"."cfr-patient-request"
where "store-id" = {store_id}
and "shortbook-date" >= '{p90d_begin_date}'
and "shortbook-date" < '{reset_date}'
and "drug-id" in {drug_list_str}
and ("drug-category" = 'chronic' or "repeatability-index" >= 40)
and "loss-quantity" > 0
group by "drug-id"
"""
df_p90d_pr_loss = db.get_df(q_p90d_pr_loss)
df_check_cases = df_check_cases.merge(df_p90d_sales, on="drug_id",
how="left")
df_check_cases = df_check_cases.merge(df_p90d_pr_loss, on="drug_id",
how="left")
df_check_cases["net_sales_p90d"] = df_check_cases["net_sales_p90d"].fillna(0).astype(int)
df_check_cases["pr_loss_p90d"] = df_check_cases["pr_loss_p90d"].fillna(0).astype(int)
df_check_cases["p90d_demand"] = df_check_cases["net_sales_p90d"] + df_check_cases["pr_loss_p90d"]
df_check_cases["outlier"] = 'NA'
df_check_cases["outlier"] = np.where(
df_check_cases["order_upto_point"] > df_check_cases["p90d_demand"],
'Y', 'N')
df_outliers = df_check_cases.loc[df_check_cases["outlier"] == 'Y']
outlier_drugs = list(df_outliers["drug_id"].unique())
manual_doid_upd_df = df_outliers[["store_id", "drug_id", "safety_stock",
"reorder_point", "order_upto_point"]]
manual_doid_upd_df.rename(columns={"safety_stock": "ss",
"reorder_point": "rop",
"order_upto_point": "oup"},
inplace=True)
    return outlier_drugs, df_outliers, manual_doid_upd_df

# ==== end of file: zeno_etl_libs/utils/ipc2/helpers/outlier_check.py (package: zeno-etl-libs) ====
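# --------------------------------------------------------------------------
# Hedged sketch (illustration only): the outlier rule in check_oup_outlier —
# an SKU is flagged when its order-upto-point exceeds the last 90 days of
# observed demand (net sales plus patient-request loss). Numbers are made up.
def _demo_oup_outlier(order_upto_point=30, net_sales_p90d=12, pr_loss_p90d=3):
    p90d_demand = net_sales_p90d + pr_loss_p90d
    return "Y" if order_upto_point > p90d_demand else "N"   # -> "Y" here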
import numpy as np
np.random.seed(0)
import pandas as pd
# import time
# import re
# from datetime import date
# from dateutil.relativedelta import relativedelta
# from statsmodels.tsa.exponential_smoothing.ets import ETSModel
from prophet import Prophet
from statsmodels.tsa.api import ExponentialSmoothing
# import sktime
from sktime.forecasting.ets import AutoETS
from zeno_etl_libs.utils.ipc2.helpers.helper_functions import sum_std,\
applyParallel, applyParallel_croston
# from boruta import BorutaPy
from zeno_etl_libs.utils.ipc2.config_ipc import (
date_col,
target_col,
models
)
import logging
logger = logging.getLogger("_logger")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
class TS_forecast:
def train_test_split(self, df, train_max_date, forecast_start):
df.rename(columns={date_col: 'ds', target_col: 'y'}, inplace=True)
df.sort_values(by=['ds'], inplace=True)
train = df[df['ds'] <= train_max_date]
test = df[df['ds'] >= forecast_start]
return train, test
def Croston_TSB(self, ts, extra_periods=4, alpha=0.4, beta=0.4):
d = np.array(ts) # Transform the input into a numpy array
cols = len(d) # Historical period length
d = np.append(d, [
np.nan] * extra_periods) # Append np.nan into the demand array to cover future periods
# level (a), probability(p) and forecast (f)
a, p, f = np.full((3, cols + extra_periods), np.nan)
# Initialization
first_occurence = np.argmax(d[:cols] > 0)
a[0] = d[first_occurence]
p[0] = 1 / (1 + first_occurence)
f[0] = p[0] * a[0]
# Create all the t+1 forecasts
for t in range(0, cols):
if d[t] > 0:
a[t + 1] = alpha * d[t] + (1 - alpha) * a[t]
p[t + 1] = beta * (1) + (1 - beta) * p[t]
else:
a[t + 1] = a[t]
p[t + 1] = (1 - beta) * p[t]
f[t + 1] = p[t + 1] * a[t + 1]
# Future Forecast
a[cols + 1:cols + extra_periods] = a[cols]
p[cols + 1:cols + extra_periods] = p[cols]
f[cols + 1:cols + extra_periods] = f[cols]
df = pd.DataFrame.from_dict(
{"Demand": d, "Forecast": f, "Period": p, "Level": a,
"Error": d - f})
return df[-extra_periods:]
def ETS_forecast(self, train, test):
try:
train.set_index(['ds'], inplace=True)
test.set_index(['ds'], inplace=True)
train.index.freq = train.index.inferred_freq
test.index.freq = test.index.inferred_freq
# fit in statsmodels
# model = AutoETS(sp=52,auto=True,allow_multiplicative_trend = False, additive_only=True)
# fit = model.fit(train['y'])
try:
# fit = ETSModel(np.asarray(train['y']) ,seasonal_periods=52 ,trend='add', seasonal='add').fit()
fit = AutoETS(auto=True).fit(train['y'])
preds = fit.predict(test.index)
except Exception as e:
logger.info("error in Auto-ETS")
logger.info(str(e))
fit = ExponentialSmoothing(train['y']).fit()
preds = fit.forecast(len(test) + 1)
preds = preds[-len(test):]
except Exception as e:
logger.info("error in ETS fcst")
logger.info(str(e))
preds = 0
return preds
def ma_forecast(self, data):
"""
Purpose: Compute MA forecast for the for the forecast horizon specified
Inputs: time series to create forecast
Output: series with forecasted values
"""
sma_df = data.copy(deep=True)
yhat = []
if len(data) >= 8:
for i in range(5):
sma_val = sma_df.rolling(8).mean().iloc[-1]
sma_df.loc[sma_df.index.max() + 1] = sma_val
yhat.append(sma_val)
else:
for i in range(5):
sma_val = sma_df.rolling(len(data)).mean().iloc[-1]
sma_df.loc[sma_df.index.max() + 1] = sma_val
yhat.append(sma_val)
logger.info(yhat)
return yhat[-4:]
def prophet_fcst(self, train, test, params=None):
# reg_list = []
try:
if params is None:
pro = Prophet()
else:
pro = Prophet(n_changepoints=params)
# for j in train.columns:
# if j not in col_list:
# pro.add_regressor(j)
# reg_list.append(j)
pro.fit(train[['ds', 'y']])
pred_f = pro.predict(test)
test = test[["ds", "y"]]
test = pd.merge(test, pred_f, on="ds", how="left")
except Exception as e:
logger.info("error in prophet fcst")
logger.info(str(e))
test['yhat'] = 0
return test
def ts_forecast(self, df, train_max_date, forecast_start):
train, test = self.train_test_split(df, train_max_date=train_max_date,
forecast_start=forecast_start)
test = test.sort_values(by=['ds'])
if 'croston' in models:
preds_croston = self.Croston_TSB(train['y'])
test['preds_croston'] = preds_croston['Forecast'].values
if 'ETS' in models:
preds_ETS = self.ETS_forecast(train.copy(), test.copy())
try:
test['preds_ETS'] = preds_ETS.values
except:
test['preds_ETS'] = 0
if 'MA' in models:
preds_ma = self.ma_forecast(train['y'])
test['preds_ma'] = preds_ma
if 'prophet' in models:
preds_prophet = self.prophet_fcst(train.copy(), test.copy())
test['preds_prophet'] = preds_prophet['yhat'].values
return test
def apply_ts_forecast(self, df, train_max_date, forecast_start):
# global train_date
# train_date = train_max_date
# global forecast_start_date
# forecast_start_date = forecast_start
preds = applyParallel_croston(
df.groupby('ts_id'),
func=self.ts_forecast, train_max_date=train_max_date,
forecast_start=forecast_start
)
preds.rename(columns={'ds': date_col, 'y': target_col}, inplace=True)
ts_fcst_cols = [i for i in preds.columns if i.startswith('preds_')]
for col in ts_fcst_cols:
preds[col].fillna(0, inplace=True)
preds[col] = np.where(preds[col] < 0, 0, preds[col])
preds['preds_AE_ts'] = preds[ts_fcst_cols].mean(axis=1)
        return preds, ts_fcst_cols

# ==== end of file: zeno_etl_libs/utils/ipc2/engine/ts_fcst.py (package: zeno-etl-libs) ====
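# --------------------------------------------------------------------------
# Hedged usage sketch (appended for illustration; assumes the zeno_etl_libs
# package is importable): running Croston-TSB on a short intermittent demand
# series. The toy series and the default alpha/beta are arbitrary choices.
def _demo_croston_tsb():
    from zeno_etl_libs.utils.ipc2.engine.ts_fcst import TS_forecast

    demand = [0, 0, 3, 0, 0, 0, 2, 0, 4, 0, 0, 1]   # weekly intermittent demand
    out = TS_forecast().Croston_TSB(demand, extra_periods=4, alpha=0.4, beta=0.4)
    # `out` holds the 4 future rows: smoothed level * demand probability = forecast
    return out[["Demand", "Forecast", "Period", "Level"]]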
import pandas as pd
import datetime
import numpy as np
from zeno_etl_libs.utils.ipc2.config_ipc import date_col, store_col, \
drug_col, target_col, key_col, local_testing
class PreprocessData:
def add_ts_id(self, df):
df = df[~df[drug_col].isnull()].reset_index(drop=True)
df['ts_id'] = (
df[store_col].astype(int).astype(str)
+ '_'
+ df[drug_col].astype(int).astype(str)
)
return df
def preprocess_sales(self, df, drug_list):
df.rename(columns={
'net_sales_quantity': target_col
}, inplace=True)
df.rename(columns={
'sales_date': date_col
}, inplace=True)
set_dtypes = {
store_col: int,
drug_col: int,
date_col: str,
target_col: float
}
df = df.astype(set_dtypes)
df[target_col] = df[target_col].round()
df[date_col] = pd.to_datetime(df[date_col])
df = df.groupby(
[store_col, drug_col, key_col, date_col]
)[target_col].sum().reset_index()
df = df[df[drug_col].isin(drug_list[drug_col].unique().tolist())]
return df
def get_formatted_data(self, df):
df_start = df.groupby([key_col])[date_col].min().reset_index().rename(
columns={date_col: 'sales_start'})
df = df[[key_col, date_col, target_col]]
min_date = df[date_col].dropna().min()
end_date = df[date_col].dropna().max()
date_range = []
date_range = pd.date_range(
start=min_date,
end=end_date,
freq='d'
)
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = np.nan
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = pd.merge(df, df_start, how='left', on=key_col)
df = df[df[date_col] >= df['sales_start']]
df[[store_col, drug_col]] = df[key_col].str.split('_', expand=True)
df[[store_col, drug_col]] = df[[store_col, drug_col]].astype(int)
return df
def preprocess_cfr_pr(self, df):
set_dtypes = {
store_col: int,
drug_col: int,
'loss_quantity': int
}
df = df.astype(set_dtypes)
df['shortbook_date'] = pd.to_datetime(df['shortbook_date'])
return df
def merge_cfr_pr(self, sales, cfr_pr):
df = sales.merge(cfr_pr,
left_on=[store_col, drug_col, date_col],
right_on=[store_col, drug_col, 'shortbook_date'],
how='left')
df[date_col] = df[date_col].combine_first(df['shortbook_date'])
df[target_col].fillna(0, inplace=True)
df['loss_quantity'].fillna(0, inplace=True)
df[target_col] += df['loss_quantity']
df.drop(['shortbook_date', 'loss_quantity'], axis=1, inplace=True)
return df
def preprocess_calendar(self, df, last_date):
df.rename(columns={'date': date_col}, inplace=True)
df[date_col] = pd.to_datetime(df[date_col])
cal_sales = df.copy()
cal_sales['week_begin_dt'] = cal_sales.apply(
lambda x: x[date_col] - datetime.timedelta(x['day_of_week']),
axis=1)
        cal_sales['month_begin_dt'] = cal_sales.apply(
            lambda x: x[date_col] - datetime.timedelta(x[date_col].day - 1),
            axis=1)
cal_sales['key'] = 1
ld = pd.to_datetime(last_date)
cal_sales = cal_sales[cal_sales[date_col] > ld]
return df, cal_sales
def merge_calendar(self, sales, calendar):
df = sales.merge(calendar,
how='left',
on=date_col
)
# df_week_days_count = df.groupby([key_col, 'year', 'week_of_year'])[date_col].count().reset_index().rename(columns = {date_col:'week_days_count'})
# df['week_days_count'] = 1
df['week_begin_dt'] = df.apply(
lambda x: x[date_col] - datetime.timedelta(x['day_of_week']),
axis=1)
df_week_days_count = df.groupby(['ts_id', 'week_begin_dt'])[
date_col].count().reset_index().rename(
columns={date_col: 'week_days_count'})
# df = df.groupby(['ts_id', store_col, drug_col, ]).resample('W-Mon', on =date_col )[target_col].sum().reset_index()
df = df.groupby(['ts_id', store_col, drug_col, 'week_begin_dt'])[
target_col].sum().reset_index()
df = pd.merge(df, df_week_days_count, how='left',
on=[key_col, 'week_begin_dt'])
df = df[df['week_days_count'] == 7].reset_index(drop=True)
df.drop(columns=['week_days_count'], inplace=True)
df.rename(columns={'week_begin_dt': date_col}, inplace=True)
return df
def preprocess_bill_date(self, df):
df.rename(columns={'store-id': store_col}, inplace=True)
df['bill_date'] = pd.to_datetime(df['bill_date'])
return df
def merge_first_bill_date(self, sales, first_bill_date):
df = pd.merge(sales, first_bill_date, on=[store_col])
df = df[df[date_col] >= df['bill_date']].reset_index(drop=True)
df.drop(columns=['bill_date'], inplace=True)
return df
def make_future_df(self, df):
start_date_df = (
df
.groupby(key_col)[date_col]
.min()
.reset_index()
.rename(columns={date_col: 'start_date'})
)
df = df[[key_col, date_col, target_col]]
end_date = df[date_col].max() + datetime.timedelta(weeks=5)
min_date = df[date_col].min()
date_range = pd.date_range(
start=min_date,
end=end_date,
freq="W-MON"
)
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = 0
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = df.merge(start_date_df, on=key_col, how='left')
df = df[
df[date_col] >= df['start_date']
]
df.drop('start_date', axis=1, inplace=True)
df[[store_col, drug_col]] = df[key_col].str.split('_', expand=True)
return df
def preprocess_all(
self,
sales=None,
cfr_pr=None,
drug_list=None,
calendar=None,
first_bill_date=None,
last_date=None,
):
sales = self.add_ts_id(sales)
# filter
#################################################
if local_testing == 1:
tsid_list = \
sales.sort_values(by=['net_sales_quantity'], ascending=False)[
key_col].unique().tolist()[:10]
sales = sales[sales[key_col].isin(tsid_list)]
#################################################
sales = self.preprocess_sales(sales, drug_list)
sales = self.get_formatted_data(sales)
cfr_pr = self.preprocess_cfr_pr(cfr_pr)
sales_daily = self.merge_cfr_pr(sales, cfr_pr)
calendar, cal_sales = self.preprocess_calendar(calendar, last_date)
sales = self.merge_calendar(sales_daily, calendar)
first_bill_date = self.preprocess_bill_date(first_bill_date)
sales = self.merge_first_bill_date(sales, first_bill_date)
sales_pred = self.make_future_df(sales.copy())
return (
sales,
sales_pred,
cal_sales,
sales_daily
        )

# ==== end of file: zeno_etl_libs/utils/ipc2/engine/data_pre_process.py (package: zeno-etl-libs) ====
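# --------------------------------------------------------------------------
# Hedged sketch (illustration only): what make_future_df does conceptually —
# extend each series' weekly (W-MON) index a few weeks past the last actual
# and fill the new rows with 0 so the models have rows to score. The ts_id
# and column names here are stand-ins for the real key/target columns.
def _demo_future_rows():
    import pandas as pd

    hist = pd.DataFrame({
        "ts_id": "8_123",
        "date": pd.date_range("2022-01-03", periods=3, freq="W-MON"),
        "actual_demand": [4, 0, 6],
    })
    future_idx = pd.date_range(hist["date"].max() + pd.Timedelta(weeks=1),
                               periods=5, freq="W-MON")
    future = pd.DataFrame({"ts_id": "8_123", "date": future_idx,
                           "actual_demand": 0})
    return pd.concat([hist, future], ignore_index=True)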
import numpy as np
np.random.seed(0)
import pandas as pd
import time
import re
from dateutil.relativedelta import relativedelta
from category_encoders.target_encoder import TargetEncoder
from category_encoders.leave_one_out import LeaveOneOutEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from category_encoders.cat_boost import CatBoostEncoder
from xgboost import XGBRegressor
from xgboost import XGBRFRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
# from boruta import BorutaPy
from zeno_etl_libs.utils.ipc2.config_ipc import (
date_col,
target_col,
flag_sample_weights,
flag_seasonality_index,
models
)
import logging
logger = logging.getLogger("_logger")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
class Forecast:
def one_hot_encode(self, df, one_hot_cols):
added_one_hot_cols = []
for col in one_hot_cols:
one_hot_df = pd.get_dummies(df[col], prefix=col, drop_first=False)
df = pd.concat([df.drop(col, axis=1), one_hot_df], axis=1)
added_one_hot_cols += one_hot_df.columns.to_list()
return df, added_one_hot_cols
def encode(self, X_train, y_train, cat_cols, encoder_type='Target'):
if len(cat_cols) == 0:
return X_train, None
cat_cols = list(set(cat_cols) & set(X_train.columns))
logger.debug('Categorical Encoding Started...')
if encoder_type == 'Target':
encoder = TargetEncoder(cols=cat_cols)
elif encoder_type == 'Catboost':
encoder = CatBoostEncoder(cols=cat_cols)
elif encoder_type == 'LeaveOneOut':
encoder = LeaveOneOutEncoder(cols=cat_cols)
X_train_enc = encoder.fit_transform(
X_train, y_train
)
return X_train_enc, encoder
# def get_target_encode(X_train, y_train, target_encode_cols):
# for col in target_encode_cols:
# encoder = TargetEncoder()
# encoder.fit_transform(df[[col, province_col]], df[target_col])
# encoder.transform()
# return df
def get_feature_imp(self, algo, algo_name, feature_cols, forecast_month):
importance = algo.feature_importances_.round(4)
names = np.array(feature_cols)
Feature_Imp = pd.DataFrame(data=np.column_stack((names, importance)
),
columns=['names', 'importance'])
Feature_Imp.columns = ['Feature', 'Feature_Importance']
Feature_Imp[date_col] = forecast_month
Feature_Imp = Feature_Imp[[date_col, 'Feature', 'Feature_Importance']]
Feature_Imp = Feature_Imp.rename(
columns={'Feature_Importance': algo_name})
return (Feature_Imp)
def get_train_test_split(
self, df, train_max_date, forecast_start_date, forecast_end_date,
num_shift_lags=1
):
# TODO - no need to do this. Lags are offset enough. we can use latest week data
train = df[df[date_col] <= train_max_date]
# train
train = train[
(train[date_col] < forecast_start_date)
]
# train.dropna(inplace = True)
train.set_index(['ts_id', date_col], inplace=True)
X_train = train.drop(target_col, axis=1)
y_train = train[target_col].fillna(0)
# test
test = df[
(df[date_col] >= forecast_start_date) &
(df[date_col] <= forecast_end_date)
]
test.set_index(['ts_id', date_col], inplace=True)
X_test = test.drop(target_col, axis=1)
y_test = test[target_col]
return X_train, y_train, X_test, y_test
def get_regex_filtered_data(self, l):
regex = re.compile(r"\[|\]|<", re.IGNORECASE)
filtered_l = [
regex.sub("_", col) if any(
x in str(col) for x in set(("[", "]", "<", " "))) else col
for col in l
]
filtered_l = [i.replace(" ", "_") for i in filtered_l]
return filtered_l
def add_sample_weights(self, df, max_date, forecast_start):
df['days_till_last_day'] = (df[date_col] - max_date).dt.days
df['sample_weights'] = np.exp(df['days_till_last_day'] / 2)
df.loc[
df[date_col] == (forecast_start - relativedelta(months=12)),
'sample_weights'
] = 1
df.loc[
df[date_col] == (forecast_start - relativedelta(months=24)),
'sample_weights'
] = 1
df = df.set_index(['ts_id', date_col])
return df['sample_weights']
def get_model_predictions(
self, model, model_name, X_test, y_test, Feature_Imp_all,
feature_cols, forecast_start
):
if Feature_Imp_all.empty:
Feature_Imp_all = self.get_feature_imp(
model, model_name, feature_cols, forecast_start
)
else:
Feature_Imp_all = pd.merge(
Feature_Imp_all,
self.get_feature_imp(model, model_name, feature_cols,
forecast_start),
how='outer',
on=[date_col, 'Feature']
)
if pd.DataFrame(X_test).empty:
return y_test, Feature_Imp_all
y_pred = model.predict(X_test)
pred_col = 'preds_{}'.format(model_name)
y_test[pred_col] = y_pred
y_test.loc[y_test[pred_col] < 0, pred_col] = 0
y_test[pred_col] = y_test[pred_col].fillna(0)
lgb_acc = 1 - self.wmape(y_test[target_col], y_test[pred_col])
logger.info(
"{} Accuracy {}: {}"
.format(model_name, forecast_start.strftime("%b"), lgb_acc)
)
return y_test, Feature_Imp_all
def wmape(self, actuals, forecast):
error = abs(actuals - forecast)
wmape_val = error.sum() / actuals.sum()
return wmape_val
def get_STM_forecast(self, df, forecast_start, num_shift_lags=1):
global num_cols, cat_cols
ts_features = [
i for i in df.columns if (
('lag' in i)
| ('_Sin' in i)
| ('_Cos' in i)
| ('_mean' in i)
| ('_trend' in i)
| ('ewm' in i)
| ('seasonality_index' in i)
)
]
flag_cols = [i for i in df.columns if i.endswith('_flag')]
num_cols = (
ts_features
)
target_encode_cols = [
]
one_hot_cols = [
'classification',
'Group',
'ABC',
'WXYZ',
'PLC Status L1'
]
df, added_one_hot_cols = self.one_hot_encode(df, one_hot_cols)
# df = self.get_target_encode(df, target_encode_cols)
train_max_date = (forecast_start - relativedelta(weeks=num_shift_lags))
X_train, y_train, X_test, y_test = self.get_train_test_split(
df, train_max_date, forecast_start, forecast_start,
num_shift_lags=num_shift_lags
)
# TODO:
# Add more cat features from item heirarchy and province, planning item,
# rolling features
# ewma
# promo
# Nan --> keep it
# ETS as a feature
cat_features = [
# 'covid_flag'
] + target_encode_cols
feature_cols = num_cols + flag_cols + added_one_hot_cols + [
'LOL',
'Year_Num', 'Quarter_Num', 'Quarter_Sin', 'Quarter_Cos',
'Month_Num', 'Month_Sin', 'Month_Cos',
'cov', 'ADI'
]
feature_cols = list(set(df.columns) & set(feature_cols))
# cat_features = []
X_train = X_train[feature_cols + cat_features]
X_test = X_test[feature_cols + cat_features]
X_train.columns = self.get_regex_filtered_data(X_train.columns)
X_test.columns = self.get_regex_filtered_data(X_test.columns)
y_test = y_test.reset_index()
val = (
X_train
.reset_index()
.merge(y_train.reset_index(), on=[date_col, 'ts_id'],
how='left')
)
_, _, X_val, y_val = self.get_train_test_split(
val,
train_max_date=train_max_date - relativedelta(weeks=8),
forecast_start_date=train_max_date - relativedelta(weeks=7),
forecast_end_date=train_max_date,
num_shift_lags=num_shift_lags
)
y_val = y_val.reset_index()
filtered_cat_features = self.get_regex_filtered_data(cat_features)
# X_train.dropna()
Feature_Imp_all = pd.DataFrame()
# X_train[filtered_cat_features] = X_train[filtered_cat_features].astype(str)
# Encoding
X_train_target, encoder = self.encode(X_train, y_train,
filtered_cat_features, 'Target')
if encoder != None:
X_test_target = encoder.transform(
X_test[encoder.get_feature_names()])
X_val_target = encoder.transform(X_val[encoder.get_feature_names()])
else:
X_test_target = X_test.copy(deep=True)
X_val_target = X_val.copy(deep=True)
        sample_weights = self.add_sample_weights(
X_train_target.reset_index()[['ts_id', date_col]],
max_date=train_max_date,
forecast_start=forecast_start
)
if 'XGB' in models:
###XGBRF##
logger.info("Forecasting XGRF...")
xgb_rf = XGBRFRegressor(n_estimators=750, random_state=42)
xgb_sample_weights = None
if flag_sample_weights['xgb']:
                xgb_sample_weights = sample_weights
X_train_xgb = X_train_target
X_test_xgb = X_test_target
X_val_xgb = X_val_target
if flag_seasonality_index['xgb']:
seas_cols = [i for i in X_train_target.columns if
'seasonality_index' in i]
X_train_xgb = X_train_target.drop(seas_cols, axis=1).copy()
X_test_xgb = X_test_target.drop(seas_cols, axis=1).copy()
X_val_xgb = X_val_target.drop(seas_cols, axis=1).copy()
xgb_rf.fit(X_train_xgb, y_train, sample_weight=xgb_sample_weights)
y_test, Feature_Imp_all = self.get_model_predictions(
model=xgb_rf,
model_name='xgb_rf_target',
X_test=X_test_xgb,
y_test=y_test,
Feature_Imp_all=Feature_Imp_all,
feature_cols=X_train_xgb.columns,
forecast_start=forecast_start
)
y_val, Feature_Imp_all = self.get_model_predictions(
model=xgb_rf,
model_name='xgb_rf_target',
X_test=X_val_xgb,
y_test=y_val,
Feature_Imp_all=Feature_Imp_all,
feature_cols=X_train_xgb.columns,
forecast_start=forecast_start
)
if 'CTB' in models:
###Catboost Target##
logger.info("Forecasting CB...")
cb = CatBoostRegressor(
n_estimators=1000, learning_rate=0.01,
cat_features=filtered_cat_features,
# one_hot_max_size = 16,
random_state=42, verbose=0
)
ctb_sample_weights = None
if flag_sample_weights['ctb']:
                ctb_sample_weights = sample_weights
X_train_ctb = X_train
X_test_ctb = X_test
X_val_ctb = X_val
if flag_seasonality_index['ctb']:
seas_cols = [i for i in X_train.columns if
'seasonality_index' in i]
X_train_ctb = X_train.drop(seas_cols, axis=1).copy()
X_test_ctb = X_test.drop(seas_cols, axis=1).copy()
X_val_ctb = X_val.drop(seas_cols, axis=1).copy()
cb.fit(X_train_ctb, y_train, sample_weight=ctb_sample_weights)
y_test, Feature_Imp_all = self.get_model_predictions(
model=cb,
model_name='cb_target',
X_test=X_test_ctb,
y_test=y_test,
Feature_Imp_all=Feature_Imp_all,
feature_cols=X_train_ctb.columns,
forecast_start=forecast_start
)
y_val, Feature_Imp_all = self.get_model_predictions(
model=cb,
model_name='cb_target',
X_test=X_val_ctb,
y_test=y_val,
Feature_Imp_all=Feature_Imp_all,
feature_cols=X_train_ctb.columns,
forecast_start=forecast_start
)
if 'LGBM' in models:
###LGBM##
logger.info("Forecasting LGBM...")
lgb = LGBMRegressor(
n_estimators=2000, learning_rate=0.005,
max_depth=10, num_leaves=int((2 ** 10) / 2), max_bin=1000,
random_state=42, verbose=-1,
categorical_feature=filtered_cat_features,
)
X_train1 = X_train.copy(deep=True)
X_test1 = X_test.copy(deep=True)
X_val1 = X_val.copy(deep=True)
logger.info("LGBM train: {}".format(X_train1.head(2)))
logger.info(
'Filtered cat features:{}'.format(filtered_cat_features))
logger.info('Filtered cat features columns:{}'.format(
X_train1[filtered_cat_features].columns))
logger.info('Filtered cat features dtypes:{}'.format(
X_train1[filtered_cat_features].dtypes))
# X_train1[filtered_cat_features] = X_train1[filtered_cat_features].astype('category')
# X_test1[filtered_cat_features] = X_test1[filtered_cat_features].astype('category')
# X_val1[filtered_cat_features] = X_val1[filtered_cat_features].astype('category')
lgb_sample_weights = None
if flag_sample_weights['lgbm']:
                lgb_sample_weights = sample_weights
X_train_lgbm = X_train1
X_test_lgbm = X_test1
X_val_lgbm = X_val1
if flag_seasonality_index['lgbm']:
seas_cols = [i for i in X_train1.columns if
'seasonality_index' in i]
X_train_lgbm = X_train1.drop(seas_cols, axis=1).copy()
X_test_lgbm = X_test1.drop(seas_cols, axis=1).copy()
X_val_lgbm = X_val1.drop(seas_cols, axis=1).copy()
lgb.fit(X_train_lgbm, y_train, sample_weight=lgb_sample_weights)
y_test, Feature_Imp_all = self.get_model_predictions(
model=lgb,
model_name='lgb',
X_test=X_test_lgbm,
y_test=y_test,
Feature_Imp_all=Feature_Imp_all,
feature_cols=X_test_lgbm.columns,
forecast_start=forecast_start
)
y_val, Feature_Imp_all = self.get_model_predictions(
model=lgb,
model_name='lgb',
X_test=X_val_lgbm,
y_test=y_val,
Feature_Imp_all=Feature_Imp_all,
feature_cols=X_test_lgbm.columns,
forecast_start=forecast_start
)
        return y_test, y_val, Feature_Imp_all

# ==== end of file: zeno_etl_libs/utils/ipc2/engine/forecast.py (package: zeno-etl-libs) ====
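# --------------------------------------------------------------------------
# Hedged worked example (not part of the original module): the wmape metric
# used in Forecast.get_model_predictions — total absolute error divided by
# total actuals, reported as 1 - wmape ("accuracy"). Arrays are toy numbers.
def _demo_wmape():
    import numpy as np

    actuals = np.array([10.0, 0.0, 5.0, 5.0])
    forecast = np.array([8.0, 1.0, 5.0, 7.0])
    wmape_val = np.abs(actuals - forecast).sum() / actuals.sum()   # 5 / 20 = 0.25
    return 1 - wmape_val                                           # 0.75 accuracy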
class LoadData:
def load_file(self, db, query):
df = db.get_df(query)
df.columns = [c.replace('-', '_') for c in df.columns]
return df
def load_all_input(
self,
type_list=None,
store_id_list=None,
last_date=None,
reset_date=None,
load_max_date=None,
schema=None,
db=None
):
drug_list = self.load_file(
query="""
select id as drug_id from "{schema}".drugs where type in {0}
""".format(type_list, schema=schema),
db=db
)
sales_history = self.load_file(
query="""
select date("created-at") as "sales-date","store-id", "drug-id" ,
sum("net-quantity") as "net-sales-quantity"
from "{schema}".sales s
where "store-id" in {store_id_list}
and date("created-at") >= '{last_date}'
and date("created-at") <= '{load_max_date}'
group by "store-id", "drug-id", "sales-date"
""".format(
store_id_list=store_id_list, last_date=last_date,
load_max_date=load_max_date, schema=schema),
db=db
)
cfr_pr = self.load_file(
query=f"""
select "store-id", "drug-id","shortbook-date",
sum("loss-quantity") as "loss-quantity"
from "{schema}"."cfr-patient-request"
where "shortbook-date" >= '{last_date}'
and "shortbook-date" <= '{load_max_date}'
and "drug-id" <> -1
and ("drug-category" = 'chronic' or "repeatability-index" >= 40)
and "loss-quantity" > 0
and "drug-type" in {type_list}
and "store-id" in {store_id_list}
group by "store-id","drug-id", "shortbook-date"
""",
db=db
)
calendar = self.load_file(
query="""
select date, year, month, "week-of-year", "day-of-week"
from "{schema}".calendar
where date < '{reset_date}'
""".format(schema=schema, reset_date=reset_date),
db=db
)
first_bill_date = self.load_file(
query="""
select "store-id" , min(date("created-at")) as bill_date from "{schema}".sales
where "store-id" in {store_id_list}
group by "store-id"
""".format(schema=schema, store_id_list=store_id_list),
db=db
)
return (
drug_list,
sales_history,
cfr_pr,
calendar,
first_bill_date
        )

# ==== end of file: zeno_etl_libs/utils/ipc2/engine/data_load.py (package: zeno-etl-libs) ====
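# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only): how load_all_input is typically
# wired up. The db handle, schema name, and the string formats of type_list /
# store_id_list are assumptions — any object exposing get_df(query) -> DataFrame
# works with LoadData, and the list arguments are injected verbatim into SQL.
def _demo_load_inputs(db, schema="my-schema"):
    from zeno_etl_libs.utils.ipc2.engine.data_load import LoadData

    loader = LoadData()
    drug_list, sales_history, cfr_pr, calendar, first_bill_date = \
        loader.load_all_input(
            type_list="('ethical', 'generic')",   # assumed SQL IN (...) string
            store_id_list="(8)",
            last_date="2019-04-01",
            reset_date="2023-01-02",
            load_max_date="2023-01-01",
            schema=schema,
            db=db,
        )
    return drug_list.shape, sales_history.shape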
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
from tqdm import tqdm
import logging
import warnings
warnings.filterwarnings("ignore")
logger = logging.getLogger("_logger")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
from zeno_etl_libs.utils.ipc2.config_ipc import (
date_col,
target_col,
store_col,
drug_col,
eol_cutoff
)
class Segmentation:
def add_ts_id(self, df):
df['ts_id'] = (
df[store_col].astype(int).astype(str)
+ '_'
+ df[drug_col].astype(int).astype(str)
)
return df
def _calc_abc(self, df52):
B_cutoff = 0.8
C_cutoff = 0.95
D_cutoff = 0.995
tot_sales = (
df52.groupby([
'ts_id'
])[target_col].sum().reset_index()
)
tot_sales.rename(columns={target_col: 'total_LY_sales'}, inplace=True)
tot_sales.sort_values('total_LY_sales', ascending=False, inplace=True)
tot_sales["perc_sales"] = (
tot_sales['total_LY_sales'] / tot_sales['total_LY_sales'].sum()
)
tot_sales["cum_perc_sales"] = tot_sales.perc_sales.cumsum()
tot_sales["ABC"] = "A"
tot_sales.loc[tot_sales.cum_perc_sales > B_cutoff, "ABC"] = "B"
tot_sales.loc[tot_sales.cum_perc_sales > C_cutoff, "ABC"] = "C"
tot_sales.loc[tot_sales.cum_perc_sales > D_cutoff, "ABC"] = "D"
# tot_sales = self.add_ts_id(tot_sales)
return tot_sales[['ts_id', 'ABC', 'total_LY_sales']]
# TODO: lower COV cutoffs
def get_abc_classification(self, df52):
province_abc = df52.groupby(
[store_col]
).apply(self._calc_abc)
province_abc = province_abc[['ts_id', "ABC"]].reset_index(drop=True)
# one
tot_sales = (
df52
.groupby(['ts_id'])[target_col]
.agg(['sum', 'mean'])
.reset_index()
)
tot_sales.rename(
columns={'sum': 'total_LY_sales', 'mean': 'avg_ly_sales'},
inplace=True)
tot_sales = tot_sales.merge(
province_abc,
on=['ts_id'],
how='left'
)
tot_sales = tot_sales.drop_duplicates()
# tot_sales = self.add_ts_id(tot_sales)
tot_sales = tot_sales[['ts_id', 'ABC']]
return tot_sales
def get_xyzw_classification(self, df1):
input_ts_id = df1['ts_id'].unique()
df1 = df1[df1[target_col] > 0]
cov_df = df1.groupby(['ts_id'])[target_col].agg(
["mean", "std", "count", "sum"])
cov_df.reset_index(drop=False, inplace=True)
cov_df['cov'] = np.where(
((cov_df["count"] > 2) & (cov_df["sum"] > 0)),
(cov_df["std"]) / (cov_df["mean"]),
np.nan
)
cov_df['WXYZ'] = 'Z'
cov_df.loc[cov_df['cov'] <= 1.2, 'WXYZ'] = 'Y'
cov_df.loc[cov_df['cov'] <= 0.8, 'WXYZ'] = 'X'
cov_df.loc[cov_df['cov'] <= 0.5, 'WXYZ'] = 'W'
# cov_df = self.add_ts_id(cov_df)
cov_df = cov_df[['ts_id', 'cov', 'WXYZ']]
non_mapped_ts_ids = list(
set(input_ts_id) - set(cov_df['ts_id'].unique())
)
non_mapped_cov = pd.DataFrame({
'ts_id': non_mapped_ts_ids,
'cov': [np.nan] * len(non_mapped_ts_ids),
'WXYZ': ['Z'] * len(non_mapped_ts_ids)
})
cov_df = pd.concat([cov_df, non_mapped_cov], axis=0)
cov_df = cov_df.reset_index(drop=True)
return cov_df
def get_std(self, df1):
input_ts_id = df1['ts_id'].unique()
# df1 = df1[df1[target_col]>0]
std_df = df1.groupby(['ts_id'])[target_col].agg(["std"])
return std_df
def calc_interval_mean(self, x, key):
df = pd.DataFrame({"X": x, "ts_id": key}).reset_index(
drop=True).reset_index()
df = df[df.X > 0]
df["index_shift"] = df["index"].shift(-1)
df["interval"] = df["index_shift"] - df["index"]
df = df.dropna(subset=["interval"])
df['ADI'] = np.mean(df["interval"])
return df[['ts_id', 'ADI']]
def calc_adi(self, df):
# df = self.add_ts_id(df)
logger.info(
'Combinations entering adi: {}'.format(df['ts_id'].nunique()))
dict_of = dict(iter(df.groupby(['ts_id'])))
logger.info("Total tsids in df: {}".format(df.ts_id.nunique()))
logger.info("Total dictionary length: {}".format(len(dict_of)))
list_dict = [
self.calc_interval_mean(dict_of[x][target_col], x) for x in
tqdm(dict_of.keys())
]
data = (
pd.concat(list_dict)
.reset_index(drop=True)
.drop_duplicates()
.reset_index(drop=True)
)
logger.info('Combinations exiting adi: {}'.format(data.ts_id.nunique()))
return data
def get_PLC_segmentation(self, df, mature_cutoff_date, eol_cutoff_date):
df1 = df[df[target_col] > 0]
df1 = df1.groupby(['ts_id']).agg({date_col: [min, max]})
df1.reset_index(drop=False, inplace=True)
df1.columns = [' '.join(col).strip() for col in df1.columns.values]
df1['PLC Status L1'] = 'Mature'
df1.loc[
(df1[date_col + ' min'] > mature_cutoff_date), 'PLC Status L1'
] = 'NPI'
df1.loc[
(df1[date_col + ' max'] <= eol_cutoff_date), 'PLC Status L1'
] = 'EOL'
# df1 = self.add_ts_id(df1)
df1 = df1[['ts_id', 'PLC Status L1']]
return df1
def get_group_mapping(self, seg_df):
seg_df['Mixed'] = seg_df['ABC'].astype(str) + seg_df['WXYZ'].astype(str)
seg_df['Group'] = 'Group3'
group1_mask = seg_df['Mixed'].isin(['AW', 'AX', 'BW', 'BX'])
seg_df.loc[group1_mask, 'Group'] = 'Group1'
group2_mask = seg_df['Mixed'].isin(['AY', 'AZ', 'BY', 'BZ'])
seg_df.loc[group2_mask, 'Group'] = 'Group2'
return seg_df
def calc_dem_pat(self, cov_df, adi_df):
logger.info('Combinations entering calc_dem_pat: {}'.format(
cov_df.ts_id.nunique()))
logger.info('Combinations entering calc_dem_pat: {}'.format(
adi_df.ts_id.nunique()))
df = pd.merge(cov_df, adi_df, how='left', on='ts_id')
df["cov2"] = np.power(df["cov"], 2)
df["classification"] = "Lumpy"
df.loc[
(df.ADI >= 1.32) & (df.cov2 < 0.49), "classification"
] = "Intermittent"
df.loc[
(df.ADI < 1.32) & (df.cov2 >= 0.49), "classification"
] = "Erratic"
df.loc[
(df.ADI < 1.32) & (df.cov2 < 0.49), "classification"
] = "Smooth"
logger.info(
'Combinations exiting calc_dem_pat: {}'.format(df.ts_id.nunique()))
return df[['ts_id', 'classification']]
def get_start_end_dates_df(self, df, key_col, date_col, target_col,
train_max_date, end_date):
start_end_date_df = (
df[df[target_col] > 0]
.groupby(key_col)[date_col]
.agg({'min', 'max'})
.reset_index()
.rename(columns={'min': 'start_date', 'max': 'end_date'})
)
start_end_date_df.loc[
(
start_end_date_df['end_date'] > (
train_max_date - relativedelta(weeks=eol_cutoff)
)
), 'end_date'
] = end_date
return start_end_date_df
def get_weekly_segmentation(self, df, df_sales_daily, train_max_date,
end_date):
df = df[df[date_col] <= train_max_date]
df1 = df[
df[date_col] > (train_max_date - relativedelta(weeks=52))
].copy(deep=True)
df_std = df_sales_daily[
df_sales_daily[date_col] > (train_max_date - relativedelta(days=90))
].copy(deep=True)
df1 = self.add_ts_id(df1)
abc_df = self._calc_abc(df1)
xyzw_df = self.get_xyzw_classification(df1)
std_df = self.get_std(df_std)
adi_df = self.calc_adi(df1)
demand_pattern_df = self.calc_dem_pat(xyzw_df[['ts_id', 'cov']], adi_df)
mature_cutoff_date = train_max_date - relativedelta(weeks=52)
eol_cutoff_date = train_max_date - relativedelta(weeks=13)
plc_df = self.get_PLC_segmentation(df, mature_cutoff_date,
eol_cutoff_date)
start_end_date_df = self.get_start_end_dates_df(
df, key_col='ts_id',
date_col=date_col,
target_col=target_col,
train_max_date=train_max_date,
end_date=end_date
)
seg_df = plc_df.merge(abc_df, on='ts_id', how='outer')
seg_df = seg_df.merge(xyzw_df, on='ts_id', how='outer')
seg_df = seg_df.merge(adi_df, on='ts_id', how='outer')
seg_df = seg_df.merge(demand_pattern_df, on='ts_id', how='outer')
seg_df = seg_df.merge(start_end_date_df, on='ts_id', how='outer')
seg_df = seg_df.merge(std_df, on='ts_id', how='outer')
seg_df = self.get_group_mapping(seg_df)
seg_df['Mixed'] = np.where(seg_df['Mixed']=='nannan', np.nan, seg_df['Mixed'])
        drug_class = seg_df[
            ['ts_id', 'total_LY_sales', 'std', 'cov', 'ABC', 'WXYZ']].copy()
drug_class[[store_col, drug_col]] = drug_class['ts_id'].str.split('_',
expand=True)
drug_class.rename(
columns={'total_LY_sales': 'net_sales', 'std': 'sales_std_dev',
'cov': 'sales_cov', 'ABC': 'bucket_abc',
'WXYZ': 'bucket_xyz'}, inplace=True)
drug_class.drop(columns=['ts_id'], inplace=True)
# seg_df[[store_col, drug_col]] = seg_df['ts_id'].str.split('_', expand = True)
# seg_df.drop(columns=['ts_id'],inplace=True)
# seg_df.rename(columns={'std':'sales_std_dev', 'cov':'sales_cov', 'ABC':'bucket_abcd', 'WXYZ':'bucket_wxyz', 'Mixed':'bucket'}, inplace=True)
# seg_df['PLC Status L1'] = np.where(seg_df['PLC Status L1']=='NPI', 'New_Product', seg_df['PLC Status L1'])
# seg_df['start_date'] = seg_df['start_date'].astype(str)
# seg_df = seg_df[[store_col, drug_col,'PLC Status L1', 'total_LY_sales', 'bucket_abcd', 'bucket_wxyz', 'bucket', 'classification', 'Group', 'sales_std_dev', 'sales_cov', 'ADI', 'start_date' ]]
# seg_df = pd.merge(seg_df, drug_class[[store_col, 'store_name', drug_col, ]])
        return seg_df, drug_class

# ==== end of file: zeno_etl_libs/utils/ipc2/engine/segmentation.py (package: zeno-etl-libs) ====
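# --------------------------------------------------------------------------
# Hedged worked example (not part of the original module): the cut-offs used
# in Segmentation — WXYZ from the coefficient of variation, and the ADI/CoV^2
# quadrants for the demand pattern. The toy mean/std/ADI values are arbitrary.
def _demo_segmentation_cutoffs(mean_demand=4.0, std_demand=2.4, adi=1.5):
    cov = std_demand / mean_demand          # 0.6 -> bucket 'X'
    wxyz = "Z"
    if cov <= 1.2:
        wxyz = "Y"
    if cov <= 0.8:
        wxyz = "X"
    if cov <= 0.5:
        wxyz = "W"
    cov2 = cov ** 2                         # 0.36
    if adi >= 1.32 and cov2 < 0.49:
        pattern = "Intermittent"            # this toy case
    elif adi < 1.32 and cov2 >= 0.49:
        pattern = "Erratic"
    elif adi < 1.32 and cov2 < 0.49:
        pattern = "Smooth"
    else:
        pattern = "Lumpy"
    return wxyz, pattern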
from datetime import date
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
from tqdm import tqdm
import time
from statsmodels.tsa.arima_model import ARMA
from sklearn.linear_model import LinearRegression
import logging
import warnings
warnings.filterwarnings("ignore")
logger = logging.getLogger("_logger")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
from zeno_etl_libs.utils.ipc2.config_ipc import (
date_col,
target_col,
store_col,
drug_col,
eol_cutoff,
add_lags_diff_flag,
add_monthly_lags_flag,
rolling_time_feat,
ewma_lags,
trend_lags,
lags,
lags_diff,
monthly_lags,
)
class Feature_Engg:
def add_week_of_month(self, df):
df["week_of_month"] = df[date_col].apply(lambda d: (d.day - 1) // 7 + 1)
return df
def add_month(self, df):
df['Month'] = df[date_col].dt.month
return df
def calc_si(self, df, num_shift_lag=1):
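        """
        Seasonal index per (ts_id, Week_Number): mean weekly sales divided
        by the overall series mean (shifted by num_shift_lag to avoid
        leakage). Note: this relies on a module-level `cutoff_dic`
        (expected to come from the IPC config) which is not imported in
        this file, so calling it directly would raise a NameError.
        """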
max_date = df[date_col].max()
time_lag = (len(df) // cutoff_dic["si_week_freq"]) * cutoff_dic[
"si_week_freq"]
min_date = max_date + relativedelta(months=-time_lag)
df = df[(df[date_col] > min_date)]
try:
tsid_mean = np.mean(df[target_col].shift(num_shift_lag))
        except BaseException as e:
            logger.info(df.ts_id.iloc[0])
            logger.info(e)
            tsid_mean = np.nan  # fallback so the Seas_index division below does not hit a NameError
df = df.groupby(["ts_id", "Week_Number"])[
target_col].mean().reset_index()
df["Seas_index"] = df[target_col] / tsid_mean
df = df[["ts_id", "Week_Number", "Seas_index"]]
return df
def ratio_based_si(self, df, train_max_date, num_shift_lag):
if 'ts_id' not in df.columns:
df = self.add_ts_id(df)
train = df[df[date_col] <= train_max_date]
dict_of = dict(iter(train.groupby(["ts_id"])))
si_list = [self.calc_si(dict_of[x], num_shift_lag) for x in
dict_of.keys()]
si_df = pd.concat(si_list)
if 'Seas_index' in df.columns:
df.drop(['Seas_index'], axis=1, inplace=True)
df = pd.merge(df, si_df, how="left", on=["ts_id", "Week_Number"])
df['Seas_index'].fillna(1, inplace=True)
return df
def add_ARMA_forecast(self, data):
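        """
        Fit an ARMA(0, 1) model on the target and fill the last
        forecast-horizon periods with its predictions ("Actuals + ARMA").
        Relies on the legacy `statsmodels.tsa.arima_model.ARMA` API, which
        has been removed in recent statsmodels releases.
        """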
model = ARMA(data[target_col], order=(0, 1))
model_fit = model.fit(disp=False)
        # forecast for required time periods
yhat = model_fit.predict(
len(data) - cutoff_dic["forecast_horizon_period"], len(data) - 1
)
data["ARMA_Forecast"] = yhat
data["Actuals + ARMA"] = np.where(
data["ARMA_Forecast"].isnull(), data[target_col],
data["ARMA_Forecast"]
)
return data[["ts_id", date_col, "Actuals + ARMA"]]
def trend_sim_lin_reg(self, df):
df.sort_values(by=date_col, inplace=True)
        df = df.reset_index(drop=True)  # reset so the positional index can serve as the time axis
        df["index"] = df.index
x = df["index"].values.reshape(-1, 1)
y = df[target_col].values.reshape(-1, 1)
# fit linear regression
regressor = LinearRegression()
regressor.fit(x, y)
return regressor.coef_
def add_trend_lags(self, actuals, fcst):
# for i in lags:
# fcst['Lag_' + str(i)] = fcst['Actuals + ARMA'].shift(i)
# fcst = fcst.fillna(0)
# fcst["rolling_ly_lag"] = (0.1 * fcst["Lag_104"]) + (0.9 * fcst["Lag_52"])
# fcst.drop(['Lag_' + str(i) for i in lags], axis = 1, inplace = True)
actuals_trend = actuals.groupby(["ts_id"])[
target_col].sum().reset_index()
actuals_trend["trend_value"] = self.trend_sim_lin_reg(actuals)[0, 0]
return actuals_trend, fcst
def add_lags(self, df, lag_list, num_shift_lag=1):
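        """
        Add lag_<k> and lag_<k>_diff features for every lag in lag_list,
        computed on the date x ts_id pivot of the target and shifted by an
        extra (num_shift_lag - 1) periods so recent actuals are not leaked
        into the forecast horizon.
        """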
is_drop_ts_id = False
if 'ts_id' not in df.columns:
df = self.add_ts_id(df)
is_drop_ts_id = True
df_grp_sum = (df.groupby([date_col, "ts_id"])[target_col]
.sum()
.unstack())
lag_l = []
lag_l_diff = []
for lag in lag_list:
lag_df = (
df_grp_sum
.shift(lag + num_shift_lag - 1)
.fillna(method="bfill")
.stack()
.reset_index()
)
lag_df_diff = (
df_grp_sum
.shift(lag + num_shift_lag - 1)
.diff(1)
.fillna(method="bfill")
.stack()
.reset_index()
)
lag_df.rename(columns={0: "lag_" + str(lag)}, inplace=True)
lag_df_diff.rename(columns={0: "lag_" + str(lag) + '_diff'},
inplace=True)
if "lag_" + str(lag) in df.columns:
df.drop("lag_" + str(lag), axis=1, inplace=True)
if "lag_" + str(lag) + '_diff' in df.columns:
df.drop("lag_" + str(lag) + '_diff', axis=1, inplace=True)
lag_l.append(lag_df.set_index(["ts_id", date_col]))
lag_l.append(lag_df_diff.set_index(["ts_id", date_col]))
lag_df = None
for l in lag_l:
if lag_df is None:
lag_df = l
else:
lag_df = lag_df.join(l)
df = df.merge(lag_df.reset_index(), on=["ts_id", date_col], how="left")
        if is_drop_ts_id:
            df.drop("ts_id", axis=1, inplace=True)
return df
def add_lag_diff(self, df, lags_diff, num_shift_lag=1):
drop_cols = []
for i, j in lags_diff:
col_name = 'lag_diff_{}_{}'.format(i, j)
if col_name in df.columns:
df.drop(col_name, axis=1, inplace=True)
if 'lag_' + str(i) not in df.columns:
df = self.add_lags(df, [i], num_shift_lag=num_shift_lag)
drop_cols.append('lag_' + str(i))
if 'lag_' + str(j) not in df.columns:
df = self.add_lags(df, [j], num_shift_lag=num_shift_lag)
drop_cols.append('lag_' + str(j))
drop_cols.append('lag_' + str(j) + '_diff')
df[col_name] = df['lag_' + str(i)] - df['lag_' + str(j)]
if len(drop_cols) > 0:
df.drop(drop_cols, axis=1, inplace=True)
return df
def add_montly_lags(self, df, monthly_lags, num_shift_lag=1):
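        """
        Add <k>_mo_lag features: within each week-of-month slot, shift the
        target by k periods (roughly k months for that slot), then add
        first- to fourth-order differences of the monthly lag.
        """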
start = time.time()
mo_lag_dict = {}
for lag in monthly_lags:
mo_lag_dict[lag] = []
for week_num in df['week_of_month'].unique():
one_week_df = df[df['week_of_month'] == week_num][
['ts_id', date_col, target_col]]
df_grp = one_week_df.groupby([date_col, 'ts_id'])[
target_col].sum().unstack()
df_grp.sort_index(axis=1, inplace=True)
for lag in monthly_lags:
lag1 = lag
mo_lag = (
df_grp
.shift(lag1)
.bfill()
.unstack()
.reset_index()
.rename(columns={0: str(lag) + '_mo_lag'})
)
# for diff_num in range(1,5):
# diff_col = str(lag)+'_mo_lag_diff'+ str(diff_num)
# mo_lag_diff = (
# df_grp
# .shift(lag1)
# .diff(diff_num)
# .bfill()
# .unstack()
# .reset_index()
# .rename(columns = {0: str(lag) +'_mo_lag_diff' + str(diff_num)})
# )
# mo_lag = mo_lag.merge(mo_lag_diff, on = ['ts_id', date_col], how = 'left')
mo_lag_dict[lag].append(mo_lag)
for lag in monthly_lags:
col_name = str(lag) + '_mo_lag'
if col_name in df.columns:
df.drop(col_name, axis=1, inplace=True)
for diff_num in range(1, 5):
diff_col = str(lag) + '_mo_lag_diff' + str(diff_num)
if diff_col in df.columns:
df.drop(diff_col, axis=1, inplace=True)
mo_lag = pd.concat(mo_lag_dict[lag])
for diff_num in range(1, 5):
diff_col = str(lag) + '_mo_lag_diff' + str(diff_num)
mo_lag_diff = (
mo_lag
.groupby([date_col, 'ts_id'])
[str(lag) + '_mo_lag']
.sum()
.unstack()
.sort_index()
.diff(diff_num)
.bfill()
.stack()
.reset_index()
.rename(columns={0: diff_col})
)
mo_lag = mo_lag.merge(mo_lag_diff, on=['ts_id', date_col],
how='left')
df = df.merge(mo_lag, on=['ts_id', date_col], how='left')
end = time.time()
logger.debug(
"Time for updated monthly lags: {} mins".format((end - start) / 60))
return df
def add_rolling_lag(self, df, num_shift_lag=1):
df["rolling lag"] = (
(0.4 * df[target_col].shift(num_shift_lag))
+ (0.3 * df[target_col].shift(num_shift_lag + 1))
+ (0.2 * df[target_col].shift(num_shift_lag + 2))
+ (0.1 * df[target_col].shift(num_shift_lag + 3))
)
return df
def add_start_end_dates(self, df):
# Look for monthly format 4-4-5 and then make logic
is_drop_week_of_month = False
if 'week_of_month' not in df.columns:
df = self.add_week_of_month(df)
is_drop_week_of_month = True
df["MonthStart"] = 0
df.loc[df['week_of_month'] == 1, 'MonthStart'] = 1
        if is_drop_week_of_month:
            df.drop('week_of_month', axis=1, inplace=True)
# df[date_col]= pd.to_datetime(df[date_col])
month_end_list = (
df[df['MonthStart'] == 1][date_col].dt.date - relativedelta(
weeks=1)
).values
df['MonthEnd'] = 0
df.loc[
df[date_col].isin(month_end_list), 'MonthEnd'
] = 1
# df["MonthEnd"] = df[date_col].dt.is_month_end.astype(int)
df["QuarterStart"] = (
df[date_col].dt.month.isin([1, 4, 7, 10]).astype(int)
& df['MonthStart']
)
df["QuarterEnd"] = (
df[date_col].dt.month.isin([3, 6, 9, 12]).astype(int)
& df['MonthEnd']
)
return df
def add_holiday_ratio(self, df):
df["Holiday_Ratio"] = df["holiday_count"].fillna(0) / 7
return df
def add_rolling_time_features(self, df, week_list, agg_dict,
num_shift_lag=1):
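        """
        Add rolling-window aggregates (per agg_dict: mean / median / std /
        max / min) and the period-on-period diff of the rolling mean for
        every window size in week_list, computed on the target shifted by
        num_shift_lag.
        """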
roll_l = []
roll_l_diff = []
drop_cols = set()
df_grp_sum = (
df.groupby([date_col, "ts_id"])[target_col]
.sum()
.unstack()
.shift(num_shift_lag)
)
for num in week_list:
week_df = (
df_grp_sum
.rolling(num)
.agg(agg_dict)
.bfill()
.ffill()
.unstack()
.unstack(level=1)
.reset_index()
).rename(
columns={
"level_0": "ts_id",
"mean": "avg_week" + str(num),
"median": "median_week" + str(num),
"std": "std_week" + str(num),
"max": "max_week" + str(num),
"min": "min_week" + str(num),
}
)
week_df_diff = (
df_grp_sum
.rolling(num)
.agg('mean')
.diff(1)
.bfill()
.ffill()
.unstack()
.reset_index()
).rename(columns={0: "avg_week" + str(num) + "_diff"})
drop_cols = drop_cols.union(
set(week_df.drop(["ts_id", date_col], axis=1).columns)
| set(week_df_diff.drop(["ts_id", date_col], axis=1).columns)
)
roll_l.append(week_df.set_index(["ts_id", date_col]))
roll_l.append(week_df_diff.set_index(["ts_id", date_col]))
drop_cols = list(drop_cols & set(df.columns))
df.drop(drop_cols, axis=1, inplace=True)
week_df = None
for l in roll_l:
if week_df is None:
week_df = l
else:
week_df = week_df.join(l)
df = df.merge(week_df.reset_index(), on=["ts_id", date_col], how="left")
# for i in [13, 25, 52]:
# roll_df = df_grp_sum.shift(num_shift_lag).bfill().rolling(i, min_periods = 1)
# roll_df = (
# roll_df.quantile(0.75)
# - roll_df.quantile(0.25)
# ).unstack().reset_index().rename(columns = {0: 'Quantile_diff_'+str(i)})
# if 'Quantile_diff_'+str(i) in df.columns:
# df.drop('Quantile_diff_'+str(i), axis = 1, inplace = True)
# df = df.merge(roll_df, on = ['ts_id', date_col], how = 'left')
return df
def add_ewma(self, df, week_list, agg_dict, num_shift_lag=1):
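        """
        Add exponentially weighted moving averages (ewma_<span>) of the
        shifted target for every span in week_list.
        """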
ewma_l = []
df_grp_sum = (df.groupby([date_col, "ts_id"])[target_col]
.sum()
.unstack()
.shift(num_shift_lag)
.bfill()
.ffill())
for num in week_list:
week_df = (
df_grp_sum
.ewm(span=num, ignore_na=True, adjust=True, min_periods=1)
.agg(agg_dict)
.bfill()
.ffill()
.unstack()
.unstack(level=1)
.reset_index()
)
week_df.rename(columns={
"level_0": "ts_id",
"mean": "ewma_" + str(num)
}, inplace=True)
if "ewma_" + str(num) in df.columns:
df.drop("ewma_" + str(num), axis=1, inplace=True)
ewma_l.append(week_df.set_index(["ts_id", date_col]))
week_df = None
for l in ewma_l:
if week_df is None:
week_df = l
else:
week_df = week_df.join(l)
df = df.merge(week_df.reset_index(), on=["ts_id", date_col], how="left")
return df
def add_trend(self, df, week_list, num_shift_lag=1):
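        """
        Add trend_week<k> features: ratio of the 5-week rolling mean to
        the k-week rolling mean of the shifted target, i.e. a short- vs
        long-horizon momentum signal.
        """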
# is_drop = True
if 'ts_id' not in df.columns:
df = self.add_ts_id(df)
df_grp = (
df.groupby([date_col, "ts_id"])[target_col]
.sum()
.unstack()
)
df_grp = df_grp.shift(num_shift_lag).bfill()
numerator = df_grp.rolling(5, min_periods=1).mean()
# all_trends_df = None
df.set_index([date_col, "ts_id"], inplace=True)
for num in week_list:
denominator = df_grp.rolling(num, min_periods=1).mean()
one_trend_df = (numerator / (
denominator + 1e-8)).bfill().ffill().stack().reset_index()
one_trend_df.rename(columns={0: "trend_week" + str(num)},
inplace=True)
if "trend_week" + str(num) in df.columns:
df.drop(columns="trend_week" + str(num), inplace=True)
df = df.join(one_trend_df.set_index([date_col, "ts_id"]),
how='left')
# all_trends_df.append(one_trend_df)
return df.reset_index()
def create_one_hot_holiday(self, df, col, name, on="holiday"):
df[name] = 0
df.loc[df[on] == col, name] = 1
return df
def add_year_month(self, df):
year_df = df[date_col].dt.year
month_df = df[date_col].dt.month
df['YearMonth'] = year_df.astype(str) + '_' + month_df.astype(str)
return df
def feat_agg(self, df, train_max_date, num_shift_lag):
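        """
        Feature-engineering entry point: masks the target beyond
        train_max_date and layers on calendar, lag, lag-diff, monthly-lag,
        rolling, EWMA and trend features according to the config flags.
        """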
if pd.DataFrame(df).empty:
return df
if target_col not in df.columns:
raise ValueError(
"{} col not in dataframe passed".format(target_col))
if date_col not in df.columns:
raise ValueError("{} col not in dataframe passed".format(date_col))
df = self.add_week_of_month(df)
df = self.add_month(df)
df.loc[df[date_col] > train_max_date, target_col] = np.nan
logger.debug("Adding TS Features...")
logger.debug("Adding lags...")
logger.debug("Lags: {}".format(lags))
df = self.add_lags(df, lags, num_shift_lag=num_shift_lag)
if add_lags_diff_flag:
logger.debug("Adding Lag Diff")
            logger.debug("lags_diff: {}".format(lags_diff))
            df = self.add_lag_diff(df, lags_diff, num_shift_lag=num_shift_lag)
if add_monthly_lags_flag:
logger.debug("Adding Monthly lags..")
df = self.add_montly_lags(df, monthly_lags,
num_shift_lag=num_shift_lag)
logger.debug("Adding start end dates...")
df = self.add_start_end_dates(df)
logger.debug("Adding rolling time features...")
df = self.add_rolling_time_features(
df, rolling_time_feat["lags"], rolling_time_feat["agg_func_dict"],
num_shift_lag=num_shift_lag
)
logger.debug("Adding ewma...")
df = self.add_ewma(df, ewma_lags, {"mean"}, num_shift_lag=num_shift_lag)
logger.debug("Adding trend...")
df = self.add_trend(df, trend_lags, num_shift_lag=num_shift_lag)
logger.debug("TS Features added successfully...")
logger.info("maxdate after TS: {}".format(df[date_col].max()))
        return df

# === end of zeno_etl_libs/utils/ipc2/engine/feat_engg.py ===
import pandas as pd
import numpy as np
import datetime as dt
def v5_corrections(store_id, safety_stock_df, db, schema, logger):
"""
Main function to perform V5 corrections
"""
# Get Drug STD Qty and list of repeatable drug_ids
df_3m_drugs, unique_drugs_3m = get_3m_drug_std_qty(store_id, db, schema, logger)
# Locate drugs to perform correction check
df_std_check = safety_stock_df.loc[safety_stock_df["drug_id"].isin(
unique_drugs_3m)][["drug_id", "fcst", "safety_stock", "reorder_point", "order_upto_point"]]
# Drugs not forecasted by IPC
drugs_3m_not_set = list(set(unique_drugs_3m) ^ set(df_std_check["drug_id"].unique()))
logger.info(f"Number of drugs not forecasted: {len(drugs_3m_not_set)}")
# Merge STD Qty with SS table and find drugs correction areas
df_std_check = df_3m_drugs.merge(df_std_check, on="drug_id", how="left")
df_std_check = df_std_check.dropna()
df_std_check["rop>=std_qty"] = np.where(
df_std_check["reorder_point"] >= df_std_check["std_qty"], "Y", "N")
tot_rep_drugs = df_std_check.shape[0]
corr_req = df_std_check.loc[df_std_check['rop>=std_qty'] == 'N'].shape[0]
corr_not_req = df_std_check.loc[df_std_check['rop>=std_qty'] == 'Y'].shape[0]
logger.info(f"Number of repeatable drugs: {tot_rep_drugs}")
logger.info(f"Number of repeatable drugs corrections required: {corr_req}")
logger.info(f"Number of repeatable drugs corrections not required: {corr_not_req}")
# CORRECTION STARTS
order_freq = 4
column_order = list(df_std_check.columns)
column_order += ["corr_ss", "corr_rop", "corr_oup"]
# CASE1: No changes required
df_no_change = df_std_check.loc[df_std_check["rop>=std_qty"] == "Y"].copy()
df_no_change["corr_ss"] = df_no_change["safety_stock"].astype(int)
df_no_change["corr_rop"] = df_no_change["reorder_point"].astype(int)
df_no_change["corr_oup"] = df_no_change["order_upto_point"].astype(int)
# CASE2: SS & ROP & OUP is Non Zero
df_change1 = df_std_check.loc[(df_std_check["rop>=std_qty"] == "N") &
(df_std_check["safety_stock"] != 0) &
(df_std_check["reorder_point"] != 0) &
(df_std_check["order_upto_point"] != 0)].copy()
df_change1["mul_1"] = df_change1["reorder_point"] / df_change1["safety_stock"]
df_change1["mul_2"] = df_change1["order_upto_point"] / df_change1["reorder_point"]
df_change1["corr_rop"] = df_change1["std_qty"]
df_change1["corr_ss"] = np.ceil(df_change1["corr_rop"] / df_change1["mul_1"]).astype(int)
# If ROP >= OUP, then in those cases, increase OUP.
df_change11 = df_change1.loc[
df_change1["corr_rop"] >= df_change1["order_upto_point"]].copy()
df_change12 = df_change1.loc[
df_change1["corr_rop"] < df_change1["order_upto_point"]].copy()
df_change11["corr_oup"] = np.ceil(df_change11["corr_rop"] + (
df_change11["fcst"] * order_freq / 28)).astype(int)
df_change12["corr_oup"] = np.ceil(df_change12["corr_rop"] + (
df_change12["fcst"] * order_freq / 28)).astype(int)
df_change1 = df_change11.append(df_change12)
df_change1 = df_change1[column_order]
# CASE3: Any of SS & ROP & OUP is Zero
df_change2 = df_std_check.loc[(df_std_check["rop>=std_qty"] == "N")].copy()
df_change2 = df_change2.loc[~((df_change2["safety_stock"] != 0) &
(df_change2["reorder_point"] != 0) &
(df_change2["order_upto_point"] != 0))].copy()
df_change2["corr_rop"] = df_change2["std_qty"].astype(int)
df_change2["corr_ss"] = np.floor(df_change2["corr_rop"] / 2).astype(int)
# If ROP >= OUP, then in those cases, increase OUP.
df_change21 = df_change2.loc[
df_change2["corr_rop"] >= df_change2["order_upto_point"]].copy()
df_change22 = df_change2.loc[
df_change2["corr_rop"] < df_change2["order_upto_point"]].copy()
df_change21["corr_oup"] = np.ceil(df_change21["corr_rop"] + (
df_change21["fcst"] * order_freq / 28)).astype(int)
df_change22["corr_oup"] = np.ceil(df_change22["corr_rop"] + (
df_change22["fcst"] * order_freq / 28)).astype(int)
df_change2 = df_change21.append(df_change22)
df_change2 = df_change2[column_order]
# Combine all 3 cases
df_corrected = df_no_change.append(df_change1)
df_corrected = df_corrected.append(df_change2)
df_corrected = df_corrected.sort_index(ascending=True)
# Get DF of corrected drugs and merge with input DF
df_corrected_to_merge = df_corrected.loc[df_corrected["rop>=std_qty"] == "N"][
["drug_id", "corr_ss", "corr_rop", "corr_oup"]]
corr_safety_stock_df = safety_stock_df.merge(df_corrected_to_merge,
on="drug_id", how="left")
# Make corrections for required drugs
corr_safety_stock_df["safety_stock"] = np.where(
corr_safety_stock_df["corr_ss"] >= 0, corr_safety_stock_df["corr_ss"],
corr_safety_stock_df["safety_stock"])
corr_safety_stock_df["reorder_point"] = np.where(
corr_safety_stock_df["corr_rop"] >= 0, corr_safety_stock_df["corr_rop"],
corr_safety_stock_df["reorder_point"])
corr_safety_stock_df["order_upto_point"] = np.where(
corr_safety_stock_df["corr_oup"] >= 0, corr_safety_stock_df["corr_oup"],
corr_safety_stock_df["order_upto_point"])
corr_safety_stock_df.drop(["corr_ss", "corr_rop", "corr_oup"], axis=1, inplace=True)
corr_safety_stock_df["max_value"] = corr_safety_stock_df["order_upto_point"] * \
corr_safety_stock_df["fptr"]
assert safety_stock_df.shape == corr_safety_stock_df.shape
# Evaluate PRE and POST correction
pre_post_metrics = {
"metric": ["pre_corr", "post_corr"],
"ss_qty": [safety_stock_df["safety_stock"].sum(),
corr_safety_stock_df["safety_stock"].sum()],
"ss_val": [round((safety_stock_df["safety_stock"] * safety_stock_df["fptr"]).sum(), 2),
round((corr_safety_stock_df["safety_stock"] * corr_safety_stock_df["fptr"]).sum(), 2)],
"rop_qty": [safety_stock_df["reorder_point"].sum(), corr_safety_stock_df["reorder_point"].sum()],
"rop_val": [round((safety_stock_df["reorder_point"] * safety_stock_df["fptr"]).sum(), 2),
round((corr_safety_stock_df["reorder_point"] * corr_safety_stock_df["fptr"]).sum(), 2)],
"oup_qty": [safety_stock_df["order_upto_point"].sum(), corr_safety_stock_df["order_upto_point"].sum()],
"oup_val": [round((safety_stock_df["order_upto_point"] * safety_stock_df["fptr"]).sum(), 2),
round((corr_safety_stock_df["order_upto_point"] * corr_safety_stock_df["fptr"]).sum(), 2)]
}
pre_post_metics_df = pd.DataFrame.from_dict(pre_post_metrics).set_index('metric').T
pre_post_metics_df["delta"] = pre_post_metics_df["post_corr"] - pre_post_metics_df["pre_corr"]
pre_post_metics_df["change%"] = round((pre_post_metics_df["delta"] / pre_post_metics_df["pre_corr"]) * 100, 2)
logger.info(f"\n{str(pre_post_metics_df)}")
return corr_safety_stock_df
def max_mode(pd_series):
return int(max(pd_series.mode()))
def get_3m_drug_std_qty(store_id, db, schema, logger):
"""
To fetch repeatable patient-drug qty from past 90days and calculate
standard drug qty.
"""
start_date = (dt.date.today() - dt.timedelta(days=90)).strftime("%Y-%m-%d")
end_date = dt.date.today().strftime("%Y-%m-%d")
q_3m = """
select "patient-id" , "old-new" , "drug-id" ,
date("created-at") as "on-date", quantity as "tot-qty"
from "{schema}".sales
where "store-id" = {0}
and "is-repeatable" = 1
and "bill-flag" = 'gross'
and "created-at" > '{1} 00:00:00' and "created-at" < '{2} 00:00:00'
""".format(store_id, start_date, end_date, schema=schema)
df_3m = db.get_df(q_3m)
df_3m.columns = [c.replace('-', '_') for c in df_3m.columns]
# Get patient-drug-level STD Qty
df_3m["3m_bills"] = 1
df_3m["std_qty"] = df_3m["tot_qty"]
df_3m_patient = df_3m.groupby(["patient_id", "drug_id"],
as_index=False).agg(
{"3m_bills": "sum", "tot_qty": "sum", "std_qty": max_mode})
logger.info(f"Total repeatable patients: {len(df_3m_patient.patient_id.unique())}")
# Get drug-level STD Qty
df_3m_drugs = df_3m_patient.groupby("drug_id", as_index=False).agg(
{"std_qty": "max"})
# STD Qty > 10 is considered outliers, to drop.
drug_count_before = df_3m_drugs.shape[0]
df_3m_drugs = df_3m_drugs.loc[df_3m_drugs["std_qty"] <= 10]
drug_count_after = df_3m_drugs.shape[0]
logger.info(f"Number of outlier drugs STD Qty: {drug_count_before-drug_count_after}")
# Repeatable drugs STD Qty to check against IPC set ROP
unique_drugs_3m = list(df_3m_drugs["drug_id"].unique())
    return df_3m_drugs, unique_drugs_3m

# === end of zeno_etl_libs/utils/ipc2/heuristics/ipcv5_heuristics_old.py ===
import pandas as pd
import numpy as np
import datetime as dt
def v3_corrections(final_ss_df, store_id, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, schema, db, logger):
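    """
    V3 correction: for drugs where the forecast left order_upto_point at 0,
    look up store-drug selling / cumulative probabilities and, where both
    clear the configured cutoffs, rebuild OUP from the 3-month moving
    average with ROP = floor(OUP/2) and SS = floor(OUP/4). The "111" cases
    table is applied the same way.
    """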
final_ss_df['store_id'] = store_id
q_prob = f"""
select *
from "{schema}"."ipc-corrections-rest-cases"
where "store-id" = {store_id}
"""
q_prob_111 = f"""
select *
from "{schema}"."ipc-corrections-111-cases"
where "store-id" = {store_id}
"""
prob_matrix = db.get_df(q_prob)
df_111 = db.get_df(q_prob_111)
prob_matrix.columns = [c.replace('-', '_') for c in prob_matrix.columns]
df_111.columns = [c.replace('-', '_') for c in df_111.columns]
# list of drugs for which corrections is required. i.e. max value 0.
df_corrections_list = final_ss_df[
final_ss_df['order_upto_point'] == 0][['store_id', 'drug_id']]
df_corrections = pd.merge(
df_corrections_list, prob_matrix, how='inner',
on=['store_id', 'drug_id'], validate='one_to_one')
df_corrections = df_corrections.drop(columns={'corrected_max'})
df_corrections['order_upto_point'] = np.round(
df_corrections['current_ma_3_months'])
df_corrections_1 = df_corrections[
(df_corrections['cumm_prob'] >=
corrections_cumulative_probability_cutoff['ma_less_than_2']) &
(df_corrections['current_flag_ma_less_than_2'] == 1)]
df_corrections_2 = df_corrections[
(df_corrections['cumm_prob'] >=
corrections_cumulative_probability_cutoff['ma_more_than_2']) &
(df_corrections['current_flag_ma_less_than_2'] == 0)]
df_corrections_1 = df_corrections_1[
(df_corrections_1['selling_probability'] >=
corrections_selling_probability_cutoff['ma_less_than_2']) &
(df_corrections_1['current_flag_ma_less_than_2'] == 1)]
df_corrections_2 = df_corrections_2[
(df_corrections_2['selling_probability'] >=
corrections_selling_probability_cutoff['ma_more_than_2']) &
(df_corrections_2['current_flag_ma_less_than_2'] == 0)]
df_corrections = pd.concat(
[df_corrections_1, df_corrections_2]).reset_index(drop=True)
df_corrections_final = df_corrections.copy()[
['store_id', 'drug_id', 'current_bucket', 'selling_probability',
'cumm_prob', 'current_flag_ma_less_than_2', 'avg_ptr',
'current_ma_3_months']]
df_corrections = df_corrections[
['store_id', 'drug_id', 'order_upto_point']]
df_corrections['reorder_point'] = np.floor(
df_corrections['order_upto_point'] / 2)
df_corrections['safety_stock'] = np.floor(
df_corrections['order_upto_point'] / 4)
df_corrections = df_corrections.set_index(['store_id', 'drug_id'])
final_ss_df = final_ss_df.set_index(['store_id', 'drug_id'])
final_ss_df.update(df_corrections)
final_ss_df = final_ss_df.reset_index()
df_corrections = df_corrections.reset_index()
df_corrections = pd.merge(
df_corrections, df_corrections_final, on=['store_id', 'drug_id'],
how='left', validate='one_to_one')
# update 111 cases here.
df_corrections_111 = pd.merge(
df_corrections_list, df_111, how='inner',
on=['store_id', 'drug_id'], validate='one_to_one')
df_corrections_111 = df_corrections_111.drop(
columns={'original_max', 'corrected_max',
'inv_impact', 'max_impact'}, axis=1)
df_corrections_111['order_upto_point'] = np.round(
df_corrections_111['ma_3_months'])
df_corrections_111['reorder_point'] = np.floor(
df_corrections_111['order_upto_point'] / 2)
df_corrections_111['safety_stock'] = np.floor(
df_corrections_111['order_upto_point'] / 4)
df_corrections_111 = df_corrections_111.set_index(
['store_id', 'drug_id'])
final_ss_df = final_ss_df.set_index(['store_id', 'drug_id'])
final_ss_df.update(df_corrections_111)
final_ss_df = final_ss_df.reset_index()
df_corrections_111 = df_corrections_111.reset_index()
# set reset date
curr_date = str(dt.date.today())
df_corrections['reset_date'] = curr_date
df_corrections_111['reset_date'] = curr_date
final_ss_df.drop('store_id', axis=1, inplace=True)
    return final_ss_df

# === end of zeno_etl_libs/utils/ipc2/heuristics/ipcv3_heuristics.py ===
from zeno_etl_libs.helper.aws.s3 import S3
import pandas as pd
import numpy as np
def v5_corrections(safety_stock_df, db, schema, logger):
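    """
    V5 correction: for the buckets in `corr_buckets`, round the reorder
    point and order-upto point to multiples of the drug's standard selling
    quantity (std-qty), read from the STD_QTY_IPC file on S3.
    """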
# ======================= DEFINE BUCKETS TO CORRECT ========================
corr_buckets = ['AW', 'AX', 'AY', 'BX', 'BY']
# ==========================================================================
logger.info("Reading STD-Qty for all drugs")
# get drug-std_qty data (TO CHANGE READ IF LIVE FOR ALL STORES)
s3 = S3()
file_path = s3.download_file_from_s3('STD_QTY_IPC/df_std_qty.csv')
df_std_qty = pd.read_csv(file_path)
logger.info("Define Max-STD-Qty for all drugs based on confidence")
for index, row in df_std_qty.iterrows():
if row["std_qty_3_cf"] == 'H':
max_std_qty = row["std_qty_3"]
elif row["std_qty_2_cf"] in ['H', 'M']:
max_std_qty = row["std_qty_2"]
else:
max_std_qty = row["std_qty"]
df_std_qty.loc[index, "max_std_qty"] = max_std_qty
# merge std and max.std to base df
safety_stock_df = safety_stock_df.merge(
df_std_qty[["drug_id", "std_qty", "max_std_qty"]], on="drug_id", how="left")
safety_stock_df["std_qty"] = safety_stock_df["std_qty"].fillna(1)
safety_stock_df["max_std_qty"] = safety_stock_df["max_std_qty"].fillna(1)
# drugs to correct and not correct
df_to_corr = safety_stock_df.loc[
safety_stock_df["bucket"].isin(corr_buckets)]
df_not_to_corr = safety_stock_df.loc[
~safety_stock_df["bucket"].isin(corr_buckets)]
logger.info(f"Num drugs considered for correction {df_to_corr.shape[0]}")
logger.info(f"Num drugs not considered for correction {df_not_to_corr.shape[0]}")
logger.info("Correction logic starts")
for index, row in df_to_corr.iterrows():
fcst = row["fcst"]
rop = row["reorder_point"]
oup = row["order_upto_point"]
std = row["std_qty"]
max_std = row["max_std_qty"]
new_rop = std_round(rop, std=max_std)
if (new_rop != 0) & (new_rop / rop >= 2):
new_rop = std_round(rop, std=std)
if (new_rop / rop >= 2) & (new_rop > fcst):
new_rop = rop # no correction
if (new_rop == 0) & (oup > 0):
new_oup = std
else:
new_oup = std_round(oup, std=max_std)
if (new_oup <= new_rop) & (new_oup != 0):
new_oup = std_round(new_rop + 1, std=max_std)
df_to_corr.loc[index, "reorder_point"] = new_rop
df_to_corr.loc[index, "order_upto_point"] = new_oup
corr_safety_stock_df = pd.concat([df_to_corr, df_not_to_corr])
corr_safety_stock_df.drop(columns=["std_qty", "max_std_qty"],
axis=1, inplace=True)
return corr_safety_stock_df
def std_round(x, std):
"""
round x to the closest higher multiple of std-qty
"""
    return std * np.ceil(x/std)

# === end of zeno_etl_libs/utils/ipc2/heuristics/ipcv5_heuristics.py ===
import pandas as pd
import numpy as np
import datetime as datetime
from datetime import timedelta
from zeno_etl_libs.utils.ipc2.config_ipc import key_col, date_col, store_col, drug_col, target_col
def load_data(store_id,start_d, end_d,db,schema):
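    """
    Pull raw sales rows and CFR (patient-request) loss quantities for the
    store between start_d and end_d.
    """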
mos = f''' select * from "{schema}".sales
where date("created-at") >= '{start_d}' and date("created-at") < '{end_d}' and "store-id" in ({store_id}) '''
cfr = f'''select cpr."store-id" , cpr."drug-id" , cpr."shortbook-date", sum("loss-quantity") as "loss-quantity" from
"{schema}"."cfr-patient-request" cpr
where "shortbook-date" >= '{start_d}' and "shortbook-date" < '{end_d}'
and cpr."drug-id" <> -1
and ("drug-category" = 'chronic' or "repeatability-index" >= 40)
and "loss-quantity" > 0
and "drug-type" in ('ethical', 'ayurvedic', 'generic', 'discontinued-products', 'banned', 'general', 'high-value-ethical', 'baby-product', 'surgical', 'otc', 'glucose-test-kit', 'category-2', 'category-1', 'category-4', 'baby-food', '', 'category-3')
and "store-id" in ({store_id})
group by cpr."store-id" , cpr."drug-id" , "shortbook-date" '''
df_mos = db.get_df(mos)
df_cfr = db.get_df(cfr)
df_mos.columns = [c.replace('-', '_') for c in df_mos.columns]
df_cfr.columns = [c.replace('-', '_') for c in df_cfr.columns]
return df_mos,df_cfr
def pre_process_data(df_mos,df_cfr):
set_dtypes = {
store_col: int,
drug_col: int,
'loss_quantity': int
}
df_cfr = df_cfr.astype(set_dtypes)
df_cfr['shortbook_date'] = pd.to_datetime(df_cfr['shortbook_date'])
df_mos['created_at'] = pd.to_datetime(df_mos['created_at'])
df_mos['sales_date'] = pd.to_datetime(df_mos['created_at'].dt.date)
df_mos['drug_id'].fillna(0,inplace=True)
df_mos['ts_id'] = df_mos['store_id'].astype(int).astype(str) + "_" + df_mos['drug_id'].astype(int).astype(str)
df_mos = df_mos.groupby(['ts_id', 'sales_date', 'store_id', 'drug_id' ])['net_quantity'].sum().reset_index()
df_mos.rename(columns={'sales_date':date_col},inplace=True)
df_mos.rename(columns={'net_quantity':target_col},inplace=True)
return df_mos, df_cfr
def get_formatted_data(df, key_col, date_col, store_col, drug_col, target_col):
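    """
    Reindex the sales series onto a complete daily calendar per ts_id,
    filling missing days with zero demand, and split ts_id back into
    store_id / drug_id columns.
    """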
df = df[[key_col, date_col, target_col]]
min_date = df[date_col].dropna().min()
end_date = df[date_col].dropna().max()
date_range = []
date_range = pd.date_range(
start= min_date,
end= end_date,
freq= 'd'
)
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = np.nan
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns = {0: target_col})
)
df[[store_col, drug_col]] = df[key_col].str.split('_', expand = True)
df[[store_col, drug_col]] = df[[store_col, drug_col]].astype(float).astype(int)
return df
def aggreagte_data(df_sales, df_cfr):
df = df_sales.merge(df_cfr,
left_on=[store_col, drug_col, date_col],
right_on=[store_col, drug_col, 'shortbook_date'],
how='left')
df[date_col] = df[date_col].combine_first(df['shortbook_date'])
df[target_col].fillna(0, inplace=True)
df['loss_quantity'].fillna(0, inplace=True)
df[target_col] += df['loss_quantity']
df.drop(['shortbook_date', 'loss_quantity'], axis=1, inplace=True)
df_l3m_sales = df.groupby([store_col,drug_col])[target_col].sum().reset_index()
df_l3m_sales.rename(columns={target_col:'l3m_sales_qty'},inplace=True)
return df_l3m_sales
def implement_corrections(final_ss_df,df_l3m_sales):
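    """
    For drugs with zero forecast and zero OUP but positive sales in the
    last 3 months, rebuild OUP / ROP / SS from the monthly average using
    roughly 18 / 13 / 7 days of cover respectively.
    """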
cols = final_ss_df.columns
df = pd.merge(final_ss_df,df_l3m_sales,how='left', on=[store_col,drug_col] )
    df['fcst_zero_w_sales'] = np.where(
        (df['fcst'] == 0) & (df['l3m_sales_qty'] > 0) & (df['order_upto_point'] == 0), 1, 0)
    df['order_upto_point'] = np.where(
        df['fcst_zero_w_sales'] == 1,
        np.round((df['l3m_sales_qty'] / 3) * (18 / 30)), df['order_upto_point'])
    df['order_upto_point'] = np.where(
        (df['fcst_zero_w_sales'] == 1) & (df['l3m_sales_qty'] == 2) & (df['order_upto_point'] == 0),
        1, df['order_upto_point'])
    df['reorder_point'] = np.where(
        df['fcst_zero_w_sales'] == 1,
        np.floor((df['l3m_sales_qty'] / 3) * (13 / 30)), df['reorder_point'])
    df['reorder_point'] = np.where(
        (df['fcst_zero_w_sales'] == 1) & (df['reorder_point'] == df['order_upto_point']) & (df['reorder_point'] > 0),
        df['order_upto_point'] - 1, df['reorder_point'])
    df['safety_stock'] = np.where(
        df['fcst_zero_w_sales'] == 1,
        np.floor((df['l3m_sales_qty'] / 3) * (7 / 30)), df['safety_stock'])
df = df[cols]
return df
def v3N_corrections(final_ss_df, store_id, reset_date, schema, db, logger):
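    """
    V3N correction: recompute last-90-day demand (sales plus CFR losses)
    for the store and patch SS / ROP / OUP for drugs the forecast zeroed
    out despite recent sales.
    """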
end_d = pd.to_datetime(reset_date)
start_d = end_d - timedelta(days= 90)
start_d = str(start_d.date())
end_d = str(end_d.date())
df_mos,df_cfr = load_data(store_id=store_id,start_d=start_d, end_d=end_d,db=db,schema=schema)
df_mos, df_cfr = pre_process_data(df_mos=df_mos, df_cfr=df_cfr)
df_sales = get_formatted_data(df=df_mos, key_col=key_col, date_col = date_col, target_col=target_col, store_col=store_col, drug_col=drug_col)
df_l3m_sales = aggreagte_data(df_sales=df_sales, df_cfr = df_cfr)
final_ss_df = implement_corrections(final_ss_df=final_ss_df, df_l3m_sales=df_l3m_sales)
    return final_ss_df

# === end of zeno_etl_libs/utils/ipc2/heuristics/ipcv3N_heuristics.py ===
import numpy as np
import pandas as pd
from datetime import datetime
from scipy.stats import norm
from zeno_etl_libs.utils.ipc.lead_time import lead_time
from zeno_etl_libs.utils.ipc.heuristics.ipcv4_heuristics import ipcv4_heuristics
from zeno_etl_libs.utils.ipc.heuristics.ipcv5_heuristics import v5_corrections
# from zeno_etl_libs.utils.ipc.heuristics.ipcv6_heuristics import v6_corrections
def non_ipc_safety_stock_calc(
store_id, cal_sales, reset_date, final_predict, drug_class,
corrections_flag, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, chronic_max_flag,
train_flag, drug_type_list_v4, v5_active_flag, v6_active_flag,
v6_type_list, v6_ptr_cut_off, db, schema, logger):
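    """
    Safety stock / reorder point / order-upto point calculation for
    non-IPC stores: combines drug-level lead times with the demand
    forecast and its std, then applies the correction plugins
    (probability-based corrections, chronic-max override, V4/V5
    heuristics) before returning the final plan and audit frames.
    """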
'''LEAD TIME CALCULATION'''
lt_drug, lt_store_mean, lt_store_std = lead_time(
store_id, cal_sales, reset_date, db, schema, logger)
service_level = 0.95
num_days = 4 * 7
order_freq = 4
z = norm.ppf(service_level)
print(lt_store_mean, lt_store_std)
drug_class = drug_class.copy()
drug_class['bucket'] = drug_class['bucket_abc'] + drug_class['bucket_xyz']
safety_stock_df = final_predict.merge(
lt_drug[['drug_id', 'lead_time_mean', 'lead_time_std']],
how='left', on='drug_id')
safety_stock_df['lead_time_mean'].fillna(lt_store_mean, inplace=True)
safety_stock_df['lead_time_std'].fillna(lt_store_std, inplace=True)
safety_stock_df = safety_stock_df.merge(
drug_class[['drug_id', 'bucket']], on='drug_id', how='left')
safety_stock_df['bucket'].fillna('NA', inplace=True)
safety_stock_df['demand_daily'] = safety_stock_df['fcst']/num_days
safety_stock_df['demand_daily_deviation'] = (
safety_stock_df['std']/np.sqrt(num_days))
# heuristics #1
safety_stock_df['lead_time_std'] = np.where(
safety_stock_df['lead_time_std'] < 1,
lt_store_std, safety_stock_df['lead_time_std'])
# non ipc store safety stock
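    # Classical safety stock under demand and lead-time uncertainty:
    #   safety_stock     = z * sqrt(LT_mean * sigma_daily^2 + LT_std^2 * demand_daily^2)
    #   reorder_point    = safety_stock + demand_daily * LT_mean
    #   order_upto_point = reorder_point + demand_daily * order_freq
    # with z = norm.ppf(service_level) for a 95% service level.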
safety_stock_df['safety_stock'] = np.round(
z * np.sqrt(
(
safety_stock_df['lead_time_mean'] *
safety_stock_df['demand_daily_deviation'] *
safety_stock_df['demand_daily_deviation']
) +
(
safety_stock_df['lead_time_std'] *
safety_stock_df['lead_time_std'] *
safety_stock_df['demand_daily'] *
safety_stock_df['demand_daily']
)))
safety_stock_df['reorder_point'] = np.round(
safety_stock_df['safety_stock'] +
safety_stock_df['demand_daily'] * safety_stock_df['lead_time_mean'])
safety_stock_df['order_upto_point'] = np.round(
safety_stock_df['reorder_point'] +
safety_stock_df['demand_daily'] * order_freq)
safety_stock_df['safety_stock_days'] = np.round(
num_days * safety_stock_df['safety_stock'] /
safety_stock_df['fcst'])
safety_stock_df['reorder_days'] = np.round(
num_days * safety_stock_df['reorder_point'] /
safety_stock_df['fcst'])
safety_stock_df['order_upto_days'] = np.round(
num_days * safety_stock_df['order_upto_point'] /
safety_stock_df['fcst'])
# getting order value
drug_list = list(safety_stock_df['drug_id'].unique())
print(len(drug_list))
drug_str = str(drug_list).replace('[', '(').replace(']', ')')
fptr_query = """
select "drug-id" , avg(ptr) as fptr, sum(quantity) as curr_inventory
from "{schema}"."inventory-1" i
where "store-id" = {store_id}
and "drug-id" in {drug_str}
group by "drug-id"
""".format(store_id=store_id, drug_str=drug_str, schema=schema)
fptr = db.get_df(fptr_query)
fptr.columns = [c.replace('-', '_') for c in fptr.columns]
fptr["fptr"] = fptr["fptr"].astype(float)
safety_stock_df = safety_stock_df.merge(fptr, on='drug_id', how='left')
safety_stock_df['fptr'].fillna(100, inplace=True)
safety_stock_df['max_value'] = (
safety_stock_df['fptr'] * safety_stock_df['order_upto_point'])
# correction plugin - Start
if corrections_flag & train_flag:
safety_stock_df['correction_flag'] = 'N'
safety_stock_df['store_id'] = store_id
print("corrections code is running now:")
q_prob = f"""select * from "{schema}"."ipc-corrections-rest-cases" """
q_prob_111 = f"""select * from "{schema}"."ipc-corrections-111-cases" """
prob_matrix = db.get_df(q_prob)
df_111 = db.get_df(q_prob_111)
prob_matrix.columns = [c.replace('-', '_') for c in prob_matrix.columns]
df_111.columns = [c.replace('-', '_') for c in df_111.columns]
# list of drugs for which corrections is required. i.e. max value 0.
df_corrections_list = safety_stock_df[
safety_stock_df['order_upto_point'] == 0][['store_id', 'drug_id']]
df_corrections = pd.merge(
df_corrections_list, prob_matrix, how='inner',
on=['store_id', 'drug_id'], validate='one_to_one')
df_corrections = df_corrections.drop(columns={'corrected_max'})
df_corrections['order_upto_point'] = np.round(
df_corrections['current_ma_3_months'])
df_corrections_1 = df_corrections[
(df_corrections['cumm_prob'] >=
corrections_cumulative_probability_cutoff['ma_less_than_2']) &
(df_corrections['current_flag_ma_less_than_2'] == 1)]
df_corrections_2 = df_corrections[
(df_corrections['cumm_prob'] >=
corrections_cumulative_probability_cutoff['ma_more_than_2']) &
(df_corrections['current_flag_ma_less_than_2'] == 0)]
df_corrections_1 = df_corrections_1[
(df_corrections_1['selling_probability'] >=
corrections_selling_probability_cutoff['ma_less_than_2']) &
(df_corrections_1['current_flag_ma_less_than_2'] == 1)]
df_corrections_2 = df_corrections_2[
(df_corrections_2['selling_probability'] >=
corrections_selling_probability_cutoff['ma_more_than_2']) &
(df_corrections_2['current_flag_ma_less_than_2'] == 0)]
df_corrections = pd.concat(
[df_corrections_1, df_corrections_2]).reset_index(drop=True)
df_corrections_final = df_corrections.copy()[
['store_id', 'drug_id', 'current_bucket', 'selling_probability',
'cumm_prob', 'current_flag_ma_less_than_2',
'avg_ptr', 'current_ma_3_months']]
# adding run time current inventory
df_corrections_final = pd.merge(
df_corrections_final,
safety_stock_df[['store_id', 'drug_id', 'curr_inventory']],
on=['store_id', 'drug_id'], how='left', validate='one_to_one')
df_corrections = df_corrections[
['store_id', 'drug_id', 'order_upto_point']]
df_corrections['reorder_point'] = np.floor(
df_corrections['order_upto_point'] / 2)
df_corrections['safety_stock'] = np.floor(
df_corrections['order_upto_point'] / 4)
df_corrections['correction_flag'] = 'Y'
df_corrections['is_ipc'] = 'Y'
df_corrections = df_corrections.set_index(['store_id', 'drug_id'])
safety_stock_df = safety_stock_df.set_index(['store_id', 'drug_id'])
safety_stock_df.update(df_corrections)
safety_stock_df = safety_stock_df.reset_index()
df_corrections = df_corrections.reset_index()
df_corrections = pd.merge(
df_corrections, df_corrections_final, on=['store_id', 'drug_id'],
how='left', validate='one_to_one')
# update 111 cases here.
df_corrections_111 = pd.merge(
df_corrections_list, df_111, how='inner',
on=['store_id', 'drug_id'], validate='one_to_one')
df_corrections_111 = df_corrections_111.drop(
columns={'current_inventory', 'original_max', 'corrected_max',
'inv_impact', 'max_impact'}, axis=1)
df_corrections_111['order_upto_point'] = np.round(
df_corrections_111['ma_3_months'])
df_corrections_111['reorder_point'] = np.floor(
df_corrections_111['order_upto_point'] / 2)
df_corrections_111['safety_stock'] = np.floor(
df_corrections_111['order_upto_point'] / 4)
df_corrections_111['correction_flag'] = 'Y'
df_corrections_111['is_ipc'] = 'Y'
# adding run time current inventory
df_corrections_111 = pd.merge(
df_corrections_111,
safety_stock_df[['store_id', 'drug_id', 'curr_inventory']],
on=['store_id', 'drug_id'], how='left', validate='one_to_one')
df_corrections_111 = df_corrections_111.set_index(
['store_id', 'drug_id'])
safety_stock_df = safety_stock_df.set_index(['store_id', 'drug_id'])
safety_stock_df.update(df_corrections_111)
safety_stock_df = safety_stock_df.reset_index()
df_corrections_111 = df_corrections_111.reset_index()
# set reset date
curr_date = str(datetime.now())
df_corrections['reset_date'] = curr_date
df_corrections_111['reset_date'] = curr_date
safety_stock_df = safety_stock_df.drop(['store_id'], axis=1)
else:
print('corrections block skipped :')
df_corrections = pd.DataFrame()
df_corrections_111 = pd.DataFrame()
safety_stock_df['correction_flag'] = 'N'
# Correction plugin - End #
# Chronic drug changes
if chronic_max_flag == 'Y':
# based on ME OOS feedback - keep chronic drugs
drug_max_zero = tuple(
safety_stock_df.query('order_upto_point == 0')['drug_id'])
# reading chronic drug list
drug_chronic_max_zero_query = '''
select id as drug_id from "{schema}".drugs
where category = 'chronic'
and id in {0}
'''.format(str(drug_max_zero), schema=schema)
drug_chronic_max_zero = db.get_df(drug_chronic_max_zero_query)[
'drug_id']
# setting non zero max for such drugs
safety_stock_df.loc[
(safety_stock_df['drug_id'].isin(drug_chronic_max_zero)) &
(safety_stock_df['order_upto_point'] == 0),
'order_upto_point'] = 1
safety_stock_df.loc[
(safety_stock_df['drug_id'].isin(drug_chronic_max_zero)) &
(safety_stock_df['order_upto_point'] == 0),
'correction_flag'] = 'Y_chronic'
safety_stock_df = ipcv4_heuristics(safety_stock_df, drug_type_list_v4,
db, schema)
if v5_active_flag == "Y":
logger.info("IPC V5 Correction Starts")
safety_stock_df = v5_corrections(store_id, safety_stock_df,
db, schema, logger)
logger.info("IPC V5 Correction Successful")
# if v6_active_flag == "Y":
# logger.info("IPC V6 Correction Starts")
# safety_stock_df, drugs_max_to_lock_ipcv6, drug_rejects_ipcv6 = \
# v6_corrections(store_id, safety_stock_df, reset_date, v6_type_list,
# v6_ptr_cut_off, logger)
#
# # add algo name to v6 write table
# drugs_max_to_lock_ipcv6["algo"] = 'non-ipc'
# drug_rejects_ipcv6["algo"] = 'non-ipc'
# logger.info("IPC V6 Correction Successful")
# else:
drugs_max_to_lock_ipcv6 = pd.DataFrame()
drug_rejects_ipcv6 = pd.DataFrame()
return safety_stock_df, df_corrections, df_corrections_111, \
        drugs_max_to_lock_ipcv6, drug_rejects_ipcv6

# === end of zeno_etl_libs/utils/non_ipc/safety_stock/safety_stock.py ===
import pandas as pd
import datetime
def forecast_patient_data(store_id_list, type_list, reset_date, db, schema,
logger=None, last_date=None):
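    """
    Build a weekly drug-level patient-count series (distinct patients on
    bills minus returns) for the given stores, padded onto a full
    calendar, trimmed to complete weeks and to dates after the stores'
    first bill.
    """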
''' FETCHING HISTORICAL PATIENT DATA'''
if last_date is None:
last_date = datetime.date(day=1, month=4, year=2019)
print('Date range', str(last_date), str(reset_date))
# store list
if type(store_id_list) is not list:
store_id_list = [store_id_list]
store_id_list = str(store_id_list).replace('[', '(').replace(']', ')')
# drug list
drug_list_query = """
select id as drug_id from "{schema}".drugs where type in {0}
""".format(type_list, schema=schema)
drug_list = db.get_df(drug_list_query)
drug_list_tuple = tuple(drug_list['drug_id'])
# getting patient data
patient_data_query = """
select date(a."created-at") as "sales-date",
inv."drug-id",
count(distinct "patient-id") as "patient-count"
from "{schema}"."bills-1" f
join "{schema}"."bill-items-1" a on f.id = a."bill-id"
left join "{schema}"."inventory-1" inv on a."inventory-id" = inv.id
where date(a."created-at") >= '{last_date}'
and date(a."created-at") <= '{reset_date}'
and f."store-id" in {store_id_list}
and inv."drug-id" in {drug_list}
group by date(a."created-at"), inv."drug-id"
union all
select date(a."returned-at") as "sales-date",
inv."drug-id",
count(distinct "patient-id")*-1 as "patient-count"
from "{schema}"."customer-return-items-1" a
join "{schema}"."bills-1" b on a."bill-id" = b.id
left join "{schema}"."inventory-1" inv on a."inventory-id" = inv.id
where date(a."returned-at") >= '{last_date}'
and date(a."returned-at") <= '{reset_date}'
and b."store-id" in {store_id_list}
and inv."drug-id" in {drug_list}
group by date(a."returned-at"), inv."drug-id"
""".format(store_id_list=store_id_list, last_date=str(last_date),
reset_date=str(reset_date), drug_list=drug_list_tuple,
schema=schema)
patient_data = db.get_df(patient_data_query)
patient_data.columns = [col.replace('-', '_') for col in patient_data.columns]
'''CREATING DAY-DRUG patient_data CROSS TABLE'''
calendar_query = """
select date, year, month, "week-of-year", "day-of-week"
from "{schema}".calendar
""".format(schema=schema)
calendar = db.get_df(calendar_query)
calendar.columns = [c.replace('-', '_') for c in calendar.columns]
calendar['date'] = pd.to_datetime(calendar['date'])
patient_data['sales_date'] = pd.to_datetime(patient_data['sales_date'])
print('Distinct drug count', patient_data.drug_id.nunique())
print('No of days', patient_data.sales_date.nunique())
cal_patient_weekly = calendar.loc[
(pd.to_datetime(calendar['date']) >= patient_data.sales_date.min()) &
(calendar['date'] <= patient_data.sales_date.max())]
# removing the first week if it has less than 7 days
min_year = cal_patient_weekly.year.min()
x = cal_patient_weekly.loc[(cal_patient_weekly.year == min_year)]
min_month = x.month.min()
x = x.loc[(x.month == min_month)]
min_week = x.week_of_year.min()
if x.loc[x.week_of_year == min_week].shape[0] < 7:
print('removing dates for', min_year, min_month, min_week)
cal_patient_weekly = cal_patient_weekly.loc[
~((cal_patient_weekly.week_of_year == min_week) &
(cal_patient_weekly.year == min_year))]
# removing the latest week if it has less than 7 days
max_year = cal_patient_weekly.year.max()
x = cal_patient_weekly.loc[(cal_patient_weekly.year == max_year)]
max_month = x.month.max()
x = x.loc[(x.month == max_month)]
max_week = x.week_of_year.max()
if x.loc[x.week_of_year == max_week].shape[0] < 7:
print('removing dates for', max_year, max_month, max_week)
cal_patient_weekly = cal_patient_weekly.loc[
~((cal_patient_weekly.week_of_year == max_week) &
(cal_patient_weekly.year == max_year))]
# adding week begin date
cal_patient_weekly['week_begin_dt'] = cal_patient_weekly.apply(
lambda x: x['date'] - datetime.timedelta(x['day_of_week']), axis=1)
drugs = patient_data[['drug_id']].drop_duplicates()
drugs['key'] = 1
cal_patient_weekly['key'] = 1
cal_drug_w = drugs.merge(cal_patient_weekly, on='key', how='inner')
cal_drug_w.drop('key', axis=1, inplace=True)
cal_drug_patient_w = cal_drug_w.merge(
patient_data, left_on=['drug_id', 'date'],
right_on=['drug_id', 'sales_date'],
how='left')
cal_drug_patient_w.drop('sales_date', axis=1, inplace=True)
cal_drug_patient_w.patient_count.fillna(0, inplace=True)
# assertion test to check no of drugs * no of days equals total entries
drug_count = cal_drug_patient_w.drug_id.nunique()
day_count = cal_drug_patient_w.date.nunique()
print('Distinct no of drugs', drug_count)
print('Distinct dates', day_count)
print('DF shape', cal_drug_patient_w.shape[0])
# assert drug_count*day_count == cal_drug_sales.shape[0]
# checking for history available and store opening date
first_bill_query = """
select min(date("created-at")) as bill_date
from "{schema}"."bills-1"
where "store-id" in {0}
""".format(store_id_list, schema=schema)
first_bill_date = db.get_df(first_bill_query).values[0][0]
print(first_bill_date)
cal_drug_patient_w = cal_drug_patient_w.query(
'date >= "{}"'.format(first_bill_date))
cal_drug_patient_weekly = cal_drug_patient_w.groupby(
['drug_id', 'week_begin_dt', 'week_of_year']
)['patient_count'].sum().reset_index()
cal_drug_patient_weekly.rename(
columns={'week_begin_dt': 'date'}, inplace=True)
    return cal_drug_patient_weekly

# === end of zeno_etl_libs/utils/non_ipc/data_prep/patient_data.py ===
import datetime
from zeno_etl_libs.utils.ipc.data_prep import forecast_data_prep
from zeno_etl_libs.utils.non_ipc.data_prep.patient_data import forecast_patient_data
from zeno_etl_libs.utils.ipc.item_classification import abc_xyz_classification
def non_ipc_data_prep(store_id_list, reset_date, type_list, db, schema,
agg_week_cnt=4, logger=None):
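    """
    Prepare non-IPC forecasting inputs: weekly demand merged with weekly
    patient counts, restricted to drugs with sales in the last 12 weeks,
    aggregated into 4-week periods, and bucketed via ABC-XYZ
    classification on the latest three periods.
    """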
# getting demand data
cal_drug_sales_weekly, _, _ = forecast_data_prep(
store_id_list, type_list, reset_date, db, schema)
# getting patient data
cal_drug_patient_weekly = forecast_patient_data(
store_id_list, type_list, reset_date, db, schema)
# merging patient and demand data
cal_drug_data_weekly = cal_drug_sales_weekly.merge(cal_drug_patient_weekly)
'''ADDITIONAL CHECKS'''
n = 12
prev_n_week_dt = (
cal_drug_data_weekly['date'].max() - datetime.timedelta(n*7))
logger.info('Previous week date for last 12 weeks' + str(prev_n_week_dt))
prev_n_week_sales = cal_drug_data_weekly[
cal_drug_data_weekly['date'] > prev_n_week_dt].\
groupby('drug_id')['net_sales_quantity'].sum().reset_index()
prev_no_sales_drug_weekly = prev_n_week_sales.loc[
prev_n_week_sales['net_sales_quantity'] <= 0, 'drug_id'].values
prev_sales_drug_weekly = prev_n_week_sales.loc[
prev_n_week_sales['net_sales_quantity'] > 0, 'drug_id'].values
logger.info('No net sales of drugs within last 12 weeks' +
str(len(prev_no_sales_drug_weekly)))
logger.info('Sales of drugs within last 12 weeks' +
str(len(prev_sales_drug_weekly)))
# getting drug id with atleast one sale in last 12 weeks
cal_drug_data_weekly = cal_drug_data_weekly[
cal_drug_data_weekly.drug_id.isin(prev_sales_drug_weekly)]
'''4 WEEKS AGGREGATION'''
cal_drug_data_weekly['week_number'] = cal_drug_data_weekly.\
groupby('drug_id')['date'].rank(ascending=False) - 1
cal_drug_data_weekly['agg_wk_count'] = (
cal_drug_data_weekly['week_number']/agg_week_cnt).astype(int) + 1
agg_wk_ct_lt_4 = cal_drug_data_weekly.\
groupby('agg_wk_count')['week_number'].nunique().reset_index()
agg_wk_ct_lt_4 = agg_wk_ct_lt_4.query('week_number < 4')['agg_wk_count']
# removing incomplete 4-week period
cal_drug_data_weekly = cal_drug_data_weekly[
~cal_drug_data_weekly['agg_wk_count'].isin(agg_wk_ct_lt_4)]
cal_drug_data_agg_weekly = cal_drug_data_weekly.\
groupby(['drug_id', 'agg_wk_count']).\
agg({'date': 'min', 'net_sales_quantity': 'sum', 'patient_count': 'sum'
}).\
reset_index()
cal_drug_data_agg_weekly.sort_values(['drug_id', 'date'], inplace=True)
'''SKU CLASSIFICATIONS'''
# Taking latest 3 4-week period for classification
bucket_period = 3
agg_wk_classification = cal_drug_data_agg_weekly.loc[
cal_drug_data_agg_weekly['agg_wk_count'] <= bucket_period, 'date'
].dt.date.unique()
cal_drug_data_classification = cal_drug_data_agg_weekly[
cal_drug_data_agg_weekly['date'].isin(agg_wk_classification)]
cal_drug_data_classification.rename(
columns={'date': 'month_begin_dt'}, inplace=True)
drug_class, bucket_sales = abc_xyz_classification(
cal_drug_data_classification)
return cal_drug_data_agg_weekly, cal_drug_data_weekly, drug_class,\
        bucket_sales

# === end of zeno_etl_libs/utils/non_ipc/data_prep/non_ipc_data_prep.py ===
import numpy as np
import pandas as pd
from functools import reduce
from scipy.optimize import minimize, LinearConstraint
from zeno_etl_libs.utils.warehouse.forecast.errors import ape_calc, ae_calc,\
train_error
def optimise_ab_mae(weights, naive_fcst, ma_fcst, ets_fcst, actual):
fcst = weights[0]*naive_fcst + weights[1]*ma_fcst + weights[2]*ets_fcst
return np.sum(abs((fcst - actual)))/len(naive_fcst)
def optimise_ab_sse(weights, naive_fcst, ma_fcst, ets_fcst, actual):
fcst = weights[0]*naive_fcst + weights[1]*ma_fcst + weights[2]*ets_fcst
return np.sum((fcst - actual)**2)
def optimise_c_mae(
weights, naive_fcst, ma_fcst, ets_fcst, croston_fcst, actual):
fcst = (
weights[0]*naive_fcst + weights[1]*ma_fcst + weights[2]*ets_fcst +
weights[3]*croston_fcst)
return np.sum(abs((fcst - actual)))/len(naive_fcst)
def optimise_c_sse(
weights, naive_fcst, ma_fcst, ets_fcst, croston_fcst, actual):
fcst = (
weights[0]*naive_fcst + weights[1]*ma_fcst + weights[2]*ets_fcst +
weights[3]*croston_fcst)
return np.sum((fcst - actual)**2)
def ensemble_minimisation(
train, error, predict, kind='mae', logger=None):
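    """
    Blend the naive, moving-average, ETS and Croston forecasts into one
    ensemble: find non-negative weights summing to 1 that minimise MAE
    ('mae') or SSE ('sse') on the training window. A/B buckets (no
    Croston forecast) use three base models, C buckets use four.
    """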
# mergring train dfs for weighted average of mdels
train = train.copy()
train_cols = ['drug_id', 'month_begin_dt', 'year', 'month',
'actual', 'fcst', 'std', 'ape', 'ae']
train = [x[train_cols] for x in train]
all_train = reduce(
lambda left, right: pd.merge(
left, right,
on=['drug_id', 'month_begin_dt', 'year', 'month'], how='outer'),
train)
all_train.columns = [
'drug_id', 'month_begin_dt', 'year', 'month',
'actual', 'fcst_naive', 'std_naive', 'ape_naive', 'ae_naive',
'actual_ma', 'fcst_ma', 'std_ma', 'ape_ma', 'ae_ma',
'actual_ets', 'fcst_ets', 'std_ets', 'ape_ets', 'ae_ets',
'actual_croston', 'fcst_croston', 'std_croston', 'ape_croston', 'ae_croston']
all_train.drop(
['actual_ma', 'actual_ets', 'actual_croston'], axis=1, inplace=True)
# mergring predict dfs for forecast
predict = predict.copy()
predict_cols = ['drug_id', 'month_begin_dt', 'year', 'month',
'fcst', 'std']
predict = [x[predict_cols] for x in predict]
all_predict = reduce(
lambda left, right: pd.merge(
left, right,
on=['drug_id', 'month_begin_dt', 'year', 'month'], how='outer'),
predict)
all_predict.columns = [
'drug_id', 'month_begin_dt', 'year', 'month',
'fcst_naive', 'std_naive', 'fcst_ma', 'std_ma',
'fcst_ets', 'std_ets', 'fcst_croston', 'std_croston']
'''BASE MODELS WEIGHT OPTIMISATION - A/B'''
all_train_ab = all_train[all_train['ape_croston'].isna()]
all_predict_ab = all_predict[all_predict['fcst_croston'].isna()]
# individial forecast and actuals
naive_fcst_ab = all_train_ab['fcst_naive'].values
ma_fcst_ab = all_train_ab['fcst_ma'].values
ets_fcst_ab = all_train_ab['fcst_ets'].values
actual_ab = all_train_ab['actual'].values
# initialisation
weights_ab = np.array([1/3, 1/3, 1/3])
bounds_ab = ((0, 1), (0, 1), (0, 1))
# constrains on weights: sum(wi) = 1
constrain_ab = LinearConstraint([1, 1, 1], [1], [1])
# minimising errors for A/B buckets
if kind == 'mae':
results = minimize(
optimise_ab_mae, x0=weights_ab, bounds=bounds_ab,
constraints=constrain_ab,
args=(naive_fcst_ab, ma_fcst_ab, ets_fcst_ab, actual_ab))
final_weights_ab = results.x
elif kind == 'sse':
results = minimize(
optimise_ab_sse, x0=weights_ab, bounds=bounds_ab,
constraints=constrain_ab,
args=(naive_fcst_ab, ma_fcst_ab, ets_fcst_ab, actual_ab))
final_weights_ab = results.x
else:
final_weights_ab = weights_ab
# creating final train, error and predict dataset
all_train_ab['fcst'] = np.round(
final_weights_ab[0]*naive_fcst_ab + final_weights_ab[1]*ma_fcst_ab +
final_weights_ab[2]*ets_fcst_ab)
all_train_ab['std'] = np.round(np.sqrt(
(final_weights_ab[0]*naive_fcst_ab)**2 +
(final_weights_ab[1]*ma_fcst_ab)**2 +
(final_weights_ab[2]*ets_fcst_ab)**2))
all_train_ab['hyper_params'] = str(tuple(final_weights_ab))
all_train_ab['model'] = kind
all_predict_ab['fcst'] = np.round(
final_weights_ab[0]*all_predict_ab['fcst_naive'] +
final_weights_ab[1]*all_predict_ab['fcst_ma'] +
final_weights_ab[2]*all_predict_ab['fcst_ets'])
all_predict_ab['std'] = np.round(np.sqrt(
(final_weights_ab[0]*all_predict_ab['fcst_naive'])**2 +
(final_weights_ab[1]*all_predict_ab['fcst_ma'])**2 +
(final_weights_ab[2]*all_predict_ab['fcst_ets'])**2))
all_predict_ab['model'] = kind
'''BASE MODELS WEIGHT OPTIMISATION - C'''
all_train_c = all_train[~all_train['ape_croston'].isna()]
all_predict_c = all_predict[~all_predict['fcst_croston'].isna()]
# individial forecast and actuals
naive_fcst_c = all_train_c['fcst_naive'].values
ma_fcst_c = all_train_c['fcst_ma'].values
ets_fcst_c = all_train_c['fcst_ets'].values
croston_fcst_c = all_train_c['fcst_croston'].values
actual_c = all_train_c['actual'].values
# initialisation
weights_c = np.array([1/4, 1/4, 1/4, 1/4])
bounds_c = ((0, 1), (0, 1), (0, 1), (0, 1))
# constrains on weights: sum(wi) = 1
constrain_c = LinearConstraint([1, 1, 1, 1], [1], [1])
# minimising errors for C buckets
if kind == 'mae':
results = minimize(
optimise_c_mae, x0=weights_c, bounds=bounds_c,
constraints=constrain_c,
args=(naive_fcst_c, ma_fcst_c, ets_fcst_c,
croston_fcst_c, actual_c))
final_weights_c = results.x
elif kind == 'sse':
results = minimize(
optimise_c_sse, x0=weights_c, bounds=bounds_c,
constraints=constrain_c,
args=(naive_fcst_c, ma_fcst_c, ets_fcst_c,
croston_fcst_c, actual_c))
final_weights_c = results.x
else:
final_weights_c = weights_c
# creating final train, error and predict dataset
all_train_c['fcst'] = np.round(
final_weights_c[0]*naive_fcst_c + final_weights_c[1]*ma_fcst_c +
final_weights_c[2]*ets_fcst_c + final_weights_c[3]*croston_fcst_c)
all_train_c['std'] = np.round(np.sqrt(
(final_weights_c[0]*naive_fcst_c)**2 +
(final_weights_c[1]*ma_fcst_c)**2 +
(final_weights_c[2]*ets_fcst_c)**2 +
(final_weights_c[3]*croston_fcst_c)**2))
all_train_c['hyper_params'] = str(tuple(final_weights_c))
all_train_c['model'] = kind
all_predict_c['fcst'] = np.round(
final_weights_c[0]*all_predict_c['fcst_naive'] +
final_weights_c[1]*all_predict_c['fcst_ma'] +
final_weights_c[2]*all_predict_c['fcst_ets'] +
final_weights_c[3]*all_predict_c['fcst_croston'])
all_predict_c['std'] = np.round(np.sqrt(
(final_weights_c[0]*all_predict_c['fcst_naive'])**2 +
(final_weights_c[1]*all_predict_c['fcst_ma'])**2 +
(final_weights_c[2]*all_predict_c['fcst_ets'])**2 +
(final_weights_c[3]*all_predict_c['fcst_croston'])**2))
all_predict_c['model'] = kind
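    # With each weight constrained to [0, 1] and the weights summing to 1, the
    # ensemble forecast is a convex blend of the base forecasts. Illustrative
    # arithmetic (made-up numbers): weights (0.2, 0.5, 0.3) on forecasts
    # (100, 120, 110) give 0.2*100 + 0.5*120 + 0.3*110 = 113. The dispersion is
    # combined in quadrature over the weighted forecasts, as coded above.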
'''COMPILING TRAINING AND FORECAST '''
# train
ensemble_train = pd.concat([all_train_ab, all_train_c], axis=0)
ensemble_train['ape'] = ensemble_train.apply(
lambda row: ape_calc(row['actual'], row['fcst']), axis=1)
ensemble_train['ae'] = ensemble_train.apply(
lambda row: ae_calc(row['actual'], row['fcst']), axis=1)
cols = train_cols + ['hyper_params', 'model']
ensemble_train = ensemble_train[cols]
# train error
ensemble_train_error = ensemble_train.groupby('drug_id').\
apply(train_error).\
reset_index(drop=True)
ensemble_train_error['model'] = kind
# predict
ensemble_predict = pd.concat([all_predict_ab, all_predict_c], axis=0)
cols = predict_cols + ['model']
ensemble_predict = ensemble_predict[cols]
    return ensemble_train, ensemble_train_error, ensemble_predict
# ---- end of file: zeno_etl_libs/utils/non_ipc/forecast/ensemble_minimisation.py ----
import numpy as np
from zeno_etl_libs.utils.warehouse.forecast.helper_functions import make_future_df
from zeno_etl_libs.utils.warehouse.forecast.errors import ape_calc, ae_calc
def croston_tsb(ts, horizon=1, alpha=0.5, beta=0.7):
# Transform the input into a numpy array
d = np.array(ts)
# Historical period length
cols = len(d)
# Append np.nan into the demand array to cover future periods
d = np.append(d, [np.nan]*horizon)
# level (a), probability(p) and forecast (f)
a, p, f = np.full((3, cols+horizon), np.nan)
# Initialization
first_occurence = np.argmax(d[:cols] > 0)
a[0] = d[first_occurence]
p[0] = 1/(1 + first_occurence)
f[0] = p[0]*a[0]
# Create all the t forecasts
for t in range(0, cols):
if d[t] > 0:
a[t+1] = alpha*d[t] + (1-alpha)*a[t]
p[t+1] = beta*(1) + (1-beta)*p[t]
else:
a[t+1] = a[t]
p[t+1] = (1-beta)*p[t]
f[t+1] = p[t+1]*a[t+1]
# creating forecast
for t in range(cols, cols+horizon-1):
if f[t] > 1:
a[t+1] = alpha*f[t] + (1-alpha)*a[t]
p[t+1] = beta*(1) + (1-beta)*p[t]
else:
a[t+1] = a[t]
p[t+1] = (1-beta)*p[t]
f[t+1] = p[t+1]*a[t+1]
# Future Forecast
# a[cols+1:cols+horizon] = a[cols]
# p[cols+1:cols+horizon] = p[cols]
# f[cols+1:cols+horizon] = f[cols]
# df = pd.DataFrame.from_dict(
# {"Demand":d,"Forecast":f,"Period":p,"Level":a,"Error":d-f})
return np.round(f), d-f
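# Illustrative usage sketch (not called by the pipeline): the helper below runs
# the TSB-variant Croston smoother above on a small made-up intermittent demand
# series. The series, alpha and beta values are assumptions for demonstration
# only; it relies on the numpy import at the top of this module.
def _example_croston_tsb():
    # 12 periods of intermittent demand followed by a 4-period forecast
    demand = [0, 0, 4, 0, 0, 0, 3, 0, 5, 0, 0, 2]
    fcst, residual = croston_tsb(demand, horizon=4, alpha=0.5, beta=0.7)
    # fcst holds len(demand) + horizon values: in-sample fits first, then the
    # forward forecast; residual is demand minus fit (nan for future periods)
    return fcst[-4:], residual[:len(demand)]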
def croston_train_weekly(df, out_of_sample=4, horizon=4, params=None):
if params is not None:
alpha = params[0]
beta = params[1]
else:
alpha = 0.5
beta = 0.7
train = df.copy()
train.drop(train.tail(out_of_sample).index, inplace=True)
# dividing the series into train and validation set
input_series = train['net_sales_quantity'].values
validation = df['net_sales_quantity'].tail(out_of_sample).values
train_forecast, train_error = croston_tsb(
input_series, horizon, alpha, beta)
fcst = train_forecast[-out_of_sample:]
    error = train_error[:-out_of_sample]  # in-sample residuals from the fit
std = np.sqrt((np.std(input_series)**2 + sum(np.square(error))/len(error)))
predict_df = make_future_df(train[:-out_of_sample+1], 1)
predict_df['fcst'] = sum(fcst)
predict_df['std'] = np.round(std*np.sqrt(horizon))
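    # the 4 weekly forecasts are rolled up into a single 4-week quantity and the
    # std is scaled by sqrt(horizon), which assumes roughly independent weekly errors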
predict_df['actual'] = sum(validation)
predict_df['ape'] = [
ape_calc(actual, forecast) for actual, forecast in zip(
predict_df['actual'], predict_df['fcst'])]
predict_df['ae'] = [
ae_calc(actual, forecast) for actual, forecast in zip(
predict_df['actual'], predict_df['fcst'])]
predict_df['hyper_params'] = str(params)
return predict_df
def croston_predict_weekly(df, out_of_sample=4, horizon=4, params=None):
if params is not None:
alpha = params[0]
beta = params[1]
else:
alpha = 0.5
beta = 0.7
train = df.copy()
# dividing the series into train and validation set
input_series = train['net_sales_quantity'].values
train_forecast, train_error = croston_tsb(
input_series, horizon, alpha, beta)
fcst = train_forecast[-out_of_sample:]
    error = train_error[:-out_of_sample]  # in-sample residuals from the fit
std = np.sqrt((np.std(input_series)**2 + sum(np.square(error))/len(error)))
predict_df = make_future_df(train[:-out_of_sample+1], 1)
predict_df['fcst'] = sum(fcst)
predict_df['std'] = np.round(std*np.sqrt(horizon))
    return predict_df
# ---- end of file: zeno_etl_libs/utils/non_ipc/forecast/croston.py ----
import time
import pandas as pd
from itertools import product
from zeno_etl_libs.utils.warehouse.forecast.moving_average import ma_train_monthly,\
ma_predict_monthly
from zeno_etl_libs.utils.warehouse.forecast.ets import ets_train_monthly,\
ets_predict_monthly
from zeno_etl_libs.utils.warehouse.forecast.errors import train_error
from zeno_etl_libs.utils.warehouse.forecast.helper_functions import apply_parallel_ets
from zeno_etl_libs.utils.non_ipc.forecast.\
helper_functions import non_ipc_error_report, apply_parallel_croston
from zeno_etl_libs.utils.non_ipc.forecast.croston import croston_train_weekly,\
croston_predict_weekly
from zeno_etl_libs.utils.non_ipc.forecast.\
ensemble_champion import ensemble_champion
from zeno_etl_libs.utils.non_ipc.forecast.\
ensemble_minimisation import ensemble_minimisation
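# Pipeline overview: four base models (naive, 3-month moving average, ETS and
# Croston for the C bucket) are trained per drug and then combined two ways - a
# "champion" pick of the lowest-MAPE base model and an error-minimising weighted
# ensemble (MAE/SSE). When train_flag is False, only a simple exponential
# smoothing forecast is produced.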
def non_ipc_forecast(
drug_sales_monthly, drug_data_weekly, drug_class, out_of_sample,
horizon, train_flag, logger=None, kind='mae'):
if train_flag:
'''BASE FORECASTING - NAIVE'''
logger.info('STARTING NAIVE TRAINING AND FORECAST')
# making copy for data
naive_train_data = drug_sales_monthly.copy()
naive_train_data.rename(columns={'date': 'month_begin_dt'}, inplace=True)
k = 1 # for naive using the ma train function
# train
start = time.time()
naive_train = naive_train_data.groupby('drug_id').apply(
ma_train_monthly, k, out_of_sample).\
reset_index(drop=True)
end = time.time()
logger.info('Naive Train: Run time ' + str(round(end-start, 2)) + 'secs')
# train error
start = time.time()
naive_train_error = naive_train.groupby('drug_id').apply(train_error).\
reset_index(drop=True)
end = time.time()
logger.info('Naive Error: Run time ' + str(round(end-start, 2)) + 'secs')
# predict
start = time.time()
naive_predict = naive_train_data.groupby('drug_id').\
apply(ma_predict_monthly, k, out_of_sample).reset_index(drop=True)
end = time.time()
logger.info('Naive Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
# Naive error reports
# _ = non_ipc_error_report(naive_train_error, naive_train, drug_class)
        # model information
naive_train['hyper_params'] = ''
naive_train['model'] = 'naive'
naive_train_error['model'] = 'naive'
naive_predict['model'] = 'naive'
'''BASE FORECASTING - MOVING AVERAGE'''
logger.info('STARTING MOVING AVERAGE TRAINING AND FORECAST')
ma_train_data = drug_sales_monthly.copy()
ma_train_data.rename(columns={'date': 'month_begin_dt'}, inplace=True)
k = 3 # for MA3
# train
start = time.time()
ma_train = ma_train_data.groupby('drug_id').apply(
ma_train_monthly, k, out_of_sample).\
reset_index(drop=True)
end = time.time()
logger.info('MA Train: Run time ' + str(round(end-start, 2)) + 'secs')
# train error
start = time.time()
ma_train_error = ma_train.groupby('drug_id').apply(train_error).\
reset_index(drop=True)
end = time.time()
logger.info('MA Error: Run time ' + str(round(end-start, 2)) + 'secs')
# predict
start = time.time()
ma_predict = ma_train_data.groupby('drug_id').\
apply(ma_predict_monthly, k, out_of_sample).reset_index(drop=True)
end = time.time()
logger.info('MA Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
# Moving average error reports
# _ = non_ipc_error_report(ma_train_error, ma_train, drug_class)
        # model information
ma_train['hyper_params'] = ''
ma_train['model'] = 'ma'
ma_train_error['model'] = 'ma'
ma_predict['model'] = 'ma'
'''BASE FORECASTING - EXPONENTIAL SMOOTHING'''
logger.info('STARTING ESM TRAINING AND FORECAST')
# model parameters
        # Holt-Winters implementation - single, double and triple exponential smoothing
trend = ['additive', None]
seasonal = ['additive', None]
damped = [True, False]
seasonal_periods = [12]
use_boxcox = [True, False]
ets_params = list(
product(trend, seasonal, damped, seasonal_periods, use_boxcox))
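        # 2 (trend) x 2 (seasonal) x 2 (damped) x 1 (seasonal_periods) x 2 (use_boxcox)
        # = 16 candidate parameter combinations evaluated per drug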
ets_train_data = drug_sales_monthly.copy()
ets_train_data.rename(columns={'date': 'month_begin_dt'}, inplace=True)
# train
start = time.time()
ets_train = apply_parallel_ets(
ets_train_data.groupby('drug_id'), ets_train_monthly,
ets_params, horizon, out_of_sample).reset_index(drop=True)
end = time.time()
logger.info('ETS Train: Run time ' + str(round(end-start, 2)) + 'secs')
# train error
start = time.time()
ets_train_error = ets_train.groupby('drug_id').apply(train_error).\
reset_index(drop=True)
end = time.time()
logger.info('ETS Error: Run time ' + str(round(end-start, 2)) + 'secs')
# predict
start = time.time()
ets_predict = apply_parallel_ets(
ets_train_data.groupby('drug_id'), ets_predict_monthly,
ets_train, horizon, out_of_sample).reset_index(drop=True)
end = time.time()
logger.info('ETS Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
# Exponential smoothing error reports
# _ = non_ipc_error_report(ets_train_error, ets_train, drug_class)
# model information
ets_train['model'] = 'ets'
ets_train_error['model'] = 'ets'
ets_predict['model'] = 'ets'
'''BASE FORECASTING - CROSTON FOR C BUCKET'''
logger.info('STARTING CROSTON TRAINING AND FORECAST')
# getting drug list for C bucket
c_bucket_drug_list = list(
drug_class[drug_class['bucket_abc'] == 'C']['drug_id'])
logger.info('No of drugs in Bucket C for Croston' +
str(len(c_bucket_drug_list)))
croston_train_data = drug_data_weekly.copy()
croston_train_data = croston_train_data[
croston_train_data['drug_id'].isin(c_bucket_drug_list)]
croston_train_data.rename(columns={'date': 'month_begin_dt'}, inplace=True)
# Runtime parameters
croston_out_of_sample = 4
croston_horizon = 4
croston_params = (0.5, 0.5)
# train
start = time.time()
croston_train = apply_parallel_croston(
croston_train_data.groupby('drug_id'), croston_train_weekly,
croston_horizon, croston_out_of_sample, croston_params).\
reset_index(drop=True)
end = time.time()
logger.info('Croston Train: Run time ' + str(round(end-start, 2)) + 'secs')
# train error
start = time.time()
croston_train_error = croston_train.groupby('drug_id').\
apply(train_error).\
reset_index(drop=True)
end = time.time()
logger.info('Croston Error: Run time ' + str(round(end-start, 2)) + 'secs')
# predict
start = time.time()
croston_predict = apply_parallel_croston(
croston_train_data.groupby('drug_id'), croston_predict_weekly,
croston_horizon, croston_out_of_sample, croston_params).\
reset_index(drop=True)
end = time.time()
logger.info('Croston Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
# Croston error reports
# _ = non_ipc_error_report(croston_train_error, croston_train, drug_class)
# model information
croston_train['model'] = 'croston'
croston_train_error['model'] = 'croston'
croston_predict['model'] = 'croston'
'''BASE MODELS: COMBINING'''
train = [naive_train, ma_train, ets_train, croston_train]
error = [
naive_train_error, ma_train_error, ets_train_error,
croston_train_error]
predict = [naive_predict, ma_predict, ets_predict, croston_predict]
base_train = pd.concat(train, axis=0)
base_train['final_fcst'] = 'N'
base_train_error = pd.concat(error, axis=0)
base_train_error['final_fcst'] = 'N'
base_predict = pd.concat(predict, axis=0)
base_predict['final_fcst'] = 'N'
'''ENSEMBLE FORECASTING - CHAMPION MODEL'''
logger.info('STARTING ENSEMBLE CHAMPION MODEL SELECTION')
champion_train, champion_train_error, champion_predict = ensemble_champion(
train, error, predict, logger)
champion_train['model'] = 'champion_' + champion_train['model']
champion_train_error['model'] = 'champion_' + champion_train_error['model']
champion_predict['model'] = 'champion_' + champion_predict['model']
champion_train['final_fcst'] = 'Y'
champion_train_error['final_fcst'] = 'Y'
champion_predict['final_fcst'] = 'Y'
        # Champion model ensemble training errors
# _ = non_ipc_error_report(champion_train_error, champion_train, drug_class)
        '''ENSEMBLE FORECASTING - ERROR MINIMISATION'''
optimised_train, optimised_train_error,\
optimised_predict = ensemble_minimisation(
train, error, predict, kind, logger)
optimised_train['final_fcst'] = 'N'
optimised_train_error['final_fcst'] = 'N'
optimised_predict['final_fcst'] = 'N'
        # Optimised-weights ensemble training errors
# _ = non_ipc_error_report(
# optimised_train_error, optimised_train, drug_class)
        '''ENSEMBLE MODELS: COMBINING'''
ensemble_train = [champion_train, optimised_train]
ensemble_error = [champion_train_error, optimised_train_error]
ensemble_predict = [champion_predict, optimised_predict]
ensemble_train = pd.concat(ensemble_train, axis=0)
ensemble_error = pd.concat(ensemble_error, axis=0)
ensemble_predict = pd.concat(ensemble_predict, axis=0)
else:
'''BASE FORECASTING - SIMPLE EXPONENTIAL SMOOTHING'''
logger.info('STARTING SES FORECAST')
# model parameters
        # Holt-Winters implementation - simple exponential smoothing
trend = [None]
seasonal = [None]
damped = [False]
seasonal_periods = [12]
use_boxcox = [False]
ses_params = list(
product(trend, seasonal, damped, seasonal_periods, use_boxcox))
ses_train_data = drug_sales_monthly.copy()
ses_train_data.rename(columns={'date': 'month_begin_dt'}, inplace=True)
ses_train_data['hyper_params'] = str(ses_params[0])
# predict
start = time.time()
ses_predict = apply_parallel_ets(
ses_train_data.groupby('drug_id'), ets_predict_monthly,
ses_train_data, horizon, out_of_sample).reset_index(drop=True)
end = time.time()
        logger.info('SES Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
# model information
ses_predict['model'] = 'ses'
# creating final return df
base_train = pd.DataFrame()
base_train_error = pd.DataFrame()
base_predict = pd.DataFrame()
ensemble_train = pd.DataFrame()
ensemble_error = pd.DataFrame()
ensemble_predict = ses_predict
ensemble_predict['final_fcst'] = 'Y'
return base_train, base_train_error,\
        base_predict, ensemble_train, ensemble_error, ensemble_predict
# ---- end of file: zeno_etl_libs/utils/non_ipc/forecast/forecast_main.py ----
import pandas as pd
import numpy as np
from functools import reduce
def ensemble_champion(train, error, predict, logger=None):
    # merging error dfs for best model selection
all_train_error = reduce(
lambda left, right: pd.merge(left, right, on='drug_id', how='outer'),
error)
all_train_error.columns = [
'drug_id', 'mae_naive', 'mape_naive', 'model_naive', 'mae_ma',
'mape_ma', 'model_ma', 'mae_ets', 'mape_ets', 'model_ets',
'mae_croston', 'mape_croston', 'model_croston']
# Best model selection
all_train_error['mape_best'] = all_train_error[[
'mape_naive', 'mape_ma', 'mape_ets', 'mape_croston']].min(axis=1)
all_train_error['model_best'] = np.select([
all_train_error['mape_best'] == all_train_error['mape_ets'],
all_train_error['mape_best'] == all_train_error['mape_ma'],
all_train_error['mape_best'] == all_train_error['mape_croston'],
all_train_error['mape_best'] == all_train_error['mape_naive']],
['ets', 'ma', 'croston', 'naive']
)
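    # np.select returns the first matching condition, so on exact MAPE ties the
    # preference order is ets > ma > croston > naive (the order listed above)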
# Different models concatenation
naive_drug_best = all_train_error[all_train_error['model_best'] == 'naive']
ma_drug_best = all_train_error[all_train_error['model_best'] == 'ma']
ets_drug_best = all_train_error[all_train_error['model_best'] == 'ets']
croston_drug_best = all_train_error[
all_train_error['model_best'] == 'croston']
print(
len(all_train_error), len(naive_drug_best), len(ma_drug_best),
len(ets_drug_best), len(croston_drug_best))
logger.info('Total drugs: ' + str(len(all_train_error)))
logger.info('Naive drugs: ' + str(len(naive_drug_best)))
logger.info('MA drugs: ' + str(len(ma_drug_best)))
logger.info('ETS drugs: ' + str(len(ets_drug_best)))
logger.info('Croston drugs: ' + str(len(croston_drug_best)))
# creating ensemble dfs
naive_train_best = train[0][train[0]['drug_id'].isin(
naive_drug_best['drug_id'])]
naive_train_error_best = error[0][error[0]['drug_id'].isin(
naive_drug_best['drug_id'])]
naive_predict_best = predict[0][predict[0]['drug_id'].isin(
naive_drug_best['drug_id'])]
ma_train_best = train[1][train[1]['drug_id'].isin(
ma_drug_best['drug_id'])]
ma_train_error_best = error[1][error[1]['drug_id'].isin(
ma_drug_best['drug_id'])]
ma_predict_best = predict[1][predict[1]['drug_id'].isin(
ma_drug_best['drug_id'])]
ets_train_best = train[2][train[2]['drug_id'].isin(
ets_drug_best['drug_id'])]
ets_train_error_best = error[2][error[2]['drug_id'].isin(
ets_drug_best['drug_id'])]
ets_predict_best = predict[2][predict[2]['drug_id'].isin(
ets_drug_best['drug_id'])]
croston_train_best = train[3][train[3]['drug_id'].isin(
croston_drug_best['drug_id'])]
croston_train_error_best = error[3][error[3]['drug_id'].isin(
croston_drug_best['drug_id'])]
croston_predict_best = predict[3][predict[3]['drug_id'].isin(
croston_drug_best['drug_id'])]
ensemble_train = pd.concat(
[naive_train_best, ma_train_best, ets_train_best, croston_train_best],
axis=0)
ensemble_train_error = pd.concat(
[naive_train_error_best, ma_train_error_best, ets_train_error_best,
croston_train_error_best], axis=0)
ensemble_predict = pd.concat(
[naive_predict_best, ma_predict_best, ets_predict_best,
croston_predict_best], axis=0)
    return ensemble_train, ensemble_train_error, ensemble_predict
# ---- end of file: zeno_etl_libs/utils/non_ipc/forecast/ensemble_champion.py ----
import numpy as np
import pandas as pd
from calendar import monthrange
from dateutil.relativedelta import relativedelta
from datetime import datetime
from dateutil.tz import gettz
from zeno_etl_libs.db.db import MSSql
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.utils.warehouse.data_prep.wh_data_prep import get_launch_stock_per_store
def lead_time():
mssql = MSSql(connect_via_tunnel=False)
cnxn = mssql.open_connection()
cursor = cnxn.cursor()
# Reading lead time data
# Last 90 days on purchase date
# excluding TEPL distributor
# Diff between PO created date and gatepass date
sql_bhw = '''
SELECT
*
FROM
(
SELECT
199 as "wh_id",
i.Barcode as "drug_id" ,
i.name as "drug_name",
a.Altercode as "distributor_id",
a.Name as "distributor_name",
a2.vdt as "gate-pass-date",
--sp.Vdt as "purchase_date",
--sp.RefVdt as "po_opend_date",
s.PBillDt as "po_created_date",
s.UpdatedOn as "purchase_confirm_date",
sp.Qty as "quantity" ,
DATEDIFF(day, s.PBillDt , a2.vdt) as "lead_time"
FROM
SalePurchase2 sp
left join Item i on
sp.Itemc = i.code
left join Salepurchase1 s on
sp.Vtype = s.Vtyp
and sp.Vno = s.Vno
and sp.Vdt = s.Vdt
left join Acknow a2 on sp.Pbillno =a2.Pbillno
left join acm a on
sp.Acno = a.code
Where
sp.Vtype = 'PB'
and sp.Vdt >= cast(DATEADD(day, -91, GETDATE()) as date)
and sp.Vdt <= cast(DATEADD(day, -1, GETDATE()) as date)
and i.Compname NOT IN ('GOODAID', 'PURE & C')
and i.Barcode NOT LIKE '%[^0-9]%'
and isnumeric(i.Barcode) = 1
and a.code NOT IN (59468, 59489)) a
Where
(a."lead_time">0
and a."lead_time"<7);
'''
data_bhw = pd.read_sql(sql_bhw, cnxn)
data_bhw[['drug_id']] \
= data_bhw[['drug_id']] \
.apply(pd.to_numeric, errors='ignore').astype('Int64')
#TEPL Data
mssql_tepl = MSSql(connect_via_tunnel=False, db='Esdata_TEPL')
cnxn = mssql_tepl.open_connection()
cursor = cnxn.cursor()
sql_tepl = '''
SELECT
*
FROM
(
SELECT
342 as "wh_id",
i.Barcode as "drug_id" ,
i.name as "drug_name",
a.Altercode as "distributor_id",
a.Name as "distributor_name",
a2.vdt as "gate-pass-date",
--sp.Vdt as "purchase_date",
--sp.RefVdt as "po_opend_date",
s.PBillDt as "po_created_date",
s.UpdatedOn as "purchase_confirm_date",
sp.Qty as "quantity" ,
DATEDIFF(day, s.PBillDt , a2.vdt) as "lead_time"
FROM
SalePurchase2 sp
left join Item i on
sp.Itemc = i.code
left join Salepurchase1 s on
sp.Vtype = s.Vtyp
and sp.Vno = s.Vno
and sp.Vdt = s.Vdt
left join Acknow a2 on sp.Pbillno =a2.Pbillno
left join acm a on
sp.Acno = a.code
Where
sp.Vtype = 'PB'
and sp.Vdt >= cast(DATEADD(day, -91, GETDATE()) as date)
and sp.Vdt <= cast(DATEADD(day, -1, GETDATE()) as date)
and i.Compname NOT IN ('GOODAID', 'PURE & C')
and i.Barcode NOT LIKE '%[^0-9]%'
and isnumeric(i.Barcode) = 1) a
Where
(a."lead_time">0
and a."lead_time"<7);
'''
data_tepl = pd.read_sql(sql_tepl, cnxn)
data_tepl[['drug_id']] \
= data_tepl[['drug_id']] \
.apply(pd.to_numeric, errors='ignore').astype('Int64')
data=pd.concat([data_bhw,data_tepl],sort=False,ignore_index=False)
run_date = str(datetime.now(tz=gettz('Asia/Kolkata')))
lead_time_data = 'warehouse_lead_time/lead_time_data_dump_{}.csv'.format(run_date)
s3 = S3()
s3.save_df_to_s3(df=data, file_name=lead_time_data)
data=data.drop(["wh_id"],axis=1)
# Reading Preferred distributor from S3
s3 = S3()
preferred_distributor = pd.read_csv(s3.download_file_from_s3(file_name="warehouse/preferred_distributors.csv"))
df_new = pd.merge(data, preferred_distributor, how='left', on='drug_id')
df_new[["lead_time", "distributor_1"]] = df_new[["lead_time", "distributor_1"]].fillna(0)
df_new[["distributor_1"]] = df_new[["distributor_1"]].astype('int')
# function for weighted mean
def w_avg(df, values, weights):
d = df[values]
w = df[weights]
return (d * w).sum() / w.sum()
df_new_1 = df_new.groupby(["drug_id", "distributor_id"]).apply(w_avg, 'lead_time', 'quantity').rename(
'weighted_lead_time').reset_index()
df_std = df_new.groupby(["drug_id", "distributor_id"])[["lead_time"]].std().reset_index()
df_std.rename(columns={'lead_time': 'lead_time_std'}, inplace=True)
df_drug_distributor = pd.merge(df_new_1, df_std, how='left', on=['drug_id', 'distributor_id'])
df_drug_distributor = pd.merge(df_drug_distributor, preferred_distributor, how='left', on=["drug_id"])
df_drug_distributor[["distributor_1", "lead_time_std"]] = df_drug_distributor[
["distributor_1", "lead_time_std"]].fillna(0)
df_drug_distributor[["distributor_1"]] = df_drug_distributor[["distributor_1"]].astype('int')
# lead time mean Capping 7 days.
df_drug_distributor['weighted_lead_time'] = np.where(df_drug_distributor['weighted_lead_time'] > 7, 7,
df_drug_distributor['weighted_lead_time'])
# minimum lead time 2 days
df_drug_distributor['weighted_lead_time'] = np.where(df_drug_distributor['weighted_lead_time'] < 2, 2,
df_drug_distributor['weighted_lead_time'])
# Lead time Std capping of 2 days
df_drug_distributor['lead_time_std'] = np.where(df_drug_distributor['lead_time_std'] > 2, 2,
df_drug_distributor['lead_time_std'])
# Minimum Lead time std is 1 day
df_drug_distributor['lead_time_std'] = np.where(df_drug_distributor['lead_time_std'] < 1, 1,
df_drug_distributor['lead_time_std'])
df_drug_distributor[["distributor_id"]] = df_drug_distributor[["distributor_id"]].astype('int')
df_drug_distributor['same_distributor'] = np.where(
df_drug_distributor['distributor_id'] == df_drug_distributor["distributor_1"], True, False)
preferred_distributor_drug = df_drug_distributor[df_drug_distributor["same_distributor"] == True]
other_distributor_drug = df_drug_distributor[df_drug_distributor["same_distributor"] == False]
# Drugs not in preferred distributor
drugs_not_in_preferred_distributor = df_drug_distributor[
~df_drug_distributor['drug_id'].isin(preferred_distributor_drug['drug_id'])]
drugs_not_in_preferred_distributor_mean = drugs_not_in_preferred_distributor.groupby(["drug_id"])[
["weighted_lead_time"]].mean().reset_index()
drugs_not_in_preferred_distributor_std = drugs_not_in_preferred_distributor.groupby(["drug_id"])[
["weighted_lead_time"]].std().reset_index()
drugs_not_in_preferred_distributor_1 = pd.merge(drugs_not_in_preferred_distributor_mean,
drugs_not_in_preferred_distributor_std, how='left', on='drug_id')
drugs_not_in_preferred_distributor_1 = drugs_not_in_preferred_distributor_1.fillna(0)
# Capping
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'] = np.where(
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'] > 7, 7,
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'])
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'] = np.where(
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'] < 2, 2,
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'])
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'] = np.where(
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'] > 2, 2,
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'])
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'] = np.where(
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'] < 1, 1,
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'])
drugs_not_in_preferred_distributor_1.rename(
columns={'weighted_lead_time_x': 'weighted_lead_time', 'weighted_lead_time_y': 'lead_time_std'}, inplace=True)
drug_in_preferred_distributor = preferred_distributor_drug.drop(
['drug_name', 'distributor_id', 'distributor_1', 'distributor_name_1', 'same_distributor'], axis=1)
drug_lead_time_std = pd.concat([drug_in_preferred_distributor, drugs_not_in_preferred_distributor_1], sort=False,
ignore_index=True)
weighted_lead_time_mean = drug_lead_time_std[["drug_id", "weighted_lead_time"]]
weighted_lead_time_std = drug_lead_time_std[["drug_id", "lead_time_std"]]
    # Assumption: barcoding lead time of 2 days and barcoding lead time std of 0.5 days
barcoding_lead_time=2
barcoding_lead_time_std=0.5
weighted_lead_time_mean['barcoding_lead_time']=barcoding_lead_time
weighted_lead_time_std['barcoding_lead_time_std']=barcoding_lead_time_std
weighted_lead_time_mean['weighted_lead_time'] = weighted_lead_time_mean['weighted_lead_time'] + \
weighted_lead_time_mean['barcoding_lead_time']
weighted_lead_time_std['lead_time_std'] = np.sqrt(
weighted_lead_time_std['lead_time_std'] * weighted_lead_time_std['lead_time_std'] +
weighted_lead_time_std['barcoding_lead_time_std'] * weighted_lead_time_std['barcoding_lead_time_std'])
weighted_lead_time_mean=weighted_lead_time_mean.drop(['barcoding_lead_time'],axis=1)
weighted_lead_time_std=weighted_lead_time_std.drop(['barcoding_lead_time_std'],axis=1)
return weighted_lead_time_mean, weighted_lead_time_std
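# Illustrative sketch (not used in production): demonstrates the quantity-weighted
# lead-time aggregation performed inside lead_time() on a tiny made-up dataset.
# The drug/distributor ids and quantities below are assumptions for demonstration only.
def _example_weighted_lead_time():
    demo = pd.DataFrame({
        'drug_id': [101, 101],
        'distributor_id': [7, 7],
        'lead_time': [2, 6],   # days between PO creation and gate pass
        'quantity': [30, 10],  # purchase quantity acts as the weight
    })
    # weighted mean = (2*30 + 6*10) / (30 + 10) = 3.0 days
    weighted = demo.groupby(['drug_id', 'distributor_id']).apply(
        lambda g: (g['lead_time'] * g['quantity']).sum() / g['quantity'].sum()
    ).rename('weighted_lead_time').reset_index()
    return weighted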
def review_time():
# Review Time for distributor
s3 = S3()
df_1 = pd.read_csv(s3.download_file_from_s3(file_name="warehouse/review_time_warehouse_distributor.csv"))
# Preferred distributor
df_2 = pd.read_csv(s3.download_file_from_s3(file_name="warehouse/preferred_distributors.csv"))
# If null then take 4 days of review time
df_1 = df_1.fillna(4)
df_1['review_time'] = df_1['review_time'].astype('int')
review_time_new = pd.merge(df_2, df_1, left_on='distributor_1', right_on='distributor_id', how='left')
review_time_new = review_time_new.drop(
["drug_name", "distributor_1", "distributor_name_1", "distributor_id", "distributor_name"], axis=1)
return review_time_new
def wh_safety_stock_calc(
ss_runtime_var, wh_drug_list, forecast, last_month_sales, demand_daily_deviation, current_month_date,
forecast_date, reset_date, logger=None, expected_nso=0, nso_history_days=90, rs_db=None):
""" Safety stock calculation for warehouse """
# Lead time mean & Std
lead_time_mean, lead_time_std = lead_time()
service_level = ss_runtime_var['service_level'] # 0.95
ordering_freq = ss_runtime_var['ordering_freq'] # 4
max_review_period = review_time()
z = ss_runtime_var['z']
cap_ss_days = ss_runtime_var['cap_ss_days']
if cap_ss_days == 0:
cap_ss_days = 100000
# getting latest month forecast
forecast['month_begin_dt'] = pd.to_datetime(
forecast['month_begin_dt']).dt.date
first_month = forecast['month_begin_dt'].min()
forecast_first_month = forecast[forecast['month_begin_dt'] == first_month]
# creating inventory level dataframe
repln = forecast_first_month.copy()
repln = pd.merge(repln, lead_time_mean, how='left', on='drug_id') # merge lead time mean
repln = pd.merge(repln, lead_time_std, how='left', on='drug_id') # merge lead time std
repln = pd.merge(repln, max_review_period, how='left', on='drug_id') # merge review time
# rename the columns
repln.rename(columns={'weighted_lead_time': 'lead_time_mean', 'review_time': 'max_review_period'}, inplace=True)
    # use defaults of 4 days (lead time mean), 2 days (lead time std) and 4 days (review time) if data is missing
repln['lead_time_mean'] = repln['lead_time_mean'].fillna(4)
repln['lead_time_std'] = repln['lead_time_std'].fillna(2)
repln['max_review_period'] = repln['max_review_period'].fillna(4)
repln['ordering_freq'] = ordering_freq
repln['service_level'] = service_level
repln['z_value'] = z
repln = wh_drug_list.merge(repln, on='drug_id')
num_days = monthrange(first_month.year, first_month.month)[1]
repln['demand_daily'] = repln['fcst'] / num_days
# check to see if forecast error is to be used instead of actual demand daily deviation
if ss_runtime_var['use_fcst_error'] == 'Y':
hist_fcst_err = get_forecast_error(rs_db, ss_runtime_var['fcst_hist_to_use'], last_month_sales,
current_month_date, forecast_date, num_days)
hist_fcst_err['demand_daily_deviation'] = hist_fcst_err['demand_daily_deviation'] / np.sqrt(num_days)
repln = repln.merge(hist_fcst_err, on='drug_id', how='left')
print("used forecast error instead of demand deviation")
else:
repln = repln.merge(demand_daily_deviation, on='drug_id', how='left')
repln['demand_daily_deviation'].fillna(0, inplace=True)
# warehouse overall safety stock
repln['ss_wo_cap'] = np.round(repln['z_value'] * np.sqrt(
(
repln['lead_time_mean'] *
repln['demand_daily_deviation'] *
repln['demand_daily_deviation']
) +
(
repln['lead_time_std'] *
repln['lead_time_std'] *
repln['demand_daily'] *
repln['demand_daily']
)))
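    # Illustrative arithmetic (made-up numbers): with z = 1.64, lead_time_mean = 4,
    # demand_daily_deviation = 5, lead_time_std = 2 and demand_daily = 10, the
    # safety stock is 1.64 * sqrt(4*5^2 + 2^2*10^2) = 1.64 * sqrt(500) ~= 37 units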
repln = repln.merge(last_month_sales, on='drug_id', how='left')
repln['safety_stock_days'] = np.round(
repln['ss_wo_cap'] * num_days / repln['fcst'], 1)
# calculate capping days
repln['cap_ss_days'] = np.round(repln['lead_time_mean'] +
repln['z_value'] * repln['lead_time_std'] +
repln['max_review_period'])
repln['cap_ss_days'] = np.where(repln['cap_ss_days'] > cap_ss_days, cap_ss_days, repln['cap_ss_days'])
    # capping SS days based on forecasted sales
repln['safety_stock'] = np.where(repln['safety_stock_days'] > repln['cap_ss_days'],
np.round(repln['cap_ss_days'] * repln['fcst'] / num_days),
repln['ss_wo_cap'])
# setting min SS at 2 days based on forecasted sales
repln['safety_stock'] = np.where(repln['safety_stock_days'] < 2, np.round(2 * repln['fcst'] / num_days),
repln['safety_stock'])
# capping SS days based on last month's sales
repln['safety_stock'] = np.where(repln['safety_stock'] * num_days / repln['last_month_sales'] > cap_ss_days,
np.round(cap_ss_days * repln['last_month_sales'] / num_days),
repln['safety_stock'])
repln['rop_without_nso'] = np.round(repln['safety_stock'] + repln['demand_daily'] * (repln['lead_time_mean'] +
repln['max_review_period']))
    # tweaking ROP to include launch stock
launch_stock_per_store = get_launch_stock_per_store(rs_db, nso_history_days, reset_date)
repln = repln.merge(launch_stock_per_store, on='drug_id', how='left')
repln['launch_stock_per_store'].fillna(0, inplace=True)
repln['expected_nso'] = expected_nso
repln['reorder_point'] = repln['rop_without_nso'] + \
np.round((repln['lead_time_mean'] + repln['max_review_period']) *
repln['expected_nso'] / num_days) * \
repln['launch_stock_per_store']
repln['reorder_point'] = np.round(repln['reorder_point'])
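    # Illustrative arithmetic (made-up numbers): with lead_time_mean + max_review_period
    # = 8 days, expected_nso = 4 launches in a 30-day month and launch_stock_per_store
    # = 6 units, the ROP is raised by round(8 * 4 / 30) * 6 = 1 * 6 = 6 units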
repln['oup_without_nso'] = np.round(
repln['rop_without_nso'] +
repln['demand_daily'] * repln['ordering_freq'])
repln['order_upto_point'] = np.round(
repln['reorder_point'] +
repln['demand_daily'] * repln['ordering_freq'])
# shelf safety stock
repln['shelf_min'] = np.round(repln['safety_stock'] / 2)
repln['shelf_max'] = repln['safety_stock']
# days of safety stock, reorder point and order upto point calculations
repln['last_month_sales'].fillna(0, inplace=True)
repln['safety_stock_days'] = np.round(
repln['safety_stock'] * num_days / repln['last_month_sales'], 1)
repln['reorder_point_days'] = np.round(
repln['reorder_point'] * num_days / repln['last_month_sales'], 1)
repln['order_upto_days'] = np.round(
repln['order_upto_point'] * num_days / repln['last_month_sales'], 1)
return repln
def get_forecast_error(rs_db, fcst_hist_to_use, last_month_sales, current_month_date, forecast_date, num_days):
first_forecast_month = str(current_month_date - relativedelta(months=fcst_hist_to_use))
q = """
select
wss."drug-id" as drug_id,
wss."month-begin-dt" as month_forecasted,
wss."fcst" as forecast,
(
select
wss1."last-month-sales"
from
"prod2-generico"."wh-safety-stock" wss1
where
wss."drug-id" = wss1."drug-id"
and date(add_months(wss."month-begin-dt",
1))= wss1."month-begin-dt"
order by
wss1."drug-id",
wss1."month-begin-dt"
limit 1
) as actual
from
"prod2-generico"."wh-safety-stock" wss
where
1 = 1
and wss.fcst notnull
and wss."month-begin-dt" >= '{}'
and wss."month-begin-dt" >= '2022-02-01'
order by
wss."drug-id",
wss."month-begin-dt"
""".format(first_forecast_month)
hist_fcst_err = rs_db.get_df(q)
last_month_date = pd.to_datetime(forecast_date) - relativedelta(months=1)
last_month_sales['last_month_date'] = last_month_date.date()
hist_fcst_err = hist_fcst_err.merge(last_month_sales, left_on=['drug_id', 'month_forecasted'],
right_on=['drug_id', 'last_month_date'], how='left')
hist_fcst_err['actual'] = np.where(np.isnan(hist_fcst_err['actual']), hist_fcst_err['last_month_sales'],
hist_fcst_err['actual'])
hist_fcst_err.drop(columns=['last_month_sales', 'last_month_date'], inplace=True)
hist_fcst_err = hist_fcst_err[np.isnan(hist_fcst_err['actual']) == False]
hist_fcst_err['squared_error'] = (hist_fcst_err['forecast'] - hist_fcst_err['actual']) ** 2
hist_fcst_err = hist_fcst_err.groupby('drug_id').apply(get_rmse).reset_index()
hist_fcst_err['demand_daily_deviation'] = hist_fcst_err['rmse'] / np.sqrt(num_days)
return hist_fcst_err[['drug_id', 'demand_daily_deviation']]
def get_rmse(df):
if len(df) >= 2:
rmse = np.sqrt(df['squared_error'].sum() / len(df))
else:
rmse = None
    return pd.Series(dict(rmse=rmse))
# ---- end of file: zeno_etl_libs/utils/warehouse/safety_stock/wh_safety_stock.py ----
import pandas as pd
def stores_ss_consolidation(safety_stock_df, db, schema,
min_column='safety_stock',
ss_column='reorder_point',
max_column='order_upto_point'):
# getting list of SKUs to be substituted and substituted with
wh_list_query = f"""
select "drug-id" , "drug-id-replaced" , "same-release"
from "{schema}"."wh-sku-subs-master"
where "add-wh" = 'No'
"""
wh_list = db.get_df(wh_list_query)
wh_list.columns = [c.replace('-', '_') for c in wh_list.columns]
# 3 lists - to not keep, to substitute and to substitute with
sku_reject_list = wh_list.loc[
wh_list['same_release'] == 'NO', 'drug_id']
sku_to_replace_list = wh_list.loc[
wh_list['same_release'] == 'YES', 'drug_id']
sku_substitute_list = wh_list.loc[
wh_list['same_release'] == 'YES', 'drug_id_replaced']
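    # Worked example (hypothetical drug ids): a drug with same_release = 'NO' gets its
    # min/ss/max zeroed below; a drug with same_release = 'YES' and drug_id_replaced = X
    # is zeroed as well, with its min/ss/max quantities added onto drug X's values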
    # separating safety_stock_df into rows that will change and rows that won't
sku_cnsld_list = list(sku_reject_list) + list(sku_to_replace_list) + list(sku_substitute_list)
safety_stock_df_cnsld = safety_stock_df[
(safety_stock_df['drug_id'].isin(sku_cnsld_list))
]
print('SS to be changed due to WH ', safety_stock_df_cnsld.shape[0])
safety_stock_df_rest = safety_stock_df[
~(safety_stock_df['drug_id'].isin(sku_cnsld_list))
]
if len(safety_stock_df_cnsld) > 0:
# SKU to be changed - not to keep and substitute with
sku_reject = safety_stock_df_cnsld.merge(
wh_list.query('same_release == "NO"')[
['drug_id']].drop_duplicates(),
how='inner', on='drug_id')
sku_to_replace = safety_stock_df_cnsld.merge(
wh_list.query('same_release == "YES"')[
['drug_id', 'drug_id_replaced']].drop_duplicates(),
how='inner', on='drug_id')
sku_substitute = safety_stock_df_cnsld.merge(
wh_list.query('same_release == "YES"')[
['drug_id_replaced']].drop_duplicates(),
how='inner', left_on='drug_id', right_on='drug_id_replaced')
sku_substitute.drop('drug_id_replaced', axis=1, inplace=True)
print('SKU rejected ', sku_reject.shape[0])
print('SKU replace ', sku_to_replace.shape[0])
print('SKU substitute ', sku_substitute.shape[0])
# updated ss calculation - to reject
sku_reject_new = sku_reject.copy()
sku_reject_new[min_column] = 0
sku_reject_new[ss_column] = 0
sku_reject_new[max_column] = 0
# updated ss calculation - to replace with wh skus
sku_substitute_new = sku_to_replace.groupby('drug_id_replaced')[
[min_column, ss_column, max_column]].sum().reset_index()
sku_substitute_new.rename(columns={'drug_id_replaced': 'drug_id'}, inplace=True)
sku_to_replace_new = sku_to_replace.copy()
sku_to_replace_new.drop('drug_id_replaced', axis=1, inplace=True)
sku_to_replace_new[min_column] = 0
sku_to_replace_new[ss_column] = 0
sku_to_replace_new[max_column] = 0
# updated ss calculation - to substitute with
sku_substitute_new = sku_substitute.merge(
sku_substitute_new[['drug_id', min_column, ss_column, max_column]],
on='drug_id', suffixes=('', '_y'), how='left')
sku_substitute_new[min_column + '_y'].fillna(0, inplace=True)
sku_substitute_new[ss_column + '_y'].fillna(0, inplace=True)
sku_substitute_new[max_column + '_y'].fillna(0, inplace=True)
sku_substitute_new[min_column] = (
sku_substitute_new[min_column] +
sku_substitute_new[min_column + '_y'])
sku_substitute_new[ss_column] = (
sku_substitute_new[ss_column] +
sku_substitute_new[ss_column + '_y'])
sku_substitute_new[max_column] = (
sku_substitute_new[max_column] +
sku_substitute_new[max_column + '_y'])
sku_substitute_new.drop(
[min_column + '_y', ss_column + '_y', max_column + '_y'],
axis=1, inplace=True)
# merging final dataframe
safety_stock_df_prev = pd.concat(
[sku_reject, sku_to_replace, sku_substitute],
axis=0, ignore_index=True)
safety_stock_df_new = pd.concat(
[safety_stock_df_rest, sku_reject_new, sku_to_replace_new,
sku_substitute_new], axis=0, ignore_index=True)
else:
safety_stock_df_new = safety_stock_df.copy()
safety_stock_df_prev = pd.DataFrame()
    # test case 1: pre and post SKU counts should match
pre_drug_count = safety_stock_df.shape[0]
post_drug_count = safety_stock_df_new.shape[0]
pre_max_qty = safety_stock_df[max_column].sum()
post_max_qty = safety_stock_df_new[max_column].sum()
    if pre_drug_count != post_drug_count:
        print('WARNING: SKU counts do not match after consolidation')
print('Reduction in max quantity:',
str(round(100*(1 - post_max_qty/pre_max_qty), 2)) + '%')
    return safety_stock_df_new, safety_stock_df_prev
# ---- end of file: zeno_etl_libs/utils/warehouse/wh_intervention/store_portfolio_consolidation.py ----
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from calendar import monthrange
from zeno_etl_libs.utils.ipc.data_prep import forecast_data_prep
def data_checks(drug_sales_monthly, wh_drug_list, reset_date, logger,
rs_db):
# MONTHLY CHECKS
logger.info(
str(drug_sales_monthly.drug_id.nunique()) +
str(drug_sales_monthly['month_begin_dt'].nunique()))
logger.info(str(
drug_sales_monthly.drug_id.nunique() *
drug_sales_monthly['month_begin_dt'].nunique()))
assert (drug_sales_monthly.drug_id.nunique() *
drug_sales_monthly['month_begin_dt'].nunique()
== len(drug_sales_monthly))
# CHECKING FOR DRUGS NOT IN SALES DATA MONTHLY
drug_missed_fcst = wh_drug_list[
~wh_drug_list.drug_id.isin(drug_sales_monthly['drug_id'])]['drug_id']
drug_missed_fcst = str(list(drug_missed_fcst))
drug_missed_fcst = drug_missed_fcst.replace('[', '(').replace(']', ')')
if len(drug_missed_fcst) > 2:
drug_missed_fcst = rs_db.get_df('''
select
id as drug_id,
"drug-name" as drug_name,
type,
date("created-at") as creation_date
from
"prod2-generico".drugs
where
id in {}
'''.format(drug_missed_fcst))
drug_missed_sale_history = rs_db.get_df('''
select
"drug-id" as drug_id,
date(max("created-at")) as last_sale_date
from
"prod2-generico".sales
        where
            "created-at" < '{reset_date}'
and quantity > 0
and "drug-id" in {drug_id_list}
group by
"drug-id"
'''.format(drug_id_list = str(
list(drug_missed_fcst['drug_id'])).replace('[', '(').replace(
']', ')'), reset_date = str(reset_date)))
drug_missed_fcst = drug_missed_fcst.merge(
drug_missed_sale_history, on='drug_id', how='inner')
logger.info(
'Drug in SKU list but with no history' + str(drug_missed_fcst))
        # KEEP DRUGS THAT ARE NOT DISCONTINUED/BANNED/BLANK AND HAVE SOLD WITHIN THE LOOKBACK WINDOW (152 DAYS)
days = 152
logger.info('Total missing sales' + str(len(drug_missed_fcst)))
logger.info(
'Removing unnecessary drug types' +
str(drug_missed_fcst[
drug_missed_fcst.type.isin(
['discontinued-products', 'banned', ''])
].shape[0]))
logger.info(
'Removing drugs with no sales in last 6 months' +
str(drug_missed_fcst[
drug_missed_fcst['last_sale_date'] <=
(reset_date - timedelta(days=days))].shape[0]))
drug_missed_fcst_list = drug_missed_fcst[
(~drug_missed_fcst.type.isin(
['discontinued-products', 'banned', ''])) &
(drug_missed_fcst['last_sale_date'] >
(reset_date - timedelta(days=days)))
].sort_values('last_sale_date')
logger.info('Missing drug list' + str(drug_missed_fcst_list))
return 0
def get_product_list(rs_db):
    '''Getting product list to be kept in warehouse'''
    # TODO - IN FUTURE TO BE COMING FROM WMS DB
wh_drug_list_query = '''
select
wssm."drug-id" as drug_id,
d."drug-name" drug_name,
d."type",
d.category,
d.company,
'NA' as bucket
from
"prod2-generico"."wh-sku-subs-master" wssm
left join "prod2-generico".drugs d on
d.id = wssm."drug-id"
where
wssm."add-wh" = 'Yes'
and d."type" not in ('discontinued-products')
and d.company <> 'GOODAID'
'''
wh_drug_list = rs_db.get_df(wh_drug_list_query)
return wh_drug_list
def wh_data_prep(
store_id_list, current_month_date, reset_date, type_list, rs_db, logger,
ss_runtime_var, schema):
'''Getting data prepared for warehouse forecasting'''
# CALLING STORES DATA PREP FOR ALL STORES AS LOGIC IS SAME
last_date = datetime(day=1, month=4, year=2021).date()
next_month_date = datetime(current_month_date.year +
int(current_month_date.month / 12),
((current_month_date.month % 12) + 1), 1).date()
_, drug_sales_monthly, _, demand_daily_deviation = forecast_data_prep(
store_id_list, type_list, reset_date, rs_db, schema, logger, last_date=None,
is_wh='Y')
# GETTING PRODUCT LIST
wh_drug_list = get_product_list(rs_db)
logger.info('# of Drugs in WH list' + str(len(wh_drug_list)))
    # FILTERING OUT DRUG IDS NOT CONSIDERED IN ABC-XYZ CLASSIFICATION
drug_sales_monthly = drug_sales_monthly[
drug_sales_monthly.drug_id.isin(wh_drug_list['drug_id'])]
# Extrapolate current month's sales but with condition
if ss_runtime_var['for_next_month'] == 'Y':
if ss_runtime_var['debug_mode'] == 'Y':
curr_day = pd.to_datetime(reset_date).day - 1
curr_month_days = monthrange(
current_month_date.year, current_month_date.month)[1]
else:
curr_day = datetime.now().day - 1
curr_month_days = monthrange(
current_month_date.year, current_month_date.month)[1]
drug_sales_monthly['net_sales_quantity'] = np.where(
drug_sales_monthly['month_begin_dt'] == str(current_month_date),
round(drug_sales_monthly['net_sales_quantity'] *
curr_month_days / curr_day),
drug_sales_monthly['net_sales_quantity'])
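        # e.g. (made-up numbers) 20 days elapsed of a 30-day month with 100 units sold
        # so far -> extrapolated month total = round(100 * 30 / 20) = 150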
else:
drug_sales_monthly = drug_sales_monthly[
drug_sales_monthly['month_begin_dt'] != str(current_month_date)]
# DATA CHECKS
_ = data_checks(
drug_sales_monthly, wh_drug_list, current_month_date, logger, rs_db)
# FILTERING OUT LENGTH OF TIME SERIES BASED ON FIRST BILL DATE
drug_list = drug_sales_monthly.drug_id.unique()
bill_date_query = '''
select
i."drug-id" as drug_id,
min(date(bi."created-at")) as "first_bill_date"
from
"prod2-generico"."bill-items-1" bi
join "prod2-generico"."inventory-1" i on
i.id = bi."inventory-id"
where
i."drug-id" in {}
group by
i."drug-id"
'''.format(tuple(drug_list) + (0, 0))
bill_date = rs_db.get_df(bill_date_query)
bill_date['first_bill_date'] = pd.to_datetime(bill_date['first_bill_date'])
bill_date['bill_month'] = [
datetime(b_date.year, b_date.month, 1).date()
for b_date in bill_date['first_bill_date']]
# TAKING HISTORY FROM THE POINT FIRST SALE IS MADE
drug_sales_monthly = drug_sales_monthly.merge(
bill_date, how='left', on='drug_id')
assert sum(drug_sales_monthly['first_bill_date'].isna()) == 0
drug_sales_monthly = drug_sales_monthly.query(
'month_begin_dt >= bill_month')
# EXPLORING HISTORY OF DRUGS
drug_history = drug_sales_monthly. \
groupby('drug_id')['net_sales_quantity'].count().reset_index()
drug_history.columns = ['drug_id', 'month_history']
logger.info('Total Drugs' + str(len(drug_history)))
logger.info('History >= 12 months' + str(
len(drug_history.query('month_history >=12'))))
logger.info('History 3-11 months' + str(
len(drug_history.query('month_history < 12').
query('month_history >=3'))))
logger.info('History < 3 months' + str(
len(drug_history.query('month_history < 3'))))
return drug_sales_monthly, wh_drug_list, drug_history, demand_daily_deviation
def get_launch_stock_per_store(rs_db, days, reset_date):
new_stores_list_query = """
select
id as store_id,
date("opened-at") as opened_at
from
"prod2-generico".stores s
where
"opened-at" >= '{reset_date}' - {days}
and "opened-at" <= '{reset_date}'
and id not in (281, 297)
and "franchisee-id" = 1
""".format(reset_date=reset_date, days=days)
new_stores_list = rs_db.get_df(new_stores_list_query)
store_ids_list = tuple(new_stores_list['store_id'].astype(str))+('0','0')
# get shortbook launch orders
sb_orders_query = '''
select
distinct sb."store-id" as store_id,
sb."drug-id" as drug_id,
date(sb."created-at") as created_at,
sb.quantity as ordered_quantity,
date(s2."opened-at") as opened_at
from
"prod2-generico"."short-book-1" sb
left join "prod2-generico".stores s2 on
s2.id = sb."store-id"
where
"store-id" in {store_ids}
and date(sb."created-at") < date(s2."opened-at")
'''.format(store_ids=store_ids_list, days=days)
sb_orders = rs_db.get_df(sb_orders_query)
wh_drug_list = get_product_list(rs_db)
df = sb_orders.copy()
df = df[df['drug_id'].isin(wh_drug_list['drug_id'])]
df = df[['store_id', 'drug_id', 'ordered_quantity']]
df.drop_duplicates(inplace=True)
new_stores_count = sb_orders['store_id'].nunique()
df = df[['drug_id', 'ordered_quantity']]
launch_stock = df.groupby('drug_id').sum().reset_index()
launch_stock_per_store = launch_stock.copy()
launch_stock_per_store['ordered_quantity'] = \
launch_stock['ordered_quantity'] / new_stores_count
launch_stock_per_store.rename(
columns={'ordered_quantity': 'launch_stock_per_store'}, inplace=True)
    return launch_stock_per_store
# ---- end of file: zeno_etl_libs/utils/warehouse/data_prep/wh_data_prep.py ----
import numpy as np
from zeno_etl_libs.utils.warehouse.forecast.helper_functions import make_future_df
from zeno_etl_libs.utils.warehouse.forecast.errors import ape_calc, ae_calc
from fbprophet import Prophet
# prophet train
def prophet_train_monthly(
df, n_changepoints_factor=4, changepoint_prior_scale=0.2, growth='linear',
changepoint_range=1, interval_width=0.68, mcmc_samples=0, horizon=3,
out_of_sample=3):
# params
n_changepoints = int(np.round(len(df)/n_changepoints_factor))
# dividing the series into train and validation set
df = df.copy()
df['days'] = df['month_begin_dt'].dt.daysinmonth
df.rename(columns={'month_begin_dt': 'ds', 'net_sales_quantity': 'y'},
inplace=True)
train_df = df.drop(df.tail(out_of_sample).index)
validation_df = df.tail(out_of_sample)
# model building
model = Prophet(
growth=growth,
n_changepoints=n_changepoints,
changepoint_prior_scale=changepoint_prior_scale,
changepoint_range=changepoint_range,
interval_width=interval_width,
mcmc_samples=mcmc_samples,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False)
model.add_seasonality(name='yearly_e', period=365.25, fourier_order=12)
model.add_regressor(name='days', mode='multiplicative')
fit = model.fit(train_df)
validation_fcst = fit.predict(validation_df)[[
'yhat', 'yhat_upper', 'yhat_lower']]
    # approximating the forecast standard deviation from the prediction interval
    # width (interval_width=0.68, i.e. roughly a +/-1 sigma band)
validation_std = (
validation_fcst['yhat_upper'].values -
validation_fcst['yhat_lower'].values)
# writing to final df
predict_df = validation_df
predict_df['fcst'] = np.round(validation_fcst['yhat'].values)
predict_df['std'] = np.round(validation_std)
# calculating errors
predict_df['ape'] = [
ape_calc(actual, forecast)
for actual, forecast in zip(predict_df['y'], predict_df['fcst'])]
predict_df['ae'] = [
ae_calc(actual, forecast)
for actual, forecast in zip(predict_df['y'], predict_df['fcst'])]
predict_df.rename(columns={'ds': 'month_begin_dt', 'y': 'actual'},
inplace=True)
predict_df.drop('days', axis=1, inplace=True)
return predict_df # , fit
# prophet predict
def prophet_predict_monthly(
df, n_changepoints_factor=4, changepoint_prior_scale=0.2, growth='linear',
changepoint_range=1, interval_width=0.68, mcmc_samples=0, horizon=3,
out_of_sample=3):
# params
n_changepoints = int(np.round(len(df)/n_changepoints_factor))
# creating predict df
df = df.copy()
df['days'] = df['month_begin_dt'].dt.daysinmonth
predict_df = make_future_df(df, out_of_sample)
predict_df['days'] = predict_df['month_begin_dt'].dt.daysinmonth
# column name change for prophet
df.rename(columns={'month_begin_dt': 'ds', 'net_sales_quantity': 'y'},
inplace=True)
predict_df.rename(
columns={'month_begin_dt': 'ds', 'net_sales_quantity': 'y'},
inplace=True)
# model building
model = Prophet(
growth=growth,
n_changepoints=n_changepoints,
changepoint_prior_scale=changepoint_prior_scale,
changepoint_range=changepoint_range,
interval_width=interval_width,
mcmc_samples=mcmc_samples,
yearly_seasonality=True,
weekly_seasonality=False,
daily_seasonality=False)
fit = model.fit(df)
forecast = fit.predict(predict_df)[[
'yhat', 'yhat_upper', 'yhat_lower']]
    # approximating the forecast standard deviation from the prediction interval
    # width (interval_width=0.68, i.e. roughly a +/-1 sigma band)
forecast_std = (
forecast['yhat_upper'].values - forecast['yhat_lower'].values)
# writing to final df
predict_df['fcst'] = np.round(forecast['yhat'].values)
predict_df['std'] = np.round(forecast_std)
predict_df.rename(columns={'ds': 'month_begin_dt', 'y': 'actual'},
inplace=True)
predict_df.drop('days', axis=1, inplace=True)
    return predict_df  # , fit
# ---- end of file: zeno_etl_libs/utils/warehouse/forecast/prophet.py ----
import pandas as pd
from dateutil.relativedelta import relativedelta
from joblib import Parallel, delayed
from multiprocessing import cpu_count
# weekly vs monthly demand pattern
def month_week_plt(monthly_data, weekly_data, drug_id, drug_name, bucket):
week = weekly_data.loc[
weekly_data['drug_id'] == drug_id]
week.rename(columns={'week_begin_dt': 'date'}, inplace=True)
ax = week[['date', 'net_sales_quantity']].set_index('date').plot()
ax.set_title('{} {} {}'.format(drug_id, drug_name, bucket), )
month = monthly_data.loc[
monthly_data['drug_id'] == drug_id]
month.rename(columns={'month_begin_dt': 'date'}, inplace=True)
ax = month[['date', 'net_sales_quantity']].set_index('date').plot()
ax.set_title('{} {} {}'.format(drug_id, drug_name, bucket))
return 0
# make forward looking data frame for forecast
def make_future_df(df, horizon):
df = df.copy()
drug_id = df['drug_id'].values[-1]
# prev_month_dt = df['month_begin_dt'].dt.date.values[-1]
prev_month_dt = pd.to_datetime(df['month_begin_dt'].values[-1])
if horizon == 3:
predict_month_dt = [
prev_month_dt + relativedelta(months=h)
for h in range(1, horizon + 1)]
predict_year = [
(prev_month_dt + relativedelta(months=h)).year
for h in range(1, horizon + 1)]
predict_month = [
(prev_month_dt + relativedelta(months=h)).month
for h in range(1, horizon + 1)]
else:
predict_month_dt = [
prev_month_dt + relativedelta(days=28*h)
for h in range(1, horizon + 1)]
predict_year = [
(prev_month_dt + relativedelta(days=28*h)).year
for h in range(1, horizon + 1)]
predict_month = [
(prev_month_dt + relativedelta(days=28*h)).month
for h in range(1, horizon + 1)]
predict_df = pd.DataFrame()
predict_df['drug_id'] = pd.Series([drug_id] * horizon)
predict_df['month_begin_dt'] = pd.to_datetime(pd.Series(predict_month_dt))
predict_df['year'] = pd.Series(predict_year)
predict_df['month'] = pd.Series(predict_month)
predict_df['fcst'] = 0
return predict_df
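# Illustrative sketch (never called): shows the future frame produced by
# make_future_df() for one drug and a 3-month horizon. The history below is
# made up for demonstration only.
def _example_make_future_df():
    history = pd.DataFrame({
        'drug_id': [42, 42],
        'month_begin_dt': pd.to_datetime(['2021-01-01', '2021-02-01']),
    })
    future = make_future_df(history, 3)
    # future contains month_begin_dt 2021-03-01 to 2021-05-01 for drug 42,
    # with year/month columns filled and fcst initialised to 0
    return future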
# forecast visualisation
def forecast_viz(train, forecast, drug_id, drug_name, bucket, model, k=3):
train = train.copy()
forecast = forecast.copy()
train = train[['drug_id', 'month_begin_dt', 'net_sales_quantity']]
    forecast = forecast[['drug_id', 'month_begin_dt', 'fcst']]
    merged = train.merge(
        forecast, how='outer', on=['drug_id', 'month_begin_dt'])
merged.drop('drug_id', axis=1, inplace=True)
ax = merged.set_index('month_begin_dt').plot()
ax.set_title('{} {} {} {}'.format(drug_id, drug_name, bucket, model))
return 0
# parallel execution over drug groups using joblib
def apply_parallel_ets(
dfGrouped, func, ets_params, horizon=3, out_of_sample=3):
retLst = Parallel(n_jobs=cpu_count() - 4, verbose=10)(
delayed(func)(
group, ets_params, horizon, out_of_sample)
for name, group in dfGrouped)
return pd.concat(retLst)
def apply_parallel_prophet(
dfGrouped, func, n_changepoints_factor, changepoint_prior_scale,
growth, changepoint_range, interval_width, mcmc_samples, horizon,
out_of_sample):
retLst = Parallel(n_jobs=cpu_count() - 4, verbose=10)(
delayed(func)(
group, n_changepoints_factor, changepoint_prior_scale, growth,
changepoint_range, interval_width, mcmc_samples, horizon,
out_of_sample)
for name, group in dfGrouped)
    return pd.concat(retLst)
# ---- end of file: zeno_etl_libs/utils/warehouse/forecast/helper_functions.py ----
import time
import pandas as pd
import numpy as np
from itertools import product
from zeno_etl_libs.utils.warehouse.forecast.errors import train_error, \
train_error_ets_h1
from zeno_etl_libs.utils.warehouse.forecast.moving_average import \
ma_train_monthly, \
ma_predict_monthly
from zeno_etl_libs.utils.warehouse.forecast.ets import ets_train_monthly, \
ets_predict_monthly
# from zeno_etl_libs.utils.warehouse.forecast.prophet import prophet_train_monthly,\
# prophet_predict_monthly
from zeno_etl_libs.utils.warehouse.forecast.naive import naive_predict_monthly
from zeno_etl_libs.utils.warehouse.forecast.helper_functions import \
apply_parallel_ets
# from scripts.ops.warehouse.forecast.\
# helper_functions import apply_parallel_prophet
def wh_forecast(drug_sales_monthly, wh_drug_list, drug_history, logger=None):
"""
Bucketing based on History
1. For drugs with history < 3 months -> Naive
2. For drugs with history 3-11 month -> MA, SES (Simple exponential smoothing)
3. For drugs with history >= 12 months -> MA, ETS (Error, Trend,Seasonality)
"""
# BUCKET BASED ON HISTORY
bucket_h3 = drug_history[drug_history['month_history'] < 3]
bucket_h2minus = drug_history[
(drug_history['month_history'] >= 3) &
(drug_history['month_history'] <= 5)]
bucket_h2 = drug_history[
(drug_history['month_history'] >= 6) &
(drug_history['month_history'] < 12)]
bucket_h1 = drug_history[drug_history['month_history'] >= 12]
bucket_log = '''
Bucket H1 12+ months history - {},
Bucket H2 6-11 months history - {},
Bucket H2- 3-5 months history - {},
Bucket H3 <3 months history - {}'''.format(
len(bucket_h1), len(bucket_h2), len(bucket_h2minus), len(bucket_h3)
)
logger.info(bucket_log)
# SUBSETTING SALE HISTORY DATA FOR BUCKETS
drug_sales_monthly_bucket_h1 = drug_sales_monthly[
drug_sales_monthly['drug_id'].isin(bucket_h1['drug_id'])]
drug_sales_monthly_bucket_h2 = drug_sales_monthly[
drug_sales_monthly['drug_id'].isin(bucket_h2['drug_id'])]
drug_sales_monthly_bucket_h2minus = drug_sales_monthly[
drug_sales_monthly['drug_id'].isin(bucket_h2minus['drug_id'])]
drug_sales_monthly_bucket_h3 = drug_sales_monthly[
drug_sales_monthly['drug_id'].isin(bucket_h3['drug_id'])]
''' H1 bucket - Train and Forecast'''
logger.info(
'Drugs for training' +
str(drug_sales_monthly_bucket_h1.drug_id.nunique()))
# FORECASTING MODULES: MOVING AVERAGES K=3
ma_train_data_h1 = drug_sales_monthly_bucket_h1.copy()
ma_train_data_h1 = ma_train_data_h1[
['drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# model parameters
# k = 3 # N moving average
horizon = 3 # future forecast
# train
start = time.time()
ma_train_h1 = ma_train_data_h1.groupby('drug_id').apply(ma_train_monthly). \
reset_index(drop=True)
end = time.time()
logger.info('H1 MA Train: Run time ' + str(round(end - start, 2)) + 'secs')
# train error
start = time.time()
ma_train_error_h1 = ma_train_h1.groupby('drug_id').apply(train_error). \
reset_index(drop=True)
end = time.time()
logger.info('H1 MA Error: Run time ' + str(round(end - start, 2)) + 'secs')
# predict
start = time.time()
ma_predict_h1 = ma_train_data_h1.groupby('drug_id'). \
apply(ma_predict_monthly).reset_index(drop=True)
end = time.time()
logger.info('H1 MA Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
# FORECASTING MODULES: EXPONENTIAL SMOOTHING
ets_train_data_h1 = drug_sales_monthly_bucket_h1.copy()
ets_train_data_h1 = ets_train_data_h1[
['drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# model parameters
horizon = 3 # future forecast
out_of_sample = 3 # out of sample forecast
# holts winter implementation
trend = ['additive', None]
seasonal = ['additive', None]
damped = [True, False]
seasonal_periods = [12]
use_boxcox = [True, False]
ets_params = list(
product(trend, seasonal, damped, seasonal_periods, use_boxcox))
# train
start = time.time()
ets_train_h1 = apply_parallel_ets(
ets_train_data_h1.groupby('drug_id'), ets_train_monthly,
ets_params).reset_index(drop=True)
end = time.time()
logger.info('H1 ETS Train: Run time ' + str(round(end - start, 2)) + 'secs')
# train error
start = time.time()
ets_train_error_h1 = ets_train_h1.groupby('drug_id').apply(
train_error_ets_h1). \
reset_index(drop=True)
end = time.time()
logger.info('H1 ETS Error: Run time ' + str(round(end - start, 2)) + 'secs')
# predict
start = time.time()
ets_predict_h1 = apply_parallel_ets(
ets_train_data_h1.groupby('drug_id'), ets_predict_monthly,
ets_train_h1).reset_index(drop=True)
end = time.time()
logger.info('H1 ETS Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
''' # TODO - PROPHET TO BE INTG. LATER
# FORECASTING MODULES: PROPHET
prophet_train_data_h1 = drug_sales_monthly_bucket_h1.copy()
prophet_train_data_h1 = prophet_train_data_h1[
['drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# model parameters
horizon = 3 # future forecast
# holts winter implementation
n_changepoints_factor = 4
changepoint_prior_scale = 0.2
growth = 'linear'
changepoint_range = 1
interval_width = 0.68
mcmc_samples = 0
# train
start = time.time()
prophet_train_h1 = apply_parallel_prophet(
prophet_train_data_h1.groupby('drug_id'), prophet_train_monthly,
n_changepoints_factor, changepoint_prior_scale, growth,
changepoint_range, interval_width, mcmc_samples, horizon, out_of_sample
).reset_index(drop=True)
end = time.time()
logger.info(
'H1 Prophet Train: Run time ' + str(round(end-start, 2)) + 'secs')
# train error
start = time.time()
prophet_train_error_h1 = prophet_train_h1.groupby('drug_id').\
apply(train_error).reset_index(drop=True)
end = time.time()
logger.info(
'H1 Prophet Error: Run time ' + str(round(end-start, 2)) + 'secs')
# predict
start = time.time()
prophet_predict_h1 = apply_parallel_prophet(
prophet_train_data_h1.groupby('drug_id'), prophet_predict_monthly,
n_changepoints_factor, changepoint_prior_scale, growth,
changepoint_range, interval_width, mcmc_samples, horizon, out_of_sample
).reset_index(drop=True)
end = time.time()
logger.info(
'H1 Prophet Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
'''
# FORECASTING MODULE - ENSEMBLE
# identifying best model for each drug - using MA and ETS
ensemble_error_h1 = ets_train_error_h1.merge(
ma_train_error_h1, how='outer', on='drug_id', suffixes=('_ets', '_ma'))
ensemble_error_h1['model'] = np.where(
ensemble_error_h1['mape_ma'] < ensemble_error_h1['mape_ets'],
'ma', 'ets')
    # choosing ma where SS days for ets is crossing 1 month
    ensemble_error_h1['ss_days_ets'] = 14.84 * ensemble_error_h1['std'] / \
                                       ensemble_error_h1['actual']
ensemble_error_h1['model'] = np.where(ensemble_error_h1['ss_days_ets'] > 28,
'ma', 'ets')
ensemble_error_h1.loc[np.isnan(ensemble_error_h1['std']), 'model'] = 'ma'
del ensemble_error_h1['actual']
del ensemble_error_h1['std']
del ensemble_error_h1['ss_days_ets']
del ets_train_error_h1['actual']
del ets_train_error_h1['std']
ensemble_error_h1['mape'] = np.where(
ensemble_error_h1['model'] == 'ma',
ensemble_error_h1['mape_ma'], ensemble_error_h1['mape_ets'])
ensemble_error_h1['mae'] = np.where(
ensemble_error_h1['model'] == 'ma',
ensemble_error_h1['mae_ma'], ensemble_error_h1['mae_ets'])
# creating ensemble dataframe for best model - MA + ETS
ma_drug_best_h1 = ensemble_error_h1.loc[
ensemble_error_h1['model'] == 'ma', 'drug_id']
ets_drug_best_h1 = ensemble_error_h1.loc[
ensemble_error_h1['model'] == 'ets', 'drug_id']
ma_train_best_h1 = ma_train_h1[
ma_train_h1['drug_id'].isin(ma_drug_best_h1)]
ma_predict_best_h1 = ma_predict_h1[
ma_predict_h1['drug_id'].isin(ma_drug_best_h1)]
ma_train_best_h1['model'] = 'ma'
ma_predict_best_h1['model'] = 'ma'
ets_train_best_h1 = ets_train_h1[
ets_train_h1['drug_id'].isin(ets_drug_best_h1)]
ets_predict_best_h1 = ets_predict_h1[
ets_predict_h1['drug_id'].isin(ets_drug_best_h1)]
ets_train_best_h1['model'] = 'ets'
ets_predict_best_h1['model'] = 'ets'
ensemble_train_h1 = pd.concat(
[ma_train_best_h1, ets_train_best_h1], axis=0)
ensemble_predict_h1 = pd.concat(
[ma_predict_best_h1, ets_predict_best_h1], axis=0)
''' # TODO - PROPHET TO BE INTG. LATER
# identifying best model for each drug - using MA, ETS and Prophet
ensemble_error_h1 = ets_train_error_h1.merge(
ma_train_error_h1, how='outer', on='drug_id',
suffixes=('_ets', '_ma')).merge(
prophet_train_error_h1, how='outer', on='drug_id',
suffixes=('', '_prophet'))
ensemble_error_h1.columns = [
'drug_id', 'mae_ets', 'mape_ets', 'mae_ma', 'mape_ma',
'mae_prophet', 'mape_prophet']
ensemble_error_h1['model'] = np.select(
[(ensemble_error_h1['mape_ma'] < ensemble_error_h1['mape_ets']) &
(ensemble_error_h1['mape_ma'] < ensemble_error_h1['mape_prophet']),
(ensemble_error_h1['mape_ets'] < ensemble_error_h1['mape_ma']) &
(ensemble_error_h1['mape_ets'] < ensemble_error_h1['mape_prophet']),
(ensemble_error_h1['mape_prophet'] < ensemble_error_h1['mape_ma']) &
(ensemble_error_h1['mape_prophet'] < ensemble_error_h1['mape_ets'])],
['ma', 'ets', 'prophet'], default='ets')
ensemble_error_h1['mape'] = np.select(
[ensemble_error_h1['model'] == 'ma',
ensemble_error_h1['model'] == 'ets',
ensemble_error_h1['model'] == 'prophet'],
[ensemble_error_h1['mape_ma'],
ensemble_error_h1['mape_ets'],
ensemble_error_h1['mape_prophet']],
default=ensemble_error_h1['mape_ets'])
ensemble_error_h1['mae'] = np.select(
[ensemble_error_h1['model'] == 'ma',
ensemble_error_h1['model'] == 'ets',
ensemble_error_h1['model'] == 'prophet'],
[ensemble_error_h1['mae_ma'],
ensemble_error_h1['mae_ets'],
ensemble_error_h1['mae_prophet']],
default=ensemble_error_h1['mae_ets'])
# creating ensemble dataframe for best model - MA + ETS + Prophet
ma_drug_best_h1 = ensemble_error_h1.loc[
ensemble_error_h1['model'] == 'ma', 'drug_id']
ets_drug_best_h1 = ensemble_error_h1.loc[
ensemble_error_h1['model'] == 'ets', 'drug_id']
prophet_drug_best_h1 = ensemble_error_h1.loc[
ensemble_error_h1['model'] == 'prophet', 'drug_id']
    ma_train_best_h1 = ma_train_h1[
        ma_train_h1['drug_id'].isin(ma_drug_best_h1)]
    ma_predict_best_h1 = ma_predict_h1[
        ma_predict_h1['drug_id'].isin(ma_drug_best_h1)]
ma_train_best_h1['model'] = 'ma'
ma_predict_best_h1['model'] = 'ma'
ets_train_best_h1 = ets_train_h1[
ets_train_h1['drug_id'].isin(ets_drug_best_h1)]
ets_predict_best_h1 = ets_predict_h1[
ets_predict_h1['drug_id'].isin(ets_drug_best_h1)]
ets_train_best_h1['model'] = 'ets'
ets_predict_best_h1['model'] = 'ets'
prophet_train_best_h1 = prophet_train_h1[
prophet_train_h1['drug_id'].isin(prophet_drug_best_h1)]
prophet_predict_best_h1 = prophet_predict_h1[
prophet_predict_h1['drug_id'].isin(prophet_drug_best_h1)]
prophet_train_best_h1['model'] = 'prophet'
prophet_predict_best_h1['model'] = 'prophet'
ensemble_train_h1 = pd.concat(
[ma_train_best_h1, ets_train_best_h1, prophet_train_best_h1], axis=0)
ensemble_predict_h1 = pd.concat(
[ma_predict_best_h1, ets_predict_best_h1, prophet_predict_best_h1],
axis=0)
'''
# H1 BUCKET AGGREGATING
ma_train_h1['model'] = 'ma'
ma_train_h1['history_bucket'] = 'H1'
ets_train_h1['model'] = 'ets'
ets_train_h1['history_bucket'] = 'H1'
ma_train_error_h1['model'] = 'ma'
ma_train_error_h1['history_bucket'] = 'H1'
ets_train_error_h1['model'] = 'ets'
ets_train_error_h1['history_bucket'] = 'H1'
ma_predict_h1['model'] = 'ma'
ma_predict_h1['history_bucket'] = 'H1'
ets_predict_h1['model'] = 'ets'
ets_predict_h1['history_bucket'] = 'H1'
train_h1 = pd.concat([ma_train_h1, ets_train_h1], axis=0)
train_error_h1 = pd.concat([ma_train_error_h1, ets_train_error_h1], axis=0)
predict_h1 = pd.concat([ma_predict_h1, ets_predict_h1], axis=0)
train_h1['forecast_type'] = 'train'
train_h1['final_fcst'] = 'N'
train_error_h1['forecast_type'] = 'train'
train_error_h1['final_fcst'] = 'N'
predict_h1['forecast_type'] = 'forecast'
predict_h1['final_fcst'] = 'N'
ensemble_train_h1['forecast_type'] = 'train'
ensemble_train_h1['final_fcst'] = 'Y'
ensemble_train_h1['history_bucket'] = 'H1'
ensemble_error_h1['forecast_type'] = 'train'
ensemble_error_h1['final_fcst'] = 'Y'
ensemble_error_h1['history_bucket'] = 'H1'
ensemble_predict_h1['forecast_type'] = 'forecast'
ensemble_predict_h1['final_fcst'] = 'Y'
ensemble_predict_h1['history_bucket'] = 'H1'
''' H2/H2- bucket - Train and Forecast'''
    logger.info(
        'H2 drugs for training: ' +
        str(drug_sales_monthly_bucket_h2.drug_id.nunique()))
# FORECASTING MODULES: MOVING AVERAGES K=3
ma_train_data_h2 = drug_sales_monthly_bucket_h2.copy()
ma_train_data_h2 = ma_train_data_h2[
['drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# model parameters
horizon = 3 # future forecast
# train
start = time.time()
ma_train_h2 = ma_train_data_h2.groupby('drug_id').apply(ma_train_monthly). \
reset_index(drop=True)
end = time.time()
logger.info('H2 MA Train: Run time ' + str(round(end - start, 2)) + 'secs')
# train error
start = time.time()
ma_train_error_h2 = ma_train_h2.groupby('drug_id').apply(train_error). \
reset_index(drop=True)
end = time.time()
logger.info('H2 MA Error: Run time ' + str(round(end - start, 2)) + 'secs')
# predict
start = time.time()
ma_predict_h2 = ma_train_data_h2.groupby('drug_id'). \
apply(ma_predict_monthly).reset_index(drop=True)
end = time.time()
logger.info('H2 MA Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
# FORECASTING MODULES: SIMPLE EXPONENTIAL SMOOTHING
ses_train_data_h2 = drug_sales_monthly_bucket_h2.copy()
ses_train_data_h2 = ses_train_data_h2[
['drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# variables
horizon = 3 # future forecast
out_of_sample = 3 # out of sample forecast
# ses implementation
trend = [None]
seasonal = [None]
damped = [False]
seasonal_periods = [12]
use_boxcox = [False]
ses_params = list(
product(trend, seasonal, damped, seasonal_periods, use_boxcox))
# train
start = time.time()
ses_train_h2 = apply_parallel_ets(
ses_train_data_h2.groupby('drug_id'), ets_train_monthly, ses_params
).reset_index(drop=True)
end = time.time()
    logger.info('H2 SES Train: Run time ' + str(round(end - start, 2)) + 'secs')
# train error
start = time.time()
ses_train_error_h2 = ses_train_h2.groupby('drug_id').apply(train_error). \
reset_index(drop=True)
end = time.time()
    logger.info('H2 SES Error: Run time ' + str(round(end - start, 2)) + 'secs')
# predict
start = time.time()
ses_predict_h2 = apply_parallel_ets(
ses_train_data_h2.groupby('drug_id'), ets_predict_monthly,
ses_train_h2).reset_index(drop=True)
end = time.time()
    logger.info('H2 SES Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
# FORECASTING MODULE - ENSEMBLE
# identifying best model for each drug - using MA and SES
ensemble_error_h2 = ses_train_error_h2.merge(
ma_train_error_h2, how='outer', on='drug_id', suffixes=('_ses', '_ma'))
ensemble_error_h2['model'] = np.where(
ensemble_error_h2['mape_ma'] < ensemble_error_h2['mape_ses'],
'ma', 'ses')
ensemble_error_h2['mape'] = np.where(
ensemble_error_h2['model'] == 'ma',
ensemble_error_h2['mape_ma'], ensemble_error_h2['mape_ses'])
ensemble_error_h2['mae'] = np.where(
ensemble_error_h2['model'] == 'ma',
ensemble_error_h2['mae_ma'], ensemble_error_h2['mae_ses'])
# creating ensemble dataframe for best_h2 model - MA + ses
ma_drug_best_h2 = ensemble_error_h2.loc[
ensemble_error_h2['model'] == 'ma', 'drug_id']
ses_drug_best_h2 = ensemble_error_h2.loc[
ensemble_error_h2['model'] == 'ses', 'drug_id']
ma_train_best_h2 = ma_train_h2[
ma_train_h2['drug_id'].isin(ma_drug_best_h2)]
ma_predict_best_h2 = ma_predict_h2[
ma_predict_h2['drug_id'].isin(ma_drug_best_h2)]
ma_train_best_h2['model'] = 'ma'
ma_predict_best_h2['model'] = 'ma'
ses_train_best_h2 = ses_train_h2[
ses_train_h2['drug_id'].isin(ses_drug_best_h2)]
ses_predict_best_h2 = ses_predict_h2[
ses_predict_h2['drug_id'].isin(ses_drug_best_h2)]
ses_train_best_h2['model'] = 'ses'
ses_predict_best_h2['model'] = 'ses'
ensemble_train_h2 = pd.concat(
[ma_train_best_h2, ses_train_best_h2], axis=0)
# getting best model for H2- bucket
ensemble_model_agg = ensemble_error_h2.groupby('model')['drug_id']. \
count().reset_index()
ensemble_model_best_h2 = ensemble_model_agg.loc[
ensemble_model_agg['drug_id'] == ensemble_model_agg['drug_id'].max(),
'model'].values[0]
    logger.info('Best model for H2 forecast: ' + ensemble_model_best_h2)
    # H2 minus bucket predict based on the overall best H2 model
train_data_h2minus = drug_sales_monthly_bucket_h2minus.copy()
predict_h2minus = pd.DataFrame()
start = time.time()
if ensemble_model_best_h2 == 'ses' and len(drug_sales_monthly_bucket_h2minus):
start = time.time()
train_data_h2minus['hyper_params'] = str(ses_params[0])
predict_h2minus = apply_parallel_ets(
train_data_h2minus.groupby('drug_id'), ets_predict_monthly,
train_data_h2minus). \
reset_index(drop=True)
if ensemble_model_best_h2 == 'ma':
start = time.time()
predict_h2minus = train_data_h2minus.groupby('drug_id'). \
apply(ma_predict_monthly).reset_index(drop=True)
predict_h2minus['model'] = ensemble_model_best_h2
end = time.time()
logger.info(
'H2 Minus Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
ensemble_predict_h2 = pd.concat(
[ma_predict_best_h2, ses_predict_best_h2, predict_h2minus], axis=0)
# H2 BUCKET AGGREGATING
ma_train_h2['model'] = 'ma'
ma_train_h2['history_bucket'] = 'H2'
ses_train_h2['model'] = 'ses'
ses_train_h2['history_bucket'] = 'H2'
ma_train_error_h2['model'] = 'ma'
ma_train_error_h2['history_bucket'] = 'H2'
ses_train_error_h2['model'] = 'ses'
ses_train_error_h2['history_bucket'] = 'H2'
ma_predict_h2['model'] = 'ma'
ma_predict_h2['history_bucket'] = 'H2'
ses_predict_h2['model'] = 'ses'
ses_predict_h2['history_bucket'] = 'H2'
train_h2 = pd.concat([ma_train_h2, ses_train_h2], axis=0)
train_error_h2 = pd.concat([ma_train_error_h2, ses_train_error_h2], axis=0)
predict_h2 = pd.concat([ma_predict_h2, ses_predict_h2], axis=0)
train_h2['forecast_type'] = 'train'
train_h2['final_fcst'] = 'N'
train_error_h2['forecast_type'] = 'train'
train_error_h2['final_fcst'] = 'N'
predict_h2['forecast_type'] = 'forecast'
predict_h2['final_fcst'] = 'N'
ensemble_train_h2['forecast_type'] = 'train'
ensemble_train_h2['final_fcst'] = 'Y'
ensemble_train_h2['history_bucket'] = 'H2'
ensemble_error_h2['forecast_type'] = 'train'
ensemble_error_h2['final_fcst'] = 'Y'
ensemble_error_h2['history_bucket'] = 'H2'
ensemble_predict_h2['forecast_type'] = 'forecast'
ensemble_predict_h2['final_fcst'] = 'Y'
ensemble_predict_h2['history_bucket'] = 'H2'
''' H3- bucket - Train and Forecast'''
    logger.info(
        'H3 drugs for training: ' +
        str(drug_sales_monthly_bucket_h3.drug_id.nunique()))
# FORECASTING MODULES: NAIVE
naive_train_data_h3 = drug_sales_monthly_bucket_h3.copy()
naive_train_data_h3 = naive_train_data_h3[[
'drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# predict
start = time.time()
naive_predict_h3 = naive_train_data_h3.groupby('drug_id'). \
apply(naive_predict_monthly, horizon).reset_index(drop=True)
end = time.time()
logger.info(
'H3 Naive Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
# H3 BUCKET AGGREGATING
naive_predict_h3['model'] = 'naive'
naive_predict_h3['history_bucket'] = 'H3'
predict_h3 = naive_predict_h3.copy()
predict_h3['forecast_type'] = 'forecast'
predict_h3['final_fcst'] = 'N'
ensemble_predict_h3 = naive_predict_h3.copy()
ensemble_predict_h3['forecast_type'] = 'forecast'
ensemble_predict_h3['final_fcst'] = 'Y'
''' AGG. TRAIN/ERROR/FORECAST TABLES '''
train = pd.concat([train_h1, train_h2], axis=0)
error = pd.concat([train_error_h1, train_error_h2], axis=0)
predict = pd.concat([predict_h1, predict_h2, predict_h3], axis=0)
ensemble_train = pd.concat([ensemble_train_h1, ensemble_train_h2], axis=0)
ensemble_error = pd.concat([ensemble_error_h1, ensemble_error_h2], axis=0)
ensemble_predict = pd.concat(
[ensemble_predict_h1, ensemble_predict_h2, ensemble_predict_h3],
axis=0)
    # ensuring the code does not fail when the H3 bucket is empty
if 'net_sales_quantity' in predict.columns:
del predict['net_sales_quantity']
if 'net_sales_quantity' in ensemble_predict.columns:
del ensemble_predict['net_sales_quantity']
    # converting dates to string objects
train['month_begin_dt'] = train['month_begin_dt']. \
dt.date.astype(str)
predict['month_begin_dt'] = predict['month_begin_dt']. \
dt.date.astype(str)
ensemble_train['month_begin_dt'] = ensemble_train['month_begin_dt']. \
dt.date.astype(str)
ensemble_predict['month_begin_dt'] = ensemble_predict['month_begin_dt']. \
dt.date.astype(str)
return train, error, predict, ensemble_train, ensemble_error, \
ensemble_predict | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/warehouse/forecast/forecast_main.py | forecast_main.py |
import numpy as np
from zeno_etl_libs.utils.warehouse.forecast.helper_functions import make_future_df
from zeno_etl_libs.utils.warehouse.forecast.errors import ape_calc, ae_calc
from statsmodels.tsa.holtwinters import ExponentialSmoothing
# ets train
def ets_train_monthly(df, ets_params, horizon=3, out_of_sample=3, logger=None):
train = df.copy()
train.drop(train.tail(out_of_sample).index, inplace=True)
# dividing the series into train and validation set
drug_id = train['drug_id'].values[0]
input_series = train['net_sales_quantity'].values
validation = df['net_sales_quantity'].tail(out_of_sample).values
# creating dummy best fit param and fit values
best_fit = None
best_fit_params = [None, None, False, None, False]
best_accuracy = np.inf
best_ape = [0]*horizon
best_ae = [0]*horizon
# best_fittedvalues = [0]*len(train)
# best_fcastvalues = [0]*horizon
# running a loop for grid search
for params in ets_params:
trend, seasonal, damped, seasonal_periods, use_boxcox = params
try:
ape = []
ae = []
# model fitting
model = ExponentialSmoothing(
input_series, trend=trend, seasonal=seasonal, damped=damped,
seasonal_periods=seasonal_periods, use_boxcox=use_boxcox)
fit = model.fit(optimized=True)
# accuracy parameter can be - aic, bic, sse or mape
forecast = np.round(fit.forecast(horizon))
# print(forecast)
ape = [
ape_calc(actual, forecast)
for actual, forecast in zip(validation, forecast)]
ae = [
ae_calc(actual, forecast)
for actual, forecast in zip(validation, forecast)]
fit_mape = np.mean(ape)
# fit_mae = np.mean(ae)
# fitted_values = fit.fittedvalues
# identifying the best fit params
if (fit_mape <= best_accuracy) & (fit_mape != -np.inf):
best_fit = fit
best_fit_params = params
best_accuracy = fit_mape
best_ape = ape
best_ae = ae
# best_fittedvalues = fitted_values
best_forecast = forecast
except Exception as error:
# print(params,':', error)
error_str = '''Drug {} Params {} Error: {}'''.format(
drug_id, str(params), error)
# logger.info(error_str)
pass
    # creating out-of-sample output dataset
predict_df = make_future_df(train, out_of_sample)
    # forecast deviation (Holt's method): sigma = sqrt(sse * (1 + alpha^2 * (h-1)) / n)
alpha = best_fit.params['smoothing_level']
std = np.round(
np.sqrt(best_fit.sse*(1 + alpha * alpha * (horizon-1)) /
len(best_fit.fittedvalues)))
predict_df['fcst'] = best_forecast
predict_df['std'] = std
predict_df['actual'] = validation
# model variables
predict_df['ape'] = best_ape
predict_df['ae'] = best_ae
predict_df['hyper_params'] = str(best_fit_params)
return predict_df
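# Illustrative sketch (not called anywhere in this module): the forecast
# deviation used above and below follows Holt's variance approximation
# sigma_h = sqrt(SSE * (1 + alpha^2 * (h - 1)) / n). The hypothetical numbers
# below (SSE=900, alpha=0.3, horizon=3, n=36) give sqrt(900 * 1.18 / 36) ~ 5.4,
# which np.round reports as 5.
def _ets_forecast_std_example(sse=900.0, alpha=0.3, horizon=3, n_fitted=36):
    return np.round(np.sqrt(sse * (1 + alpha * alpha * (horizon - 1)) / n_fitted))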
# ets predict
def ets_predict_monthly(df, ets_train, horizon=3, out_of_sample=3):
df = df.copy()
print("running for drug_id --> " + str(df['drug_id'].unique()[0]))
fit_params = ets_train[ets_train['drug_id']==df['drug_id'].unique()[0]]
fit_params = tuple(eval(fit_params['hyper_params'].values[0]))
series = df['net_sales_quantity'].values
# getting best fit params for forecast
trend, seasonal, damped, seasonal_periods, use_boxcox = fit_params
# creating model instance
try:
model = ExponentialSmoothing(
series, trend=trend, seasonal=seasonal, damped=damped,
seasonal_periods=seasonal_periods, use_boxcox=use_boxcox)
fit = model.fit(optimized=True)
if np.isnan(fit.sse) == True or fit.forecast(horizon)[0] < 0 or \
(series[-1] > 0 and fit.forecast(horizon)[0] > 0 and
(0.33 > series[-1]/fit.forecast(horizon)[0] or
series[-1]/fit.forecast(horizon)[0] > 3)):
raise Exception(
'ets hyperparams giving outliers for drug_id: ' \
+ str(df['drug_id'].unique()[0]) + \
' running model with default params')
except Exception as error:
model = ExponentialSmoothing(
series, trend=None, seasonal=None, damped=False,
seasonal_periods=seasonal_periods, use_boxcox=False)
fit = model.fit(optimized=True)
print(error)
    # creating forecast output dataset
predict_df = make_future_df(df, horizon)
predict_df['fcst'] = np.round(fit.forecast(horizon))
    # forecast deviation (Holt's method): sigma = sqrt(sse * (1 + alpha^2 * (h-1)) / n)
alpha = fit.params['smoothing_level']
std = np.round(
np.sqrt(fit.sse*(1 + alpha * alpha * (horizon-1)) /
len(fit.fittedvalues)))
predict_df['std'] = std
return predict_df | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/warehouse/forecast/ets.py | ets.py |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# absolute percentage error calculation
def ape_calc(actual, forecast):
if (actual == 0) & (forecast == 0):
ape = 0
elif forecast == 0:
ape = 1
elif actual == 0:
ape = 1
else:
ape = abs((forecast - actual)/actual)
return ape
# abs error calculation
def ae_calc(actual, forecast):
if (actual == 0) & (forecast == 0):
ae = 0
elif forecast == 0:
ae = actual
elif actual == 0:
ae = forecast
else:
ae = abs(forecast - actual)
return ae
# weighted mape calculation
def wmape(actual, forecast):
wmape = sum(abs(forecast-actual))/sum(actual)
return round(100*wmape, 1)
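# Illustrative sketch (hypothetical numbers, not used anywhere): for a single
# drug with actuals [10, 0, 20] and forecasts [8, 5, 20] the metrics above give
# ape_calc per period -> [0.2, 1, 0], ae_calc per period -> [2, 5, 0], and
# wmape(np.array([10, 0, 20]), np.array([8, 5, 20])) -> 100 * 7 / 30 = 23.3.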
# avg mape, ape for the forecast horizon
def train_error(train_df):
drug_id = train_df['drug_id'].values[-1]
mae = np.mean(train_df['ae'])
mape = np.mean(train_df['ape'])
return pd.DataFrame(
[[drug_id, mae, mape]], columns=['drug_id', 'mae', 'mape'])
def train_error_ets_h1(train_df):
drug_id = train_df['drug_id'].values[-1]
mae = np.mean(train_df['ae'])
mape = np.mean(train_df['ape'])
actual = np.mean(train_df['actual'])
std = np.mean(train_df['std'])
return pd.DataFrame(
[[drug_id, mae, mape, actual, std]], columns=['drug_id', 'mae', 'mape', 'actual', 'std'])
# error reporting overall
def error_report(error_df, wh_drug_list, drug_history):
print('MAE and MAPE error')
error_df = error_df.copy()
error_df['mape'] = np.round(error_df['mape'] * 100, 1)
print(np.round(error_df.mae.mean()), error_df.mape.mean(), '\n')
print('MAPE describe')
print(error_df['mape'].describe(), '\n')
print('MAPE Plots')
fig, ax = plt.subplots()
error_df['mape'].hist(ax=ax, bins=100, bottom=0.05)
ax.set_yscale('log')
ax.set_ylabel('# of SKUs')
ax.set_xlabel('MAPE')
print(' ', '\n')
print('MAPE where error % > 100%')
print(error_df.query('mape > 1').sort_values('mape')['mape'].mean(), '\n')
print('MAE describe')
print(error_df['mae'].describe(), '\n')
print('MAE Plots')
fig, ax = plt.subplots()
error_df['mae'].hist(ax=ax, bins=100, bottom=0.05)
ax.set_ylabel('# of SKUs')
ax.set_xlabel('MAE')
ax.set_yscale('log')
print('ERROR MAPPING WITH BUCKETS AND HISTORY')
error_bucket = error_df.merge(
wh_drug_list[['drug_id', 'bucket']], on='drug_id').\
merge(drug_history, on='drug_id')
fig, ax = plt.subplots()
error_bucket.groupby('month_history')['mape'].mean().plot()
ax.set_ylabel('MAPE')
ax.set_xlabel('Available history')
print(error_bucket.groupby('bucket')['mape'].mean())
return 0
def error_report_monthly(train_data, wh_drug_list, drug_history):
train_data = train_data.copy()
train_data['ape'] = np.round(train_data['ape'] * 100, 1)
train_data['out_month'] = train_data.\
groupby('drug_id')['month_begin_dt'].rank()
print('MAE and MAPE error')
print(
train_data.groupby('out_month')['ape'].mean(),
train_data.groupby('out_month')['ae'].mean())
print('MAPE describe')
print(train_data.groupby('out_month')['ape'].describe(), '\n')
print('MAPE Plots')
for month in train_data['out_month'].unique():
train_data_month = train_data[train_data['out_month'] == month]
fig, ax = plt.subplots()
train_data_month['ape'].hist(ax=ax, bins=100, bottom=0.05)
plt.title('MAPE: Month out {}'.format(month))
ax.set_yscale('log')
ax.set_ylabel('# of SKUs')
ax.set_xlabel('APE')
print(' ', '\n')
print('MAPE where error % > 100%')
print(train_data.query('ape > 1').groupby('out_month')['ape'].mean(), '\n')
print('MAE describe')
print(train_data.groupby('out_month')['ae'].describe(), '\n')
print('MAE Plots')
for month in train_data['out_month'].unique():
train_data_month = train_data[train_data['out_month'] == month]
fig, ax = plt.subplots()
train_data_month['ae'].hist(ax=ax, bins=100, bottom=0.05)
plt.title('MAE: Month out {}'.format(month))
        ax.set_yscale('log')
ax.set_ylabel('# of SKUs')
ax.set_xlabel('AE')
print(' ', '\n')
print('ERROR MAPPING WITH BUCKETS AND HISTORY')
train_bucket = train_data.merge(
wh_drug_list[['drug_id', 'bucket']], on='drug_id').\
merge(drug_history, on='drug_id')
fig, ax = plt.subplots()
colors = {1: 'red', 2: 'green', 3: 'blue'}
for month in train_bucket['out_month'].unique():
train_bucket_month = train_bucket[train_bucket['out_month'] == month]
train_bucket_month.groupby('month_history')['ape'].mean().\
plot(color=colors[month], title='APE: Month out {}'.format(month),
label=month)
print('APE: Month out {}'.format(month))
print(train_bucket_month.groupby('bucket')['ape'].mean())
plt.title('APE: Month out vs Data history' + str(colors))
ax.set_ylabel('APE')
ax.set_xlabel('Available history')
return 0
# weigheted mape report
def wmape_report(train_data, wh_drug_list, drug_history):
train_data = train_data.copy()
train_data['out_month'] = train_data.\
groupby('drug_id')['month_begin_dt'].rank()
print('wMAPE', wmape(train_data['actual'], train_data['fcst']))
print('Month out wMAPE', train_data.groupby('out_month').
apply(lambda row: wmape(row['actual'], row['fcst'])))
train_bucket = train_data.merge(
wh_drug_list[['drug_id', 'bucket']], on='drug_id').\
merge(drug_history, on='drug_id')
print('Bucket out wMAPE', train_bucket.groupby('bucket').
apply(lambda row: wmape(row['actual'], row['fcst'])))
print('Bucket out 1st Month wMAPE', train_bucket.query('out_month == 1').
groupby('bucket').apply(
lambda row: wmape(row['actual'], row['fcst'])))
return 0 | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/warehouse/forecast/errors.py | errors.py |
import pandas as pd
import numpy as np
import datetime as dt
def ranking_override_dc(features_rank, db, read_schema, logger,
override_type_list=['AS/MS']):
# Get distributor preference list
current_date = dt.date.today().strftime('%Y-%m-%d')
q_preference = """
select "dc-id", "drug-id", "distributor-preference", "distributor-id"
from "{read_schema}"."distributor-ranking-preference"
where "is-active" = 1
and "start-date" < '{0}'
and "end-date" > '{0}'
and "dc-id" is not null
""".format(current_date, read_schema=read_schema)
rank_override = db.get_df(q_preference)
if rank_override.shape[0] != 0:
# Manual rank override logic starts
logger.info(f"Overriding for {override_type_list}")
original_shape = features_rank.shape
features_rank = features_rank.reset_index(drop=True)
rank_override = rank_override.dropna()
rank_override = rank_override.drop_duplicates()
rank_override = rank_override.sort_values(
['dc_id', 'drug_id', 'distributor_preference', 'distributor_id'],
ascending=[True, True, True, True]).reset_index(drop=True)
rank_override_grp = rank_override.groupby(["dc_id", "drug_id"],
as_index=False).agg(
{"distributor_id": dist_order})
rank_override_grp.rename({"distributor_id": "override_dist_order"}, axis=1,
inplace=True)
df_merged = features_rank.merge(rank_override_grp, on=["dc_id", "drug_id"],
how="left")
df_rank_override = df_merged.loc[~df_merged["override_dist_order"].isna()]
df_rank_override = df_rank_override.loc[
df_rank_override["request_type"].isin(override_type_list)]
index_to_drop = df_rank_override.index.values.tolist()
features_rank = features_rank.drop(index_to_drop)
logger.info(f"Number of rows to update ranks: {original_shape[0]-features_rank.shape[0]}")
df_rank_override["final_dist_1"] = df_rank_override["final_dist_1"].fillna(0)
df_rank_override["final_dist_2"] = df_rank_override["final_dist_2"].fillna(0)
df_rank_override["final_dist_3"] = df_rank_override["final_dist_3"].fillna(0)
dist_1 = np.array(df_rank_override["final_dist_1"].astype(int))
dist_2 = np.array(df_rank_override["final_dist_2"].astype(int))
dist_3 = np.array(df_rank_override["final_dist_3"].astype(int))
stacked_dist = np.stack((dist_1, dist_2, dist_3), axis=-1)
df_rank_override["prev_dist_order"] = list(stacked_dist)
order_list = []
for index, row in df_rank_override.iterrows():
eval_string = str(row["override_dist_order"]) + "+" + str(list(row["prev_dist_order"]))
order_list.append(str(eval(eval_string)[:3]).replace('[', '').replace(']', '').replace(' ', ''))
df_rank_override["final_order"] = order_list
df_final_order = df_rank_override['final_order'].str.split(pat=',', expand=True).rename(
columns={0: 'final_dist_1',
1: 'final_dist_2',
2: 'final_dist_3'})
df_final_order["final_dist_1"] = df_final_order["final_dist_1"].astype(int)
df_final_order["final_dist_2"] = df_final_order["final_dist_2"].astype(int)
df_final_order["final_dist_3"] = df_final_order["final_dist_3"].astype(int)
df_final_order = df_final_order.replace({0: np.nan})
df_rank_override["final_dist_1"] = df_final_order["final_dist_1"]
df_rank_override["final_dist_2"] = df_final_order["final_dist_2"]
df_rank_override["final_dist_3"] = df_final_order["final_dist_3"]
df_rank_override.drop(["override_dist_order", "prev_dist_order", "final_order"],
axis=1, inplace=True)
features_rank = features_rank.append(df_rank_override)
features_rank.sort_index(ascending=True, inplace=True)
assert features_rank.shape == original_shape
else:
logger.info("Skipping..: no rank preferences present in table")
return features_rank
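# Illustrative sketch (hypothetical distributor ids, not called by the
# production flow): the override above prepends the manual preference order
# (already de-duplicated and capped at three entries by dist_order below) to
# the previously computed final_dist_1..3 ranks and keeps the first three
# slots, e.g. _combine_preference_example([64], [12, 7, 33]) -> [64, 12, 7].
def _combine_preference_example(override_order, previous_order):
    # mirrors the eval-based concatenation used in the override loop
    return (list(override_order) + list(previous_order))[:3]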
def ranking_override_franchisee(features_rank, db, read_schema, logger,
override_type_list=['AS/MS', 'PR']):
# Get distributor preference list
current_date = dt.date.today().strftime('%Y-%m-%d')
q_preference = """
select "dc-id", "drug-id", "distributor-preference", "distributor-id"
from "{read_schema}"."distributor-ranking-preference"
where "is-active" = 1
and start_date < '{0}'
and end_date > '{0}'
and "store-id" is not null
""".format(current_date, read_schema=read_schema)
rank_override = db.get_df(q_preference)
if rank_override.shape[0] != 0:
# Manual rank override logic starts
logger.info(f"Overriding for {override_type_list}")
original_shape = features_rank.shape
features_rank = features_rank.reset_index(drop=True)
rank_override = rank_override.dropna()
rank_override = rank_override.drop_duplicates()
rank_override = rank_override.sort_values(
['store_id', 'drug_id', 'distributor_preference', 'distributor_id'],
ascending=[True, True, True, True]).reset_index(drop=True)
rank_override_grp = rank_override.groupby(["store_id", "drug_id"],
as_index=False).agg(
{"distributor_id": dist_order})
rank_override_grp.rename({"distributor_id": "override_dist_order"}, axis=1,
inplace=True)
df_merged = features_rank.merge(rank_override_grp, on=["store_id", "drug_id"],
how="left")
df_rank_override = df_merged.loc[~df_merged["override_dist_order"].isna()]
df_rank_override = df_rank_override.loc[
df_rank_override["request_type"].isin(override_type_list)]
index_to_drop = df_rank_override.index.values.tolist()
features_rank = features_rank.drop(index_to_drop)
logger.info(f"Number of rows to update ranks: {original_shape[0]-features_rank.shape[0]}")
df_rank_override["final_dist_1"] = df_rank_override["final_dist_1"].fillna(0)
df_rank_override["final_dist_2"] = df_rank_override["final_dist_2"].fillna(0)
df_rank_override["final_dist_3"] = df_rank_override["final_dist_3"].fillna(0)
dist_1 = np.array(df_rank_override["final_dist_1"].astype(int))
dist_2 = np.array(df_rank_override["final_dist_2"].astype(int))
dist_3 = np.array(df_rank_override["final_dist_3"].astype(int))
stacked_dist = np.stack((dist_1, dist_2, dist_3), axis=-1)
df_rank_override["prev_dist_order"] = list(stacked_dist)
order_list = []
for index, row in df_rank_override.iterrows():
eval_string = str(row["override_dist_order"]) + "+" + str(list(row["prev_dist_order"]))
order_list.append(str(eval(eval_string)[:3]).replace('[', '').replace(']', '').replace(' ', ''))
df_rank_override["final_order"] = order_list
df_final_order = df_rank_override['final_order'].str.split(pat=',', expand=True).rename(
columns={0: 'final_dist_1',
1: 'final_dist_2',
2: 'final_dist_3'})
df_final_order["final_dist_1"] = df_final_order["final_dist_1"].astype(int)
df_final_order["final_dist_2"] = df_final_order["final_dist_2"].astype(int)
df_final_order["final_dist_3"] = df_final_order["final_dist_3"].astype(int)
df_final_order = df_final_order.replace({0: np.nan})
df_rank_override["final_dist_1"] = df_final_order["final_dist_1"]
df_rank_override["final_dist_2"] = df_final_order["final_dist_2"]
df_rank_override["final_dist_3"] = df_final_order["final_dist_3"]
df_rank_override.drop(["override_dist_order", "prev_dist_order", "final_order"],
axis=1, inplace=True)
features_rank = features_rank.append(df_rank_override)
features_rank.sort_index(ascending=True, inplace=True)
assert features_rank.shape == original_shape
else:
logger.info("Skipping..: no rank preferences present in table")
return features_rank
def dist_order(pd_arr):
"""To arrange in preference order and avoid duplication"""
pd_arr = list(pd_arr)
dist_list = [i for n, i in enumerate(pd_arr) if i not in pd_arr[:n]]
return dist_list[:3] | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/distributor_ranking/ranking_intervention.py | ranking_intervention.py |
import pandas as pd
import numpy as np
from sklearn import preprocessing
def apply_topsis(features, x_train, weights, cutoff_percentage, volume_fraction):
    '''cutoff_percentage sets the threshold (as a fraction of the largest distributor's request volume) below which a distributor is treated as low volume'''
scaler = preprocessing.MinMaxScaler()
# normalize features
x_normalized = pd.DataFrame(
scaler.fit_transform(x_train), columns=x_train.columns)
# multiply with normalized weights here.
x_weighted = np.multiply(x_normalized, weights)
# merge drug id, dist id and dc id for reference
x_weighted = pd.merge(features[['drug_id', 'partial_distributor_id', 'partial_dc_id']],
x_weighted, left_index=True, right_index=True, how='inner')
# define ideal best vector here
ideal_best = x_weighted.agg({'lead_time': 'min', 'margin': 'max', 'bounce_rate': 'min',
'ff': 'max',
'lost_recency': 'max',
'success_recency': 'min'}).reset_index()
ideal_best = ideal_best.set_index(
'index').rename(columns={0: 'ideal_best'})
# define ideal worse vector here.
ideal_worse = x_weighted.agg({'lead_time': 'max', 'margin':'min',
'bounce_rate': 'max',
'ff': 'min',
'lost_recency': 'min',
'success_recency': 'max'}).reset_index()
ideal_worse = ideal_worse.set_index(
'index').rename(columns={0: 'ideal_worse'})
x_weighted_best = pd.merge(x_weighted.T, ideal_best,
how='left', left_index=True, right_index=True).T
x_weighted_worse = pd.merge(x_weighted.T, ideal_worse,
how='left', left_index=True, right_index=True).T
# euclidean distance with ideal worse is calculated here.
ideal_worse_ed = x_weighted_worse[['lead_time', 'margin', 'bounce_rate', 'ff',
'lost_recency',
'success_recency']].apply(lambda x: np.linalg.norm(x.values - ideal_worse['ideal_worse'].values), axis=1)
ideal_worse_ed = pd.DataFrame(ideal_worse_ed, columns=['ideal_worse_ed'])
# euclidean distance with ideal best is calculated here.
ideal_best_ed = x_weighted_best[['lead_time', 'margin',
'bounce_rate', 'ff',
'lost_recency',
'success_recency']].apply(lambda x: np.linalg.norm(x.values - ideal_best['ideal_best'].values), axis=1)
ideal_best_ed = pd.DataFrame(ideal_best_ed, columns=['ideal_best_ed'])
# append ideal worse euclidean distance here.
x_eval = pd.merge(x_weighted, ideal_worse_ed, how='left',
left_index=True, right_index=True)
# append ideal best euclidean distance here.
x_eval = pd.merge(x_eval, ideal_best_ed, how='left',
left_index=True, right_index=True)
x_eval['performance'] = (x_eval['ideal_worse_ed'] /
(x_eval['ideal_worse_ed'] + x_eval['ideal_best_ed'])) * 100
x_rank = x_eval.copy()
x_rank['rank'] = x_rank.groupby(['partial_dc_id', 'drug_id'])[
'performance'].rank(method='first', ascending=False)
x_rank['rank'] = x_rank['rank'].astype(int)
#################heuristics #############
features_rank = pd.merge(features,
x_rank[['drug_id', 'partial_distributor_id',
'partial_dc_id', 'performance', 'rank']],
how='outer', validate='one_to_one')
# add filter for low volume distributor exclusion for heuristic substitute
volume = features_rank.groupby(['partial_dc_id', 'partial_distributor_id',
'partial_distributor_name']).agg(
total_requests=('total_requests', 'sum'))
small_dist = volume.copy()
cutoff = max(small_dist['total_requests']) * cutoff_percentage
    print('max total requests: ', max(small_dist['total_requests']))
    print('low volume cutoff: ', cutoff)
small_dist['is_small'] = np.where(volume['total_requests'] < cutoff, 1, 0)
small_dist = small_dist.reset_index()
small_dist['fraction_total_requests'] = small_dist['total_requests'] / \
small_dist['total_requests'].sum()
# add flag for small distributors here
features_rank = pd.merge(features_rank,
small_dist[['partial_dc_id',
'partial_distributor_id', 'is_small']],
on=['partial_dc_id', 'partial_distributor_id'],
validate='many_to_one',
how='left')
dc_type_performance = features_rank.groupby(['partial_dc_id', 'drug_type', 'partial_distributor_id']).agg(
dc_type_performance=('performance', 'mean')).reset_index()
features_rank = pd.merge(features_rank, dc_type_performance,
on=['partial_dc_id', 'drug_type', 'partial_distributor_id'], how='left',
validate='many_to_one')
# determine dc type rank
    dc_eligible = (features_rank['is_small'] == 0) | (
        (features_rank['drug_type'] != 'generic') &
        (features_rank['drug_type'] != 'ethical'))
    features_rank['dc_type_rank'] = features_rank[dc_eligible].groupby(
        ['partial_dc_id', 'drug_type'])['dc_type_performance'].rank(
        method='dense', ascending=False).astype(int, errors='ignore')
dc_type_rank_ref = pd.pivot_table(features_rank, index=['partial_dc_id', 'drug_type'], columns=['dc_type_rank'],
values='partial_distributor_id').add_prefix('dc_drug_type_level_dist_').reset_index()
features_rank = pd.merge(features_rank,
dc_type_rank_ref[['partial_dc_id', 'drug_type', 'dc_drug_type_level_dist_1.0',
'dc_drug_type_level_dist_2.0', 'dc_drug_type_level_dist_3.0']],
how='left', on=['partial_dc_id', 'drug_type'], validate='many_to_one')
# append enterprise type rank
enterprise_type_performance = features_rank.groupby(['drug_type', 'partial_distributor_id']).agg(
enterprise_type_performance=('performance', 'mean')).reset_index()
features_rank = pd.merge(features_rank, enterprise_type_performance, on=['drug_type', 'partial_distributor_id'],
how='left', validate='many_to_one')
features_rank['enterprise_type_rank'] = features_rank[(features_rank['is_small'] == 0)
| ((features_rank['drug_type'] != 'generic')
& (features_rank['drug_type'] != 'ethical'))].groupby(['drug_type'])[
'enterprise_type_performance'].rank(method='dense', ascending=False).astype(int, errors='ignore')
enterprise_type_rank_ref = pd.pivot_table(features_rank, index=['drug_type'], columns=['enterprise_type_rank'],
values='partial_distributor_id').add_prefix('enterprise_drug_type_level_dist_').reset_index()
features_rank = pd.merge(features_rank,
enterprise_type_rank_ref[['drug_type',
'enterprise_drug_type_level_dist_1.0',
'enterprise_drug_type_level_dist_2.0',
'enterprise_drug_type_level_dist_3.0']],
how='left', on=['drug_type'], validate='many_to_one')
# 999 denotes that bounce rate = 1 and total requests is greater than 5 for that distributor.
features_rank['rank'] = np.where(
(features_rank['rank'] == 1) & (features_rank['bounce_rate'] == 1) & (
features_rank['total_requests'] > 5),
999, features_rank['rank'])
features_rank['rank'] = np.where(
(features_rank['rank'] == 2) & (features_rank['bounce_rate'] == 1) & (
features_rank['total_requests'] > 5),
999, features_rank['rank'])
features_rank['rank'] = np.where(
(features_rank['rank'] == 3) & (features_rank['bounce_rate'] == 1) & (
features_rank['total_requests'] > 5),
999, features_rank['rank'])
output_ranks = pd.pivot_table(features_rank, index=['partial_dc_id', 'drug_id'], columns='rank',
values='partial_distributor_id')[[1, 2, 3]].add_prefix('final_dist_').add_suffix('.0').reset_index()
features_rank = pd.merge(features_rank, output_ranks, on=['partial_dc_id', 'drug_id'], how='left',
validate='many_to_one')
# add volume fraction here
features_rank['volume_fraction'] = volume_fraction
######organize output here ####################
# remove .0 suffix from columns
    features_rank.columns = features_rank.columns.str.replace(r'\.0$', '', regex=True)
    # remove partial_ prefix from columns
    features_rank.columns = features_rank.columns.str.replace(r'^partial_', '', regex=True)
# decide columns to be included here
features_rank = features_rank[['dc_id', 'dc_name', 'distributor_id',
'distributor_name',
'distributor_type',
'is_small', 'drug_id',
'drug_name', 'drug_type',
'lead_time', 'margin',
'total_lost', 'total_requests',
'bounce_rate', 'ff',
'lost_recency', 'success_recency',
'performance', 'rank',
'final_dist_1',
'final_dist_2',
'final_dist_3',
'dc_drug_type_level_dist_1',
'dc_drug_type_level_dist_2',
'dc_drug_type_level_dist_3',
'enterprise_drug_type_level_dist_1',
'enterprise_drug_type_level_dist_2',
'enterprise_drug_type_level_dist_3',
'volume_fraction']]
return features_rank
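# Illustrative sketch (hypothetical numbers, not part of the production flow):
# a minimal TOPSIS closeness computation for two distributors and two criteria,
# mirroring the steps above -- min-max normalise, apply weights, measure the
# euclidean distance to the ideal best/worst vectors, and score each row as
# performance = 100 * d_worst / (d_worst + d_best). Here the first distributor
# (faster lead time, higher fill rate) scores 100 and the second scores 0.
def _topsis_mini_example():
    scaler = preprocessing.MinMaxScaler()
    # rows = distributors; lead_time is better when low, ff is better when high
    x = pd.DataFrame({'lead_time': [2.0, 5.0], 'ff': [0.9, 0.6]})
    weights = [0.5, 0.5]
    x_w = np.multiply(
        pd.DataFrame(scaler.fit_transform(x), columns=x.columns), weights)
    ideal_best = np.array([x_w['lead_time'].min(), x_w['ff'].max()])
    ideal_worse = np.array([x_w['lead_time'].max(), x_w['ff'].min()])
    d_best = np.linalg.norm(x_w.values - ideal_best, axis=1)
    d_worse = np.linalg.norm(x_w.values - ideal_worse, axis=1)
    return 100 * d_worse / (d_worse + d_best)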
def apply_topsis_franchisee(features, x_train, weights, cutoff_percentage, volume_fraction):
    '''cutoff_percentage sets the threshold (as a fraction of the largest distributor's request volume) below which a distributor is treated as low volume'''
scaler = preprocessing.MinMaxScaler()
# normalize features
x_normalized = pd.DataFrame(
scaler.fit_transform(x_train), columns=x_train.columns)
# multiply with normalized weights here.
x_weighted = np.multiply(x_normalized, weights)
# merge drug id, dist id and store id for reference
x_weighted = pd.merge(
features[['drug_id', 'partial_distributor_id', 'store_id']],
x_weighted, left_index=True, right_index=True, how='inner')
# define ideal best vector here
ideal_best = x_weighted.agg(
{'lead_time': 'min', 'margin': 'max', 'bounce_rate': 'min',
'ff': 'max',
'lost_recency': 'max',
'success_recency': 'min'}).reset_index()
ideal_best = ideal_best.set_index(
'index').rename(columns={0: 'ideal_best'})
# define ideal worse vector here.
ideal_worse = x_weighted.agg({'lead_time': 'max', 'margin': 'min',
'bounce_rate': 'max',
'ff': 'min',
'lost_recency': 'min',
'success_recency': 'max'}).reset_index()
ideal_worse = ideal_worse.set_index(
'index').rename(columns={0: 'ideal_worse'})
x_weighted_best = pd.merge(x_weighted.T, ideal_best,
how='left', left_index=True, right_index=True).T
x_weighted_worse = pd.merge(x_weighted.T, ideal_worse,
how='left', left_index=True, right_index=True).T
# euclidean distance with ideal worse is calculated here.
ideal_worse_ed = x_weighted_worse[
['lead_time', 'margin', 'bounce_rate', 'ff',
'lost_recency',
'success_recency']].apply(
lambda x: np.linalg.norm(x.values - ideal_worse['ideal_worse'].values),
axis=1)
ideal_worse_ed = pd.DataFrame(ideal_worse_ed, columns=['ideal_worse_ed'])
# euclidean distance with ideal best is calculated here.
ideal_best_ed = x_weighted_best[['lead_time', 'margin',
'bounce_rate', 'ff',
'lost_recency',
'success_recency']].apply(
lambda x: np.linalg.norm(x.values - ideal_best['ideal_best'].values),
axis=1)
ideal_best_ed = pd.DataFrame(ideal_best_ed, columns=['ideal_best_ed'])
# append ideal worse euclidean distance here.
x_eval = pd.merge(x_weighted, ideal_worse_ed, how='left',
left_index=True, right_index=True)
# append ideal best euclidean distance here.
x_eval = pd.merge(x_eval, ideal_best_ed, how='left',
left_index=True, right_index=True)
x_eval['performance'] = (x_eval['ideal_worse_ed'] /
(x_eval['ideal_worse_ed'] + x_eval[
'ideal_best_ed'])) * 100
x_rank = x_eval.copy()
x_rank['rank'] = x_rank.groupby(['store_id', 'drug_id'])[
'performance'].rank(method='first', ascending=False)
x_rank['rank'] = x_rank['rank'].astype(int)
#################heuristics #############
features_rank = pd.merge(features,
x_rank[['drug_id', 'partial_distributor_id',
'store_id', 'performance', 'rank']],
how='outer', validate='one_to_one')
# add filter for low volume distributor exclusion for heuristic substitute
volume = features_rank.groupby(['store_id', 'partial_distributor_id',
'partial_distributor_name']).agg(
total_requests=('total_requests', 'sum'))
small_dist = volume.copy()
cutoff = max(small_dist['total_requests']) * cutoff_percentage
    print('max total requests: ', max(small_dist['total_requests']))
    print('low volume cutoff: ', cutoff)
small_dist['is_small'] = np.where(volume['total_requests'] < cutoff, 1, 0)
small_dist = small_dist.reset_index()
small_dist['fraction_total_requests'] = small_dist['total_requests'] / \
small_dist['total_requests'].sum()
# add flag for small distributors here
features_rank = pd.merge(features_rank,
small_dist[['store_id',
'partial_distributor_id', 'is_small']],
on=['store_id', 'partial_distributor_id'],
validate='many_to_one',
how='left')
store_type_performance = features_rank.groupby(
['store_id', 'drug_type', 'partial_distributor_id']).agg(
store_type_performance=('performance', 'mean')).reset_index()
features_rank = pd.merge(features_rank, store_type_performance,
on=['store_id', 'drug_type',
'partial_distributor_id'], how='left',
validate='many_to_one')
# determine store type rank
features_rank['store_type_rank'] = \
features_rank[(features_rank['is_small'] == 0) | (
(features_rank['drug_type'] != 'generic') & (
features_rank['drug_type'] != 'ethical'))].groupby(
['store_id', 'drug_type'])['store_type_performance'].rank(
method='dense', ascending=False).astype(int, errors='ignore')
store_type_rank_ref = \
pd.pivot_table(features_rank, index=['store_id', 'drug_type'],
columns=['store_type_rank'],
values='partial_distributor_id')[[1, 2, 3]].add_prefix(
'store_drug_type_level_dist_').reset_index()
features_rank = pd.merge(features_rank,
store_type_rank_ref[['store_id', 'drug_type',
'store_drug_type_level_dist_1',
'store_drug_type_level_dist_2',
'store_drug_type_level_dist_3']],
how='left', on=['store_id', 'drug_type'],
validate='many_to_one')
# 999 denotes that bounce rate = 1 and total requests is greater than 5 for that distributor.
features_rank['rank'] = np.where(
(features_rank['rank'] == 1) & (features_rank['bounce_rate'] == 1) & (
features_rank['total_requests'] > 5),
999, features_rank['rank'])
features_rank['rank'] = np.where(
(features_rank['rank'] == 2) & (features_rank['bounce_rate'] == 1) & (
features_rank['total_requests'] > 5),
999, features_rank['rank'])
features_rank['rank'] = np.where(
(features_rank['rank'] == 3) & (features_rank['bounce_rate'] == 1) & (
features_rank['total_requests'] > 5),
999, features_rank['rank'])
output_ranks = \
pd.pivot_table(features_rank, index=['store_id', 'drug_id'], columns='rank',
values='partial_distributor_id')[[1, 2, 3]].add_prefix(
'final_dist_').add_suffix('.0').reset_index()
features_rank = pd.merge(features_rank, output_ranks,
on=['store_id', 'drug_id'], how='left',
validate='many_to_one')
# add volume fraction here
features_rank['volume_fraction'] = volume_fraction
######organize output here ####################
# remove .0 suffix from columns
    features_rank.columns = features_rank.columns.str.replace(r'\.0$', '', regex=True)
    # remove partial_ prefix from columns
    features_rank.columns = features_rank.columns.str.replace(r'^partial_', '', regex=True)
# decide columns to be included here
features_rank = features_rank[['store_id', 'store_name', 'franchisee_id',
'distributor_id',
'distributor_name',
'distributor_type',
'is_small', 'drug_id',
'drug_name', 'drug_type',
'lead_time', 'margin',
'total_lost', 'total_requests',
'bounce_rate', 'ff',
'lost_recency', 'success_recency',
'performance', 'rank',
'final_dist_1',
'final_dist_2',
'final_dist_3',
'store_drug_type_level_dist_1',
'store_drug_type_level_dist_2',
'store_drug_type_level_dist_3',
'volume_fraction']]
return features_rank | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/distributor_ranking/topsis.py | topsis.py |
import pandas as pd
from zeno_etl_libs.utils.distributor_ranking.pull_data import pull_data, pull_data_franchisee
from zeno_etl_libs.utils.distributor_ranking.preprocess_features import preprocess_features_dc, preprocess_features_franchisee
from zeno_etl_libs.utils.distributor_ranking.calculate_features import calculate_features
from zeno_etl_libs.utils.distributor_ranking.topsis import apply_topsis, apply_topsis_franchisee
def ranking_calc_dc(time_interval, weights_as, weights_pr, as_low_volume_cutoff,
pr_low_volume_cutoff, volume_fraction, db, read_schema, logger):
'''output distributor ranking for AS and PR separately'''
logger.info('starting to import data')
# add 7 days to time interval since we do not want to include last week's data.
time_interval = time_interval + 7
df_features, df_distributors = pull_data(time_interval, db, read_schema)
logger.info('finished importing data')
######################### preprocessing starts #########################
logger.info('started preprocessing')
df_features = preprocess_features_dc(df_features, db, read_schema)
# add distributor name and distributor features here.
df_features = pd.merge(df_features, df_distributors, on=['partial_distributor_id'],
how='left', validate='many_to_one')
logger.info('finished preprocessing')
########################## preprocessing ends ##########################
####################### features calculation starts #######################
features = calculate_features(df_features, group_cols=['partial_dc_id','partial_distributor_id','drug_id'])
    ##### add necessary columns in features #####
# add drug type column here
features = pd.merge(features, df_features[['drug_id', 'drug_type']].drop_duplicates(), on=['drug_id'],
how='left',
validate='many_to_one')
# add dist type column here
features = pd.merge(features, df_features[
['partial_distributor_id', 'partial_distributor_name', 'partial_distributor_type']].drop_duplicates(),
on=['partial_distributor_id'], how='left',
validate='many_to_one')
# add dc name here.
features = pd.merge(features, df_features[['partial_dc_id', 'dc_name']].dropna().drop_duplicates(),
on=['partial_dc_id'], validate='many_to_one', how='left')
# add drug name here
features = pd.merge(features, df_features[['drug_id', 'drug_name']].drop_duplicates(),
on=['drug_id'], validate='many_to_one', how='left')
#### apply topsis ####
# weights format is [lead time, margin, bounce rate, ff, lost recency, success recency]
x_train = features[['lead_time', 'margin', 'bounce_rate',
'ff', 'lost_recency', 'success_recency']]
features_as = apply_topsis(features=features,
x_train=x_train, weights=weights_as, cutoff_percentage=as_low_volume_cutoff,
volume_fraction=volume_fraction)
logger.info('applied topsis for as')
features_pr = apply_topsis(features=features,
x_train=x_train, weights=weights_pr, cutoff_percentage=pr_low_volume_cutoff,
volume_fraction=volume_fraction)
logger.info('applied topsis for pr')
features_as.loc[:, 'request_type'] = 'AS/MS'
features_pr.loc[:, 'request_type'] = 'PR'
features_rank = pd.concat([features_as, features_pr])
return features_rank
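# Illustrative usage sketch (all values hypothetical): the weight lists are
# positional and follow the order noted above, i.e.
# [lead_time, margin, bounce_rate, ff, lost_recency, success_recency]; the
# TOPSIS step multiplies them in as-is, so they are typically supplied
# pre-normalised (summing to 1). A call from a scheduling job could look like:
#
# features_rank = ranking_calc_dc(
#     time_interval=90,
#     weights_as=[0.2, 0.1, 0.3, 0.2, 0.1, 0.1],
#     weights_pr=[0.1, 0.3, 0.2, 0.2, 0.1, 0.1],
#     as_low_volume_cutoff=0.02,
#     pr_low_volume_cutoff=0.02,
#     volume_fraction=volume_fraction,  # passed through into the output as-is
#     db=db, read_schema=read_schema, logger=logger)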
def ranking_calc_franchisee(time_interval, weights_as, weights_pr,
low_volume_cutoff, volume_fraction, db,
read_schema, logger):
'''output distributor ranking for AS and PR separately'''
logger.info('starting to import data')
# add 7 days to time interval since we do not want to include last week's data.
time_interval = time_interval + 7
df_features, df_distributors = pull_data_franchisee(time_interval, db, read_schema)
logger.info('finished importing data')
######################### preprocessing starts #########################
logger.info('started preprocessing')
df_features = preprocess_features_franchisee(df_features, db, read_schema)
# add distributor name and distributor features here.
df_features = pd.merge(df_features, df_distributors, on=['partial_distributor_id'],
how='left', validate='many_to_one')
logger.info('finished preprocessing')
########################## preprocessing ends ##########################
####################### features calculation starts #######################
features = calculate_features(df_features, group_cols=['store_id','partial_distributor_id', 'drug_id'])
    ##### add necessary columns in features #####
# add drug type column here
features = pd.merge(features,
df_features[['drug_id', 'drug_type']].drop_duplicates(),
on=['drug_id'],
how='left',
validate='many_to_one')
# add dist type column here
features = pd.merge(features, df_features[
['partial_distributor_id', 'partial_distributor_name',
'partial_distributor_type']].drop_duplicates(),
on=['partial_distributor_id'], how='left',
validate='many_to_one')
# add store name and franchisee_id here.
features = pd.merge(features, df_features[
['store_id', 'store_name', 'franchisee_id']].dropna().drop_duplicates(),
on=['store_id'], validate='many_to_one', how='left')
# add drug name here
features = pd.merge(features,
df_features[['drug_id', 'drug_name']].drop_duplicates(),
on=['drug_id'], validate='many_to_one', how='left')
#### apply topsis ####
# weights format is [lead time, margin, bounce rate, ff, lost recency, success recency]
x_train = features[['lead_time', 'margin', 'bounce_rate',
'ff', 'lost_recency', 'success_recency']]
features_rank_as = apply_topsis_franchisee(features=features,
x_train=x_train,
weights=weights_as,
cutoff_percentage=low_volume_cutoff,
volume_fraction=volume_fraction)
logger.info('applied topsis for franchisee as')
features_rank_pr = apply_topsis_franchisee(features=features,
x_train=x_train,
weights=weights_pr,
cutoff_percentage=low_volume_cutoff,
volume_fraction=volume_fraction)
logger.info('applied topsis for franchisee pr')
features_rank_as.loc[:, 'request_type'] = 'AS/MS'
features_rank_pr.loc[:, 'request_type'] = 'PR'
features_rank = pd.concat([features_rank_as, features_rank_pr])
return features_rank | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/distributor_ranking/distributor_ranking_calc.py | distributor_ranking_calc.py |
import pandas as pd
import numpy as np
def preprocess_features_dc(df_features, db, read_schema):
'''df_features is the raw data variable '''
# remove those entries where no order is given to dc or the order value doesn't exist
df_features = df_features[(df_features['original_order'] > 0) & (
~df_features['original_order'].isna())]
df_features = df_features.drop_duplicates()
# due to stock rotation, invoice_item_id could be same. So remove duplicates.
# Since drop duplicates drops out all na values, separate na and non na
# and then apply drop duplicates
df_features_1 = df_features[~df_features[
'invoice_item_id'].isna()]
df_features_2 = df_features[df_features['invoice_item_id'].isna()]
df_features_1 = df_features_1.drop_duplicates(subset=['invoice_item_id'])
# concat back na values after you separate them
df_features = pd.concat([df_features_1, df_features_2])
    # remove cases where selling rate is 0, otherwise margin becomes infinite
df_features = df_features[df_features['selling_rate']!=0]
# if distributor id isn't assigned in short book then remove it.
df_features = df_features[(~df_features['short_book_distributor_id'].isna())
| (~df_features['partial_distributor_id'].isna())]
# for those cases where invoice doesn't exist, take distributor as short book distributor
df_features['partial_distributor_id'] = df_features['partial_distributor_id'].fillna(
df_features['short_book_distributor_id'])
# if no dc information is present then remove those cases.
df_features = df_features[((~df_features['partial_dc_id'].isna()) | (
~df_features['forward_dc_id'].isna()))]
# for those cases where invoice doesn't exist, take invoice dc as obtained from sdm table
df_features['partial_dc_id'] = df_features['partial_dc_id'].fillna(
df_features['forward_dc_id'])
df_features['partial_created_at'] = pd.to_datetime(
df_features['partial_created_at'], errors='coerce')
df_features['partial_invoiced_at'] = pd.to_datetime(
df_features['partial_invoiced_at'], errors='coerce')
df_features['original_created_at'] = pd.to_datetime(
df_features['original_created_at'], errors='coerce')
df_features['original_created_at_2'] = pd.to_datetime(
df_features['original_created_at_2'], errors='coerce')
# append number of invoices against each sb id.
invoice_count = df_features.groupby(['short_book_1_id']).agg(
invoice_count=('invoice_id', 'count')).reset_index()
df_features = pd.merge(df_features, invoice_count, on=[
'short_book_1_id'], how='left')
# fill those cases where no invoice is present with zero.
df_features['invoice_count'] = df_features['invoice_count'].fillna(0)
# to avoid cases where wrong product is received, we compare with invoice items drugs id as well.
df_features['is_lost'] = np.where(
(df_features['invoice_items_drug_id'] != df_features['drug_id']) | (df_features['partial_invoiced_at'].isna()), 1, 0)
# for new drugs where drug id hasn't been assigned yet, ranking won't be generated until the drug id is assigned.
df_features = df_features[~df_features['drug_id'].isna()]
# remove entries for which drug type hasn't been assigned
df_features = df_features[~df_features['drug_type'].isna()]
# remove discontinued or banned products
df_features = df_features[
~((df_features['drug_type'] == 'discontinued-products') | (df_features['drug_type'] == 'banned'))]
# sanity check
assert df_features[df_features['invoice_count'] ==
0].shape[0] == df_features[df_features['invoice_id'].isna()].shape[0]
# filter out distributor-drugs entries
print('pulling drug-distributor filter data')
q_drug_distributor = """select DISTINCT "distributor-id" as "partial_distributor_id",
"drug-id" as drug_id
from "{read_schema}"."distributor-drugs" dd """
drug_distributor_list_filter = db.get_df(q_drug_distributor.format(read_schema=read_schema))
df_features = pd.merge(df_features, drug_distributor_list_filter,
on=['partial_distributor_id', 'drug_id'],
how='inner', validate='many_to_one')
return df_features
def preprocess_features_franchisee(df_features, db, read_schema):
'''df_features is the raw data variable '''
# remove those entries where no order is given to dc or the order value doesn't exist
df_features = df_features[(df_features['original_order'] > 0) & (
~df_features['original_order'].isna())]
df_features = df_features.drop_duplicates()
    # due to stock rotation, invoice_item_id could repeat, so remove duplicates.
    # drop_duplicates treats all NaN invoice_item_id rows as duplicates of each
    # other and would keep only one of them, so split NaN and non-NaN rows and
    # de-duplicate only the non-NaN part
df_features_1 = df_features[~df_features[
'invoice_item_id'].isna()]
df_features_2 = df_features[df_features['invoice_item_id'].isna()]
df_features_1 = df_features_1.drop_duplicates(subset=['invoice_item_id'])
# concat back na values after you separate them
df_features = pd.concat([df_features_1, df_features_2])
#remove cases where selling rate is 0 otherwise margin becomes infinity
df_features = df_features[df_features['selling_rate']!=0]
# if distributor id isn't assigned in short book then remove it.
df_features = df_features[(~df_features['short_book_distributor_id'].isna())
| (~df_features['partial_distributor_id'].isna())]
# for those cases where invoice doesn't exist, take distributor as short book distributor
df_features['partial_distributor_id'] = df_features['partial_distributor_id'].fillna(
df_features['short_book_distributor_id'])
df_features['partial_created_at'] = pd.to_datetime(
df_features['partial_created_at'], errors='coerce')
df_features['partial_invoiced_at'] = pd.to_datetime(
df_features['partial_invoiced_at'], errors='coerce')
df_features['original_created_at'] = pd.to_datetime(
df_features['original_created_at'], errors='coerce')
df_features['original_created_at_2'] = pd.to_datetime(
df_features['original_created_at_2'], errors='coerce')
# append number of invoices against each sb id.
invoice_count = df_features.groupby(['short_book_1_id']).agg(
invoice_count=('invoice_id', 'count')).reset_index()
df_features = pd.merge(df_features, invoice_count, on=[
'short_book_1_id'], how='left')
# fill those cases where no invoice is present with zero.
df_features['invoice_count'] = df_features['invoice_count'].fillna(0)
# to avoid cases where wrong product is received, we compare with invoice items drugs id as well.
df_features['is_lost'] = np.where(
(df_features['invoice_items_drug_id'] != df_features['drug_id']) | (df_features['partial_invoiced_at'].isna()), 1, 0)
# for new drugs where drug id hasn't been assigned yet, ranking won't be generated until the drug id is assigned.
df_features = df_features[~df_features['drug_id'].isna()]
# remove entries for which drug type hasn't been assigned
df_features = df_features[~df_features['drug_type'].isna()]
# remove discontinued or banned products
df_features = df_features[
~((df_features['drug_type'] == 'discontinued-products') | (df_features['drug_type'] == 'banned'))]
# sanity check
assert df_features[df_features['invoice_count'] ==
0].shape[0] == df_features[df_features['invoice_id'].isna()].shape[0]
# filter out distributor-drugs entries
print('pulling drug-distributor filter data')
q_drug_distributor = """select DISTINCT "distributor-id" as "partial_distributor_id",
"drug-id" as drug_id
from "{read_schema}"."distributor-drugs" dd """
drug_distributor_list_filter = db.get_df(q_drug_distributor.format(read_schema=read_schema))
df_features = pd.merge(df_features, drug_distributor_list_filter,
on=['partial_distributor_id', 'drug_id'],
how='inner', validate='many_to_one')
    return df_features
# ==== end of file: zeno_etl_libs/utils/distributor_ranking/preprocess_features.py ====
import pandas as pd
def postprocess_ranking_dc(features_rank, volume_fraction):
tech_input = features_rank.copy()
# drop cases for tech input where all 3 distributor assigned are NULL.
# Since they automatically need to go to dc drug type level.
tech_input = tech_input[~((tech_input['final_dist_1'].isna()) & (
tech_input['final_dist_2'].isna()) & (
tech_input['final_dist_3'].isna()))]
tech_input['final_dist_1'] = tech_input['final_dist_1'].fillna(
tech_input['final_dist_2'])
tech_input['final_dist_1'] = tech_input['final_dist_1'].fillna(
tech_input['final_dist_3'])
tech_input.loc[tech_input['final_dist_1'] ==
tech_input['final_dist_2'], 'final_dist_2'] = float('NaN')
tech_input.loc[tech_input['final_dist_1'] ==
tech_input['final_dist_3'], 'final_dist_3'] = float('NaN')
tech_input['final_dist_2'] = tech_input['final_dist_2'].fillna(
tech_input['final_dist_3'])
tech_input.loc[tech_input['final_dist_2'] ==
tech_input['final_dist_3'], 'final_dist_3'] = float('NaN')
# append dc_drug_type entries as separate rows in tech input
dc_drug_type_entries = features_rank[
['dc_id', 'drug_type', 'request_type', 'dc_drug_type_level_dist_1',
'dc_drug_type_level_dist_2',
'dc_drug_type_level_dist_3']].drop_duplicates().rename(
columns={'dc_drug_type_level_dist_1': 'final_dist_1',
'dc_drug_type_level_dist_2': 'final_dist_2',
'dc_drug_type_level_dist_3': 'final_dist_3'
})
dc_drug_type_entries['drug_id'] = float('NaN')
dc_drug_type_entries['volume_fraction'] = volume_fraction
dc_drug_type_entries = dc_drug_type_entries[
['dc_id', 'drug_id', 'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
tech_input = pd.concat([tech_input, dc_drug_type_entries])
# append enterprise_drug_type entries as separate rows in tech input
enterprise_drug_type_entries = features_rank[
['drug_type', 'request_type', 'enterprise_drug_type_level_dist_1',
'enterprise_drug_type_level_dist_2',
'enterprise_drug_type_level_dist_3']].drop_duplicates().rename(
columns={'enterprise_drug_type_level_dist_1': 'final_dist_1',
'enterprise_drug_type_level_dist_2': 'final_dist_2',
'enterprise_drug_type_level_dist_3': 'final_dist_3'})
enterprise_drug_type_entries['dc_id'] = float('NaN')
enterprise_drug_type_entries['drug_id'] = float('NaN')
enterprise_drug_type_entries['volume_fraction'] = volume_fraction
enterprise_drug_type_entries = enterprise_drug_type_entries[
['dc_id', 'drug_id', 'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
tech_input = pd.concat([tech_input, enterprise_drug_type_entries])
tech_input["store_id"] = float('NaN')
tech_input["franchisee_id"] = 1 # ZIPPIN PHARMA
tech_input = tech_input[['dc_id', 'store_id', 'franchisee_id', 'drug_id',
'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
tech_input = tech_input.drop_duplicates()
return tech_input
def postprocess_ranking_franchisee(features_rank, volume_fraction):
tech_input = features_rank.copy()
# drop cases for tech input where all 3 distributor assigned are NULL.
# Since they automatically need to go to store drug type level.
tech_input = tech_input[~((tech_input['final_dist_1'].isna()) & (
tech_input['final_dist_2'].isna()) & (tech_input['final_dist_3'].isna()))]
tech_input['final_dist_1'] = tech_input['final_dist_1'].fillna(
tech_input['final_dist_2'])
tech_input['final_dist_1'] = tech_input['final_dist_1'].fillna(
tech_input['final_dist_3'])
tech_input.loc[tech_input['final_dist_1'] ==
tech_input['final_dist_2'], 'final_dist_2'] = float('NaN')
tech_input.loc[tech_input['final_dist_1'] ==
tech_input['final_dist_3'], 'final_dist_3'] = float('NaN')
tech_input['final_dist_2'] = tech_input['final_dist_2'].fillna(tech_input['final_dist_3'])
tech_input.loc[tech_input['final_dist_2'] ==
tech_input['final_dist_3'], 'final_dist_3'] = float('NaN')
tech_input = tech_input[['store_id', 'franchisee_id', 'drug_id', 'drug_type',
'request_type', 'volume_fraction','final_dist_1', 'final_dist_2', 'final_dist_3']]
# append store_drug_type entries as separate rows in tech input
store_drug_type_entries = features_rank[
['store_id', 'franchisee_id', 'drug_type', 'request_type',
'store_drug_type_level_dist_1', 'store_drug_type_level_dist_2',
'store_drug_type_level_dist_3']].drop_duplicates().rename(
columns={'store_drug_type_level_dist_1': 'final_dist_1',
'store_drug_type_level_dist_2': 'final_dist_2',
'store_drug_type_level_dist_3': 'final_dist_3'
})
store_drug_type_entries['drug_id'] = float('NaN')
store_drug_type_entries['volume_fraction'] = volume_fraction
store_drug_type_entries = store_drug_type_entries[
['store_id', 'franchisee_id', 'drug_id', 'drug_type', 'request_type',
'volume_fraction', 'final_dist_1', 'final_dist_2', 'final_dist_3']]
tech_input = pd.concat([tech_input, store_drug_type_entries], sort=False)
tech_input['dc_id'] = float('NaN')
tech_input = tech_input[['dc_id', 'store_id', 'franchisee_id', 'drug_id',
'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
tech_input = tech_input.drop_duplicates()
    return tech_input
# ==== end of file: zeno_etl_libs/utils/distributor_ranking/postprocess_ranking.py ====
from functools import reduce
import numpy as np
import pandas as pd
def calculate_features(df_features, group_cols):
"""
DC-LEVEL: group_cols=['partial_dc_id','partial_distributor_id', 'drug_id']
FRANCHISEE-LEVEL: group_cols=['store_id','partial_distributor_id', 'drug_id']
"""
'''outputs the calculated features when supplied with raw data'''
dfx = df_features[df_features['invoice_count'] != 0]
####################### feature calculation starts ########################
####lead time calculations ########
df_lead_time = df_features.copy()
cond_0 = (df_lead_time['invoice_count'] == 0)
df_lead_time['lead_time'] = float('NaN')
    # where a re-order timestamp does not exist but an invoice is present,
    # lead time is invoiced_at - created_at
df_lead_time['lead_time'] = np.where((df_lead_time['original_created_at_2'].isna()) & (~cond_0),
(df_lead_time['partial_invoiced_at'] -
df_lead_time['original_created_at']).astype('timedelta64[h]'),
df_lead_time['lead_time'])
    # if a re-order timestamp exists and invoiced_at - re_ordered_at > 8 hrs,
    # measure lead time from the re-order timestamp
    df_lead_time['lead_time'] = np.where(
        (~df_lead_time['original_created_at_2'].isna() &
         (((df_lead_time['partial_invoiced_at'] -
            df_lead_time['original_created_at_2']).astype('timedelta64[h]')) > 8))
        & (~cond_0),
        (df_lead_time['partial_invoiced_at'] -
         df_lead_time['original_created_at_2']).astype('timedelta64[h]'),
        df_lead_time['lead_time'])
    # if invoiced_at - re_ordered_at < 8 hrs, the re-order based lead time is
    # unreliable, so fall back to invoiced_at - original created_at
    df_lead_time['lead_time'] = np.where(
        (~df_lead_time['original_created_at_2'].isna() &
         (((df_lead_time['partial_invoiced_at'] -
            df_lead_time['original_created_at_2']).astype('timedelta64[h]')) < 8))
        & (~cond_0),
        (df_lead_time['partial_invoiced_at'] -
         df_lead_time['original_created_at']).astype('timedelta64[h]'),
        df_lead_time['lead_time'])
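    # Worked example (hypothetical timestamps): created_at = 10:00,
    # re_ordered_at = 18:00, invoiced_at = 19:00 -> invoiced - re_ordered = 1 hr
    # (< 8), so the re-order based value is considered unreliable and lead time
    # falls back to invoiced - created = 9 hrs. If instead invoiced_at = 04:00
    # the next day, invoiced - re_ordered = 10 hrs (> 8) and that value is used.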
    # for invoice count 0 (lost orders), take lead time as the max value,
    # since the features are eventually scaled between 0 and 1
df_lead_time['lead_time'] = np.where(cond_0,
df_lead_time['lead_time'].max(),
df_lead_time['lead_time'])
    # any lead time still below 8 hrs after the handling above is unreliable,
    # so replace it with the mean of the reliable (> 8 hr) lead times.
df_lead_time.loc[df_lead_time['lead_time'] < 8, 'lead_time'] = df_lead_time[df_lead_time['lead_time'] > 8][
'lead_time'].mean()
# lead time for a distributor per drug id is the average of lead time per order.
df_lead_time = df_lead_time.groupby(group_cols).agg(
lead_time=('lead_time', 'mean')).reset_index()
# sanity check
assert df_lead_time.shape[0] == \
df_features[group_cols].drop_duplicates().shape[0]
print('finished lead time calculations')
####### margin calculation starts #######
df_margin = dfx.copy()
df_margin['margin'] = (df_margin['selling_rate'] -
df_margin['distributor_rate']) / df_margin['selling_rate']
df_margin = df_margin.groupby(group_cols).agg(margin=('margin', 'mean')).reset_index()
# sanity check
assert df_margin.shape[0] == dfx[group_cols].drop_duplicates().shape[0]
print('finished margin calculations')
####### bounce rate calculation #######
df_br = df_features.groupby(group_cols).agg(
total_lost=('is_lost', 'sum'),
total_requests=('is_lost', 'count')).reset_index()
df_br['bounce_rate'] = (df_br['total_lost']) / df_br['total_requests']
# sanity check
assert df_br.shape[0] == df_features[group_cols].drop_duplicates().shape[0]
print('finished bounce rate calculations')
####### ff calculation #######
df_sorted = dfx.groupby(['short_book_1_id'], as_index=False).apply(
lambda x: x.sort_values(by=['partial_invoiced_at']))
# for multiple invoices, calculate cumulative fulfilled quantities
df_sorted = df_sorted.groupby(['short_book_1_id']).apply(
lambda x: x['partial_quantity'].cumsum()).reset_index().rename(
columns={'partial_quantity': 'cum_partial_quantity'})
df_sorted = df_sorted.set_index('level_1')
df_fulfillment = pd.merge(df_sorted, dfx, left_index=True,
right_index=True, how='left', suffixes=('', '_y'))
assert df_fulfillment['short_book_1_id'].equals(
df_fulfillment['short_book_1_id_y'])
df_fulfillment = df_fulfillment[
['short_book_1_id'] + group_cols + ['original_order', 'partial_quantity',
'cum_partial_quantity']]
# cum required quantity is quantity left after subtracting cum quantity from all previous invoices.
df_fulfillment['cum_required_quantity'] = df_fulfillment['original_order'] - \
df_fulfillment['cum_partial_quantity']
    # the quantity actually required when an order is placed is the quantity
    # left unfulfilled by the previous invoice, hence the shift by 1
df_fulfillment['actual_required'] = df_fulfillment.groupby(
['short_book_1_id']).shift(1)['cum_required_quantity']
# fill single invoices with the original order
df_fulfillment['actual_required'] = df_fulfillment['actual_required'].fillna(
df_fulfillment['original_order'])
# put actual required = 0 when ordered exceeds required.
df_fulfillment.loc[df_fulfillment['actual_required']
< 0, 'actual_required'] = 0
df_fulfillment['redundant_order_flag'] = np.where(
df_fulfillment['actual_required'] == 0, 1, 0)
df_fulfillment = df_fulfillment[['short_book_1_id'] + group_cols +
['original_order', 'partial_quantity', 'actual_required', 'redundant_order_flag']]
df_fulfillment['ff'] = df_fulfillment['partial_quantity'] / \
df_fulfillment['actual_required']
    # where nothing was required but quantity was still received, cap ff at 1
df_fulfillment.loc[(df_fulfillment['actual_required'] == 0) & (
df_fulfillment['partial_quantity'] > 0), 'ff'] = 1
df_fulfillment.loc[(df_fulfillment['ff'] > 1), 'ff'] = 1
# removed redundant orders here.
df_ff = df_fulfillment[df_fulfillment['redundant_order_flag'] != 1].groupby(group_cols).agg(ff=('ff', 'mean')).reset_index()
print('finished ff calculations')
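    # Worked example (hypothetical quantities): original_order = 10 fulfilled by
    # two invoices of 6 and 3 units -> for invoice 1, actual_required = 10 and
    # ff = 6/10 = 0.6; for invoice 2, actual_required = 10 - 6 = 4 and
    # ff = 3/4 = 0.75; the distributor's ff is then the mean of such per-order
    # values within each group.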
####### recency lost calculations #######
# number of days ago it was marked lost.
df_recency_lost = df_features[df_features['is_lost'] == 1].groupby(group_cols).agg(
max_lost_date=('original_created_at', 'max')).reset_index()
    df_recency_lost['lost_recency'] = (
        pd.Timestamp.today() - pd.to_datetime(df_recency_lost['max_lost_date'])).dt.days
df_recency_lost = df_recency_lost[group_cols + ['lost_recency']]
####### recency success calculations #######
# number of days ago it was marked success
df_recency_success = df_features[df_features['is_lost'] == 0].groupby(group_cols).agg(
max_success_date=('original_created_at', 'max')).reset_index()
    df_recency_success['success_recency'] = (
        pd.Timestamp.today() - pd.to_datetime(df_recency_success['max_success_date'])).dt.days
df_recency_success = df_recency_success[group_cols + ['success_recency']]
print('finished recency calculations')
######################## feature calculation ends #########################
################## compiling all the features #############################
    merge_list = [df_lead_time, df_margin, df_br,
                  df_ff, df_recency_lost, df_recency_success]
    features = reduce(
        lambda left, right: pd.merge(left, right,
                                     on=group_cols,
                                     how='outer'), merge_list)
# lead_time: Replace lead time NA (i.e. bounce rate 1) with max lead time.
features['lead_time'] = features['lead_time'].fillna(
features['lead_time'].max())
# margin
features['margin'] = features['margin'].fillna(features['margin'].mean())
# ff
features.loc[(features['ff'].isna()) & (
features['bounce_rate'] == 1), 'ff'] = 0
features['ff'] = features['ff'].fillna(features['ff'].mean())
# for bounce rate = 0.
features['lost_recency'] = features['lost_recency'].fillna(
features['lost_recency'].max())
# for bounce rate = 1
features['success_recency'] = features['success_recency'].fillna(
features['success_recency'].max())
print('finished compiling features')
    return features
# ==== end of file: zeno_etl_libs/utils/distributor_ranking/calculate_features.py ====
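# Usage sketch for calculate_features() defined in the module above
# (illustrative only; assumes df_features has already been pulled and
# preprocessed for DC-level ranking):
#   dc_features = calculate_features(
#       df_features,
#       group_cols=['partial_dc_id', 'partial_distributor_id', 'drug_id'])
# The result has one row per group with lead_time, margin, bounce_rate, ff,
# lost_recency and success_recency columns.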
Q_FEATURES = """
select
sb.id as "short-book-1-id" ,
sb."ordered-distributor-id" as "short-book-distributor-id",
sb."drug-id",
coalesce(sb.quantity, 0) as "original-order",
sb."required-quantity" as "final-unfulfilled",
sb."created-at" as "original-created-at",
sb."re-ordered-at" as "original-created-at-2",
sbi."quantity" as "partial-quantity",
i.id as "invoice-id",
i."dc-id" as "partial-dc-id",
i."distributor-id" as "partial-distributor-id",
i."created-at" as "partial-created-at",
i."approved-at" as "partial-invoiced-at",
ii.id as "invoice-item-id",
ii."drug-id" as "invoice-items-drug-id",
inv.id as "inventory-id",
inv."invoice-item-id" as "inv-invoice-item-id",
inv."purchase-rate" as "distributor-rate",
inv."selling-rate",
d."drug-name",
d.type as "drug_type",
sdm."forward-dc-id",
s.name as "dc-name"
from
"{read_schema}"."short-book-1" sb
left join "{read_schema}"."short-book-invoices" sbi on
sbi."short-book-id" = sb.id
left join "{read_schema}".invoices i on
sbi."invoice-id" = i.id
left join "{read_schema}"."short-book-invoice-items" sbii on
sb.id = sbii."short-book-id"
left join "{read_schema}"."invoice-items" ii on
ii.id = sbii."invoice-item-id"
left join "{read_schema}"."invoice-items-1" ii1 on
ii1."invoice-item-reference" = ii.id
left join "{read_schema}"."inventory-1" inv on
inv."invoice-item-id" = ii1.id
left join "{read_schema}".drugs d on
sb."drug-id" = d.id
left join "{read_schema}".distributors dis on
dis.id = sb."ordered-distributor-id"
left join "{read_schema}"."store-dc-mapping" sdm on
sb."store-id" = sdm."store-id"
and dis.type = sdm."drug-type"
left join "{read_schema}".stores s on
i."dc-id" = s.id
where
DATEDIFF(day, date(sb."created-at"), current_date) <= {0}
and DATEDIFF(day, date(sb."created-at"), current_date) >= 7
and sb."quantity" > 0
and sb."ordered-distributor-id" != 76
and sb."ordered-distributor-id" != 5000
and sb."ordered-distributor-id" != 8105
and i."distributor-id" != 8105
and sb.status != 'deleted'
"""
Q_FEATURES_FRANCHISEE = """
select
sb.id as "short-book-1-id" ,
sb."ordered-distributor-id" as "short-book-distributor-id",
sb."store-id",
ss."franchisee-id",
sb."drug-id",
coalesce(sb.quantity, 0) as "original-order",
sb."required-quantity" as "final-unfulfilled",
sb."created-at" as "original-created-at",
sb."re-ordered-at" as "original-created-at-2",
sbi."quantity" as "partial-quantity",
i.id as "invoice-id",
i."distributor-id" as "partial-distributor-id",
i."created-at" as "partial-created-at",
i."approved-at" as "partial-invoiced-at",
ii.id as "invoice-item-id",
ii."drug-id" as "invoice-items-drug-id",
inv.id as "inventory-id",
inv."invoice-item-id" as "inv-invoice-item-id",
inv."purchase-rate" as "distributor-rate",
inv."selling-rate",
d."drug-name",
d.type as "drug_type",
ss."name" as "store-name"
from
"{read_schema}"."short-book-1" sb
left join "{read_schema}"."short-book-invoices" sbi on
sbi."short-book-id" = sb.id
left join "{read_schema}".invoices i on
sbi."invoice-id" = i.id
left join "{read_schema}"."short-book-invoice-items" sbii on
sb.id = sbii."short-book-id"
left join "{read_schema}"."invoice-items" ii on
ii.id = sbii."invoice-item-id"
left join "{read_schema}"."invoice-items-1" ii1 on
ii1."invoice-item-reference" = ii.id
left join "{read_schema}"."inventory-1" inv on
inv."invoice-item-id" = ii1.id
left join "{read_schema}".drugs d on
sb."drug-id" = d.id
left join "{read_schema}".distributors dis on
dis.id = sb."ordered-distributor-id"
left join "{read_schema}".stores s on
i."dc-id" = s.id
left join "{read_schema}".stores ss on
sb."store-id" = ss.id
where
DATEDIFF(day, date(sb."created-at"), current_date) <= {0}
and DATEDIFF(day, date(sb."created-at"), current_date) >= 7
and sb."quantity" > 0
and sb."ordered-distributor-id" != 76
and sb."ordered-distributor-id" != 5000
and sb."ordered-distributor-id" != 8105
and i."distributor-id" != 8105
and sb.status != 'deleted'
and ss."franchisee-id" != 1
"""
Q_DISTRIBUTORS = """
select id as "partial-distributor-id",
name as "partial-distributor-name",
type as "partial-distributor-type"
from "{read_schema}".distributors
"""
def pull_data(time_interval, db, read_schema):
    '''time_interval is the look-back window in days; the most recent 7 days are always excluded in the query'''
df_features = db.get_df(Q_FEATURES.format(time_interval,
read_schema=read_schema))
df_features.columns = [c.replace('-', '_') for c in df_features.columns]
df_distributors = db.get_df(Q_DISTRIBUTORS.format(read_schema=read_schema))
df_distributors.columns = [c.replace('-', '_') for c in df_distributors.columns]
# ensure data types
df_features["distributor_rate"] = df_features["distributor_rate"].astype(float)
df_features["selling_rate"] = df_features["selling_rate"].astype(float)
return df_features, df_distributors
def pull_data_franchisee(time_interval, db, read_schema):
    '''time_interval is the look-back window in days; the most recent 7 days are always excluded in the query'''
df_features = db.get_df(Q_FEATURES_FRANCHISEE.format(time_interval,
read_schema=read_schema))
df_features.columns = [c.replace('-', '_') for c in df_features.columns]
df_distributors = db.get_df(Q_DISTRIBUTORS.format(read_schema=read_schema))
df_distributors.columns = [c.replace('-', '_') for c in df_distributors.columns]
# ensure data types
df_features["distributor_rate"] = df_features["distributor_rate"].astype(float)
df_features["selling_rate"] = df_features["selling_rate"].astype(float)
    return df_features, df_distributors
# ==== end of file: zeno_etl_libs/utils/distributor_ranking/pull_data.py ====
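# Usage sketch for the pull functions above (illustrative; `db` is assumed to
# be an initialised DB helper exposing get_df, and the schema name here is
# hypothetical):
#   df_features, df_distributors = pull_data(
#       time_interval=90, db=db, read_schema="prod2-generico")
# pulls roughly 90 days of short-book / invoice history (the most recent 7 days
# are excluded inside the query) along with the distributor master.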
from datetime import timedelta
class GetData:
"""To fetch required data from SQL and PostGre"""
def __init__(self, store_ids, reset_date, days_to_replenish, days_delta,
db, schema, logger):
"""
Arguments:
store_ids: (list) store id list
reset_date: (datetime.date) format
days_to_replenish: (int) days to skip for inventory replenishment
from date of reset
"""
self.store_ids = str(store_ids).replace('[', '(').replace(']', ')')
self.reset_date = reset_date.strftime('%Y-%m-%d')
self.date_before_90days = (reset_date - timedelta(90)).strftime('%Y-%m-%d')
self.start_date = (reset_date + timedelta(days_to_replenish)).strftime('%Y-%m-%d')
self.end_date = (reset_date + timedelta(days_to_replenish+days_delta-1)).strftime('%Y-%m-%d')
# considering sales period of 28 days (start & end date included in sql)
self.db = db
self.schema = schema
self.logger = logger
def ipc_ss(self, store_id, sql_cut_off_condition):
"""Fetch safety stock table for current IPC store and reset date"""
self.logger.info(f"Fetching ipc_ss data for store_id: {store_id}")
q_ss = """
select *
from "{schema}"."ipc-safety-stock"
where "store-id" = {0} and "reset-date" = '{1}'
{2}
""".format(store_id, self.reset_date, sql_cut_off_condition,
schema=self.schema)
df_ss = self.db.get_df(q_ss)
df_ss.columns = [c.replace('-', '_') for c in df_ss.columns]
df_ss["store_type"] = "ipc"
return df_ss
def non_ipc_ss(self, store_id, sql_cut_off_condition):
"""Fetch safety stock table for current Non-IPC store and reset date"""
self.logger.info(f"Fetching non_ipc_ss data for store_id: {store_id}")
q_ss = """
select *
from "{schema}"."non-ipc-safety-stock"
where "store-id" = {0} and "reset-date" = '{1}'
{2}
""".format(store_id, self.reset_date, sql_cut_off_condition,
schema=self.schema)
df_ss = self.db.get_df(q_ss)
df_ss.columns = [c.replace('-', '_') for c in df_ss.columns]
df_ss["store_type"] = "non_ipc"
return df_ss
def ipc2_ss(self, store_id, sql_cut_off_condition):
"""Fetch safety stock table for IPC2.0 store and reset date"""
self.logger.info(f"Fetching ipc2_ss data for store_id: {store_id}")
q_ss = """
select *
from "{schema}"."ipc2-safety-stock"
where "store-id" = {0} and "reset-date" = '{1}'
{2}
""".format(store_id, self.reset_date, sql_cut_off_condition,
schema=self.schema)
df_ss = self.db.get_df(q_ss)
df_ss.columns = [c.replace('-', '_') for c in df_ss.columns]
df_ss["store_type"] = "ipc2"
return df_ss
def curr_inv(self):
"""Fetch current inventory for all stores"""
self.logger.info("Fetching inventory data")
q_inv = """
SELECT "drug-id" as drug_id,
"store-id" as store_id,
AVG(ptr) AS average_ptr,
SUM("locked-quantity"+quantity+"locked-for-audit"+"locked-for-transfer"
+"locked-for-check"+"locked-for-return") AS current_inventory
FROM "{schema}"."inventory-1"
WHERE "store-id" in {0}
GROUP BY "store-id", "drug-id"
""".format(self.store_ids, schema=self.schema)
df_inv_comb = self.db.get_df(q_inv)
return df_inv_comb
def sales_3m(self):
"""Fetch last 3 months sales data for finding weather NPI or not."""
self.logger.info("Fetching 3 months sales data")
q_3m_sales = """
select
"drug-id", "store-id",
sum("net-quantity") as "net-sales-3m"
from "{schema}".sales
where "store-id" in {0} and
date("created-at") between '{1}' and '{2}'
group by "store-id", "drug-id"
""".format(self.store_ids, self.date_before_90days, self.reset_date,
schema=self.schema)
df_3m_sales_comb = self.db.get_df(q_3m_sales)
df_3m_sales_comb.columns = [c.replace('-', '_') for c in df_3m_sales_comb.columns]
return df_3m_sales_comb
def sales_28day(self):
"""Fetch 28 days sales data after date of reset"""
self.logger.info("Fetching 28 days sales data")
q_sales = """
select
"drug-id", "store-id",
sum("net-quantity") as "net-sales"
from "{schema}".sales
where "store-id" in {0} and
date("created-at") between '{1}' and '{2}'
group by "store-id", "drug-id"
""".format(self.store_ids, self.start_date, self.end_date,
schema=self.schema)
df_sales_comb = self.db.get_df(q_sales)
df_sales_comb.columns = [c.replace('-', '_') for c in df_sales_comb.columns]
return df_sales_comb
def pr_loss_28day(self):
"""Fetch 28 days PR losses after date of reset"""
self.logger.info("Fetching 28 days pr loss data")
q_pr = """
select "drug-id", "store-id",
sum("loss-quantity") as "pr-loss"
from "{schema}"."cfr-patient-request"
where "shortbook-date" between '{1}' and '{2}'
and "store-id" in {0}
group by "store-id", "drug-id"
""".format(self.store_ids, self.start_date, self.end_date,
schema=self.schema)
df_pr_loss_comb = self.db.get_df(q_pr)
df_pr_loss_comb.columns = [c.replace('-', '_') for c in df_pr_loss_comb.columns]
df_pr_loss_comb["pr_loss"] = df_pr_loss_comb["pr_loss"].astype(float)
        return df_pr_loss_comb
# ==== end of file: zeno_etl_libs/utils/fcst_performance/get_data.py ====
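# Usage sketch for the GetData class above (illustrative; `db`, `schema` and
# `logger` are assumed to be initialised elsewhere, and the store ids / date
# are hypothetical; `date` is datetime.date):
#   gd = GetData(store_ids=[2, 4], reset_date=date(2022, 1, 3),
#                days_to_replenish=4, days_delta=28,
#                db=db, schema=schema, logger=logger)
#   df_inv_comb = gd.curr_inv()
#   df_sales_comb = gd.sales_28day()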
from datetime import timedelta
def get_store_ids(reset_date, exclude_stores, db, schema):
"""
    Get IPC, Non-IPC and IPC2.0 store-ids that were reset on the specified reset date
    Parameters:
        reset_date: (datetime.date) format
    Returns:
        store_ids: (list) of ipc, non-ipc and ipc2 store ids
        store_type_map: (list) of corresponding store types ("ipc", "non_ipc", "ipc2")
"""
reset_date = reset_date.strftime('%Y-%m-%d')
if not exclude_stores:
exclude_stores = "(0)"
else:
exclude_stores = tuple(exclude_stores)
# Get list of all store_ids
q_stores = f"""
select "id", name, "opened-at" as opened_at
from "{schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {exclude_stores}
"""
stores_list = list(db.get_df(q_stores)["id"])
stores_list_sql = str(stores_list).replace('[', '(').replace(']', ')')
# Get list of IPC stores which was reset on specified reset date
q_ipc = """
select distinct "store-id"
from "{schema}"."ipc-safety-stock"
where "store-id" in {0} and "reset-date" = '{1}'
""".format(stores_list_sql, reset_date, schema=schema)
ipc_stores = list(db.get_df(q_ipc)["store-id"])
# Get list of Non-IPC stores which was reset on specified reset date
q_non_ipc = """
select distinct "store-id"
from "{schema}"."non-ipc-safety-stock"
where "store-id" in {0} and "reset-date" = '{1}'
""".format(stores_list_sql, reset_date, schema=schema)
non_ipc_stores = list(db.get_df(q_non_ipc)["store-id"])
    # Get list of IPC2.0 stores which were reset on specified reset date
q_ipc2 = """
select distinct "store-id"
from "{schema}"."ipc2-safety-stock"
where "store-id" in {0} and "reset-date" = '{1}'
""".format(stores_list_sql, reset_date, schema=schema)
ipc2_stores = list(db.get_df(q_ipc2)["store-id"])
store_ids = ipc_stores + non_ipc_stores + ipc2_stores
store_type_map = ["ipc"] * len(ipc_stores) \
+ ["non_ipc"] * len(non_ipc_stores) \
+ ["ipc2"] * len(ipc2_stores)
return store_ids, store_type_map
def handle_multiple_resets(reset_date, store_id, store_type, db, schema, logger):
"""
    Check if multiple resets occurred on the specified reset date
Parameters:
reset_date: (datetime.date) format
store_id: (int) format
store_type: (str) format IPC or Non-IPC
Returns:
sql_cut_off_condition: (str) sql condition to use in query for taking
only the latest reset that occurred.
"""
sql_reset_date = reset_date.strftime('%Y-%m-%d')
if store_type == "ipc":
sql_store_type = "ipc"
elif store_type == "non_ipc":
sql_store_type = "non-ipc"
else:
sql_store_type = "ipc2"
q_drug = """
select "drug-id"
from "{schema}"."{0}-safety-stock"
where "store-id" = {1} and "reset-date" = '{2}'
limit 1
""".format(sql_store_type, store_id, sql_reset_date, schema=schema)
rand_drug_id = db.get_df(q_drug)["drug-id"][0]
q_upload_time = """
select *
from "{schema}"."{0}-safety-stock"
where "store-id" = {1} and "reset-date" = '{2}' and "drug-id" = {3}
order by "updated-at" desc
""".format(sql_store_type, store_id, sql_reset_date, rand_drug_id,
schema=schema)
df_upload_time = db.get_df(q_upload_time)["updated-at"]
reset_count = df_upload_time.shape[0]
if reset_count > 1:
logger.info(f"Multiple resets detected for store_id: {store_id}")
cut_off_datetime = df_upload_time[0] - timedelta(minutes=1)
sql_cut_off_condition = """ and "updated-at" > '{}' """.format(
str(cut_off_datetime))
else:
sql_cut_off_condition = ""
    return sql_cut_off_condition
# ==== end of file: zeno_etl_libs/utils/fcst_performance/helper_functions.py ====
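# Usage sketch tying the helpers above together (illustrative; `db`, `schema`
# and `logger` are assumed to be initialised elsewhere):
#   store_ids, store_type_map = get_store_ids(reset_date, exclude_stores=[],
#                                             db=db, schema=schema)
#   for store_id, store_type in zip(store_ids, store_type_map):
#       cut_off = handle_multiple_resets(reset_date, store_id, store_type,
#                                        db, schema, logger)
# `cut_off` is either an empty string or an extra SQL predicate on "updated-at"
# that keeps only the latest reset of that day.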
from datetime import date
from zeno_etl_libs.utils.fcst_performance import metric_calc
import pandas as pd
def cal_fields_store_drug_level(df_ss, df_inv, df_sales, df_pr_loss, df_3m_sales):
"""
Calculate all the fields for store-drug level forecast performance assessment
Parameters:
df_ss: (pd.DataFrame) safety stock data IPC or Non-IPC
df_inv: (pd.DataFrame) current inventory data
df_sales: (pd.DataFrame) 28 days sales data
df_pr_loss: (pd.DataFrame) 28 days pr loss data
df_3m_sales: (pd.DataFrame) 3 month sales before reset (for NPI)
Returns:
        df_sdl: (pd.DataFrame) of store-drug level performance metrics
"""
# Merge Inventory and NPI dataframe
df_inv_npi = pd.merge(df_inv, df_3m_sales, on="drug_id", how="left")
df_inv_npi.net_sales_3m.fillna(0, inplace=True)
df_inv_npi['is_npi'] = (df_inv_npi['net_sales_3m'] == 0)
# Merge sales and PR loss dataframe
df_sales_pr = pd.merge(df_sales, df_pr_loss, on="drug_id", how="left")
df_sales_pr.pr_loss.fillna(0, inplace=True)
# Merge inventory, NPI, sales and PR loss dataframes
df_merged = pd.merge(df_inv_npi, df_sales_pr, on="drug_id", how="left")
df_merged.net_sales.fillna(0, inplace=True)
df_merged.pr_loss.fillna(0, inplace=True)
df_merged = df_merged[["drug_id", "current_inventory", "is_npi",
"net_sales", "pr_loss"]]
df_merged.rename(columns={"net_sales": "net_sales_28days",
"pr_loss": "pr_loss_28days"}, inplace=True)
# Merge all collected data with SS table
df_all_combined = pd.merge(df_ss, df_merged, on="drug_id", how="left")
df_all_combined = df_all_combined[df_all_combined['drug_name'].notna()]
df_all_combined.current_inventory.fillna(0, inplace=True)
df_all_combined.net_sales_28days.fillna(0, inplace=True)
df_all_combined.pr_loss_28days.fillna(0, inplace=True)
df_all_combined.is_npi.fillna(True, inplace=True)
# Creating dataframe of required format
df_all_combined.rename(columns={"curr_inventory": "inventory_at_reset",
"std": "fcst_std", "type": "drug_type",
"current_inventory": "inventory_at_measurement",
"avg_ptr": "fptr"},
inplace=True)
df_all_combined["is_npi"] = df_all_combined["is_npi"].apply(
lambda x: 'Y' if x == True else 'N')
df_sdl = df_all_combined[["store_id", "store_type", "drug_id", "drug_name",
"drug_type", "drug_grade", "reset_date", "bucket",
"is_npi", "model", "percentile", "fcst", "fcst_std",
"safety_stock", "reorder_point", "order_upto_point",
"inventory_at_reset", "fptr", "inventory_at_measurement",
"net_sales_28days", "pr_loss_28days"]].copy()
df_sdl["demand_28days"] = df_sdl["net_sales_28days"] + df_sdl["pr_loss_28days"]
df_sdl["fcst_error"] = df_sdl["fcst"] - df_sdl["demand_28days"]
for index, row in df_sdl.iterrows():
forecast = row["fcst"]
actual = row["demand_28days"]
df_sdl.loc[index, "perc_error"] = metric_calc.pe(forecast, actual)
df_sdl["measurement_date"] = date.today()
    return df_sdl
# ==== end of file: zeno_etl_libs/utils/fcst_performance/data_operations.py ====
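# Usage sketch for cal_fields_store_drug_level above (illustrative assumption
# about the calling job; the merges inside are on drug_id only, so every input
# frame must belong to a single store):
#   df_sdl = cal_fields_store_drug_level(
#       df_ss=gd.ipc_ss(store_id, cut_off),
#       df_inv=df_inv_comb[df_inv_comb["store_id"] == store_id],
#       df_sales=df_sales_comb[df_sales_comb["store_id"] == store_id],
#       df_pr_loss=df_pr_loss_comb[df_pr_loss_comb["store_id"] == store_id],
#       df_3m_sales=df_3m_sales_comb[df_3m_sales_comb["store_id"] == store_id])
# where gd is a GetData instance and the *_comb frames are its combined outputs.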
import pandas as pd
from zeno_etl_libs.utils.distributor_ranking2.pull_data import \
pull_data_dc, pull_data_franchisee
from zeno_etl_libs.utils.distributor_ranking2.calculate_ranks import \
calc_ranks_dc, get_final_ranks_dc, calc_ranks_franchisee, \
get_final_ranks_franchisee
from zeno_etl_libs.utils.distributor_ranking2.preprocess_features import \
preprocess_features_dc, preprocess_features_franchisee
from zeno_etl_libs.utils.distributor_ranking2.calculate_features import \
calculate_features
from zeno_etl_libs.utils.distributor_ranking2.post_process_ranking import \
post_process_ranking_dc, post_process_ranking_franchisee
def ranking_calc_dc(reset_date, time_interval_dc, as_ms_weights_dc_drug_lvl,
as_ms_weights_dc_type_lvl, pr_weights_dc_drug_lvl,
pr_weights_dc_type_lvl, logger, db, schema):
# =============================== PULL DATA ===============================
logger.info("Pulling data for DC")
# add 7 days to time interval since we do not want to include last week's data.
time_interval = time_interval_dc + 7
df_features, df_distributors, df_dc_distributors_mapping, \
df_distributor_drugs = pull_data_dc(
reset_date, time_interval, db, schema)
# ========================== DATA PRE-PROCESSING ==========================
logger.info("Preprocessing data")
df_features = preprocess_features_dc(df_features, df_dc_distributors_mapping,
df_distributor_drugs)
# add distributor name and distributor features here.
df_features = pd.merge(df_features, df_distributors,
on=['partial_distributor_id', 'drug_type'],
how='left', validate='many_to_one')
# ========================== FEATURE CALCULATION ==========================
logger.info("Calculating features")
features = calculate_features(df_features, reset_date, time_interval_dc,
logger, group_cols=['partial_dc_id',
'partial_distributor_id',
'drug_id'])
# add drug type column
features = pd.merge(features,
df_features[['drug_id', 'drug_type']].drop_duplicates(),
on=['drug_id'], how='left', validate='many_to_one')
# add dist info
features = pd.merge(features, df_features[
['partial_distributor_id', 'partial_distributor_name',
'partial_distributor_credit_period', 'drug_type',
'dist_type_portfolio_size']].drop_duplicates(),
on=['partial_distributor_id', 'drug_type'], how='left',
validate='many_to_one')
# add dc name
features = pd.merge(features, df_features[
['partial_dc_id', 'dc_name']].dropna().drop_duplicates(),
on=['partial_dc_id'], validate='many_to_one', how='left')
# add drug name
features = pd.merge(features,
df_features[['drug_id', 'drug_name']].drop_duplicates(),
on=['drug_id'], validate='many_to_one', how='left')
# ========================= CALCULATE RANKS AS/MS =========================
logger.info("Ranking starts AS/MS")
rank_drug_lvl, rank_drug_type_lvl, disq_entries = calc_ranks_dc(
features, as_ms_weights_dc_drug_lvl, as_ms_weights_dc_type_lvl, logger)
final_ranks = get_final_ranks_dc(
rank_drug_lvl, rank_drug_type_lvl, disq_entries, features,
df_distributor_drugs, df_distributors, df_dc_distributors_mapping,
as_ms_weights_dc_drug_lvl, logger)
# ====================== POST PROCESS RANK DFs AS/MS ======================
logger.info("Post processing rank-DFs AS/MS")
final_ranks_as_ms, ranked_features_as_ms = post_process_ranking_dc(
features, rank_drug_lvl, rank_drug_type_lvl, final_ranks,
as_ms_weights_dc_drug_lvl, as_ms_weights_dc_type_lvl)
final_ranks_as_ms["request_type"] = "AS/MS"
ranked_features_as_ms["request_type"] = "AS/MS"
# ========================== CALCULATE RANKS PR ===========================
logger.info("Ranking starts PR")
rank_drug_lvl, rank_drug_type_lvl, disq_entries = calc_ranks_dc(
features, pr_weights_dc_drug_lvl, pr_weights_dc_type_lvl, logger)
final_ranks = get_final_ranks_dc(
rank_drug_lvl, rank_drug_type_lvl, disq_entries, features,
df_distributor_drugs, df_distributors, df_dc_distributors_mapping,
pr_weights_dc_drug_lvl, logger)
# ======================== POST PROCESS RANK DFs PR =======================
logger.info("Post processing rank-DFs PR")
final_ranks_pr, ranked_features_pr = post_process_ranking_dc(
features, rank_drug_lvl, rank_drug_type_lvl, final_ranks,
pr_weights_dc_drug_lvl, pr_weights_dc_type_lvl)
final_ranks_pr["request_type"] = "PR"
ranked_features_pr["request_type"] = "PR"
# =========================== JOIN DFs AS/MS & PR =========================
final_ranks = pd.concat([final_ranks_as_ms, final_ranks_pr], axis=0)
ranked_features = pd.concat([ranked_features_as_ms, ranked_features_pr], axis=0)
return ranked_features, final_ranks
def ranking_calc_franchisee(reset_date, time_interval_franchisee,
franchisee_stores, weights_franchisee_drug_lvl,
weights_franchisee_type_lvl, logger, db, schema):
# =============================== PULL DATA ===============================
logger.info("Pulling data for franchisee")
# add 7 days to time interval since we do not want to include last week's data.
time_interval = time_interval_franchisee + 7
df_features, df_distributors, df_distributor_drugs = pull_data_franchisee(
reset_date, time_interval, franchisee_stores, db, schema)
# ========================== DATA PRE-PROCESSING ==========================
logger.info("Preprocessing data")
df_features = preprocess_features_franchisee(
df_features, df_distributor_drugs, db, schema)
# add distributor name and distributor features here.
df_features = pd.merge(df_features, df_distributors,
on=['partial_distributor_id', 'drug_type'],
how='left', validate='many_to_one')
# ========================== FEATURE CALCULATION ==========================
logger.info("Calculating features")
features = calculate_features(df_features, reset_date, time_interval_franchisee,
logger, group_cols=['store_id',
'partial_distributor_id',
'drug_id'])
# add drug type column
features = pd.merge(features,
df_features[['drug_id', 'drug_type']].drop_duplicates(),
on=['drug_id'], how='left', validate='many_to_one')
# add dist info
features = pd.merge(features, df_features[
['partial_distributor_id', 'partial_distributor_name',
'partial_distributor_credit_period', 'drug_type',
'dist_type_portfolio_size']].drop_duplicates(),
on=['partial_distributor_id', 'drug_type'], how='left',
validate='many_to_one')
# add store name and franchisee_id here.
features = pd.merge(
features, df_features[['store_id', 'store_name', 'franchisee_id']].dropna().drop_duplicates(),
on=['store_id'], validate='many_to_one', how='left')
# add drug name
features = pd.merge(features,
df_features[['drug_id', 'drug_name']].drop_duplicates(),
on=['drug_id'], validate='many_to_one', how='left')
# ============================ CALCULATE RANKS ============================
logger.info("Ranking starts")
rank_drug_lvl, rank_drug_type_lvl = calc_ranks_franchisee(
features, weights_franchisee_drug_lvl, weights_franchisee_type_lvl,
logger)
final_ranks = get_final_ranks_franchisee(
rank_drug_lvl, rank_drug_type_lvl, features, logger)
# ========================= POST PROCESS RANK DFs =========================
logger.info("Post processing rank-DFs")
final_ranks, ranked_features = post_process_ranking_franchisee(
features, rank_drug_lvl, rank_drug_type_lvl, final_ranks,
weights_franchisee_drug_lvl, weights_franchisee_type_lvl)
final_ranks["request_type"] = "ALL"
ranked_features["request_type"] = "ALL"
    return ranked_features, final_ranks
# ==== end of file: zeno_etl_libs/utils/distributor_ranking2/distributor_ranking_calc.py ====
import pandas as pd
import numpy as np
def process_tech_df(final_ranks_dc, final_ranks_franchisee, volume_fraction):
tech_input = pd.concat([final_ranks_dc, final_ranks_franchisee], axis=0)
tech_input['volume_fraction'] = volume_fraction
tech_input.rename(
{"partial_dc_id": "dc_id", "distributor_rank_1": "final_dist_1",
"distributor_rank_2": "final_dist_2", "distributor_rank_3": "final_dist_3"},
axis=1, inplace=True)
# combine volume fraction split for cases where total distributors < 3
volume_fraction_split = tech_input['volume_fraction'].str.split(
pat='-', expand=True).rename(
columns={0: 'volume_fraction_1',
1: 'volume_fraction_2',
2: 'volume_fraction_3'})
tech_input['volume_fraction_1'] = volume_fraction_split[
'volume_fraction_1'].astype(float)
tech_input['volume_fraction_2'] = volume_fraction_split[
'volume_fraction_2'].astype(float)
tech_input['volume_fraction_3'] = volume_fraction_split[
'volume_fraction_3'].astype(float)
tech_input['volume_fraction_2'] = np.where(
tech_input['final_dist_3'].isna(),
tech_input['volume_fraction_2'] +
tech_input['volume_fraction_3'],
tech_input['volume_fraction_2'])
tech_input['volume_fraction_3'] = np.where(
tech_input['final_dist_3'].isna(), 0,
tech_input['volume_fraction_3'])
tech_input['volume_fraction_1'] = np.where(
tech_input['final_dist_2'].isna(),
tech_input['volume_fraction_1'] +
tech_input['volume_fraction_2'],
tech_input['volume_fraction_1'])
tech_input['volume_fraction_2'] = np.where(
tech_input['final_dist_2'].isna(), 0,
tech_input['volume_fraction_2'])
tech_input['volume_fraction'] = tech_input['volume_fraction_1'].astype(
'str') + '-' + tech_input['volume_fraction_2'].astype(
'str') + '-' + tech_input['volume_fraction_3'].astype('str')
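    # Worked example (hypothetical split): with volume_fraction '0.5-0.3-0.2',
    # a missing final_dist_3 collapses it to '0.5-0.5-0.0'; if final_dist_2 is
    # also missing it becomes '1.0-0.0-0.0'.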
tech_input = tech_input[
['dc_id', 'store_id', 'franchisee_id', 'drug_id',
'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
# adhoc changes by tech, table restructure
tech_input = tech_input.reset_index(
drop=True).reset_index().rename(columns={'index': 'id'})
tech_input[['volume_fraction_1', 'volume_fraction_2',
'volume_fraction_3']] = tech_input[
'volume_fraction'].str.split('-', 3, expand=True)
tech_input.loc[tech_input['request_type'] == 'AS/MS',
'request_type'] = 'manual-short/auto-short'
tech_input.loc[tech_input['request_type'] ==
'PR', 'request_type'] = 'patient-request'
tech_input.loc[tech_input['request_type'] ==
'ALL', 'request_type'] = 'all'
volume_fraction_melt = pd.melt(tech_input, id_vars=['id'],
value_vars=['volume_fraction_1',
'volume_fraction_2',
'volume_fraction_3']).sort_values(
by='id')
distributor_melt = pd.melt(tech_input, id_vars=['id'],
value_vars=['final_dist_1',
'final_dist_2',
'final_dist_3']).sort_values(
by='id').rename(columns={'value': 'distributor_id'})
distributor_ranking_rule_values = pd.merge(distributor_melt,
volume_fraction_melt,
left_index=True,
right_index=True,
suffixes=('', '_y'))
distributor_ranking_rule_values = distributor_ranking_rule_values[
['id', 'distributor_id', 'value']].rename(
columns={'id': 'distributor_ranking_rule_id'}).reset_index(
drop=True)
distributor_ranking_rule_values = distributor_ranking_rule_values.reset_index().rename(
columns={'index': 'id'})
# drop null values in distributor_id(for cases where historical distributors are < 3)
distributor_ranking_rule_values = distributor_ranking_rule_values[
~distributor_ranking_rule_values['distributor_id'].isna()]
# convert distributor_id in int format
distributor_ranking_rule_values['distributor_id'] = \
distributor_ranking_rule_values['distributor_id'].astype(int)
distributor_ranking_rules = tech_input[['id', 'drug_id', 'dc_id',
'franchisee_id', 'store_id',
'drug_type', 'request_type']]
    return distributor_ranking_rules, distributor_ranking_rule_values
# ==== end of file: zeno_etl_libs/utils/distributor_ranking2/tech_processing.py ====
import pandas as pd
import numpy as np
def preprocess_features_dc(df_features, df_dc_distributors_mapping,
df_distributor_drugs):
# remove those entries where no order is given to dc or the order value doesn't exist
df_features = df_features[(df_features['original_order'] > 0) & (
~df_features['original_order'].isna())]
df_features = df_features.drop_duplicates()
    # due to stock rotation, invoice_item_id could repeat, so remove duplicates.
    # drop_duplicates treats all NaN invoice_item_id rows as duplicates of each
    # other and would keep only one of them, so split NaN and non-NaN rows and
    # de-duplicate only the non-NaN part
df_features_1 = df_features[~df_features[
'invoice_item_id'].isna()]
df_features_2 = df_features[df_features['invoice_item_id'].isna()]
df_features_1 = df_features_1.drop_duplicates(subset=['invoice_item_id'])
# concat back na values after you separate them
df_features = pd.concat([df_features_1, df_features_2])
#remove cases where mrp is 0 otherwise margin becomes infinity
df_features = df_features[df_features['mrp'] != 0]
# if distributor id isn't assigned in short book then remove it.
df_features = df_features[(~df_features['short_book_distributor_id'].isna())
| (~df_features['partial_distributor_id'].isna())]
# for those cases where invoice doesn't exist, take distributor as short book distributor
df_features['partial_distributor_id'] = df_features['partial_distributor_id'].fillna(
df_features['short_book_distributor_id'])
# if no dc information is present then remove those cases.
df_features = df_features[((~df_features['partial_dc_id'].isna()) | (
~df_features['forward_dc_id'].isna()))]
# for those cases where invoice doesn't exist, take invoice dc as obtained from sdm table
df_features['partial_dc_id'] = df_features['partial_dc_id'].fillna(
df_features['forward_dc_id'])
df_features['partial_created_at'] = pd.to_datetime(
df_features['partial_created_at'], errors='coerce')
df_features['partial_invoiced_at'] = pd.to_datetime(
df_features['partial_invoiced_at'], errors='coerce')
df_features['original_created_at'] = pd.to_datetime(
df_features['original_created_at'], errors='coerce')
df_features['original_created_at_2'] = pd.to_datetime(
df_features['original_created_at_2'], errors='coerce')
# append number of invoices against each sb id.
invoice_count = df_features.groupby(['short_book_1_id']).agg(
invoice_count=('invoice_id', 'count')).reset_index()
df_features = pd.merge(df_features, invoice_count, on=[
'short_book_1_id'], how='left')
# fill those cases where no invoice is present with zero.
df_features['invoice_count'] = df_features['invoice_count'].fillna(0)
# to avoid cases where wrong product is received, we compare with invoice items drugs id as well.
df_features['is_lost'] = np.where(
(df_features['invoice_items_drug_id'] != df_features['drug_id']) | (df_features['partial_invoiced_at'].isna()), 1, 0)
# for new drugs where drug id hasn't been assigned yet, ranking won't be generated until the drug id is assigned.
df_features = df_features[~df_features['drug_id'].isna()]
# remove entries for which drug type hasn't been assigned
df_features = df_features[~df_features['drug_type'].isna()]
# remove discontinued or banned products
df_features = df_features[
~((df_features['drug_type'] == 'discontinued-products') | (df_features['drug_type'] == 'banned'))]
# sanity check
assert df_features[df_features['invoice_count'] ==
0].shape[0] == df_features[df_features['invoice_id'].isna()].shape[0]
# filter out distributor-drugs not part of distributor portfolio
df_features = pd.merge(df_features, df_distributor_drugs,
on=['partial_distributor_id', 'drug_id'],
how='inner', validate='many_to_one')
# filter out distributors not part of active dc-distributor mapping
df_features = pd.merge(df_features, df_dc_distributors_mapping,
on=['partial_dc_id', 'partial_distributor_id'],
how='inner', validate='many_to_one')
return df_features
def preprocess_features_franchisee(df_features, df_distributor_drugs,
db, read_schema):
# remove those entries where no order is given to dc or the order value doesn't exist
df_features = df_features[(df_features['original_order'] > 0) & (
~df_features['original_order'].isna())]
df_features = df_features.drop_duplicates()
    # due to stock rotation, invoice_item_id could repeat, so remove duplicates.
    # drop_duplicates treats all NaN invoice_item_id rows as duplicates of each
    # other and would keep only one of them, so split NaN and non-NaN rows and
    # de-duplicate only the non-NaN part
df_features_1 = df_features[~df_features[
'invoice_item_id'].isna()]
df_features_2 = df_features[df_features['invoice_item_id'].isna()]
df_features_1 = df_features_1.drop_duplicates(subset=['invoice_item_id'])
# concat back na values after you separate them
df_features = pd.concat([df_features_1, df_features_2])
#remove cases where mrp is 0 otherwise margin becomes infinity
df_features = df_features[df_features['mrp'] != 0]
# if distributor id isn't assigned in short book then remove it.
df_features = df_features[(~df_features['short_book_distributor_id'].isna())
| (~df_features['partial_distributor_id'].isna())]
# for those cases where invoice doesn't exist, take distributor as short book distributor
df_features['partial_distributor_id'] = df_features['partial_distributor_id'].fillna(
df_features['short_book_distributor_id'])
df_features['partial_created_at'] = pd.to_datetime(
df_features['partial_created_at'], errors='coerce')
df_features['partial_invoiced_at'] = pd.to_datetime(
df_features['partial_invoiced_at'], errors='coerce')
df_features['original_created_at'] = pd.to_datetime(
df_features['original_created_at'], errors='coerce')
df_features['original_created_at_2'] = pd.to_datetime(
df_features['original_created_at_2'], errors='coerce')
# append number of invoices against each sb id.
invoice_count = df_features.groupby(['short_book_1_id']).agg(
invoice_count=('invoice_id', 'count')).reset_index()
df_features = pd.merge(df_features, invoice_count, on=[
'short_book_1_id'], how='left')
# fill those cases where no invoice is present with zero.
df_features['invoice_count'] = df_features['invoice_count'].fillna(0)
# to avoid cases where wrong product is received, we compare with invoice items drugs id as well.
df_features['is_lost'] = np.where(
(df_features['invoice_items_drug_id'] != df_features['drug_id']) | (df_features['partial_invoiced_at'].isna()), 1, 0)
# for new drugs where drug id hasn't been assigned yet, ranking won't be generated until the drug id is assigned.
df_features = df_features[~df_features['drug_id'].isna()]
# remove entries for which drug type hasn't been assigned
df_features = df_features[~df_features['drug_type'].isna()]
# remove discontinued or banned products
df_features = df_features[
~((df_features['drug_type'] == 'discontinued-products') | (df_features['drug_type'] == 'banned'))]
# sanity check
assert df_features[df_features['invoice_count'] ==
0].shape[0] == df_features[df_features['invoice_id'].isna()].shape[0]
# filter out distributor-drugs not part of distributor portfolio
df_features = pd.merge(df_features, df_distributor_drugs,
on=['partial_distributor_id', 'drug_id'],
how='inner', validate='many_to_one')
    return df_features
# ==== end of file: zeno_etl_libs/utils/distributor_ranking2/preprocess_features.py ====
import pandas as pd
import numpy as np
import math
from zeno_etl_libs.utils.distributor_ranking2.correction_flag import add_corr_flag
def calc_ranks_dc(features, weights_dc_drug_lvl, weights_dc_type_lvl, logger):
# =========================== DRUG LEVEL RANKING ==========================
logger.info("DC-drug level ranking starts")
# select only relevant columns required for ranking
    rank_drug_lvl = features[
        ['partial_dc_id', 'partial_distributor_id', 'drug_id', 'margin',
         'wtd_ff', 'dist_type_portfolio_size', 'partial_distributor_credit_period',
         'request_volume_dc_dist']].copy()  # copy to avoid mutating a slice of features
# set significant digits for features with decimal points
rank_drug_lvl["margin"] = np.round(rank_drug_lvl["margin"], 3)
rank_drug_lvl["wtd_ff"] = np.round(rank_drug_lvl["wtd_ff"], 3)
rank_drug_lvl["request_volume_dc_dist"] = np.round(
rank_drug_lvl["request_volume_dc_dist"], 3)
# rank each features
rank_drug_lvl["rank_margin"] = \
rank_drug_lvl.groupby(['partial_dc_id', 'drug_id'])['margin'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_ff"] = \
rank_drug_lvl.groupby(['partial_dc_id', 'drug_id'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_lvl.groupby(['partial_dc_id', 'drug_id'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_lvl["rank_dc_dist_credit_period"] = \
rank_drug_lvl.groupby(['partial_dc_id'])[
'partial_distributor_credit_period'].rank(method='dense',
ascending=False)
    rank_drug_lvl['rank_dc_dist_volume'] = rank_drug_lvl.groupby(['partial_dc_id'])[
        'request_volume_dc_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin & ff
rank_drug_lvl["wtd_rank"] = (rank_drug_lvl["rank_margin"] *
weights_dc_drug_lvl["margin"]) + \
(rank_drug_lvl["rank_ff"] *
weights_dc_drug_lvl["ff"])
rank_drug_lvl["wtd_rank"] = np.round(rank_drug_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["partial_dc_id", "drug_id"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_dc_dist_credit_period",
"rank_dc_dist_volume",
"rank_dist_type_portfolio_size"]
sort_asc_order = group_col_sort_asc_order + [True, True, True, True]
rank_drug_lvl = rank_drug_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_lvl['index'] = rank_drug_lvl.index
# final ranking based on preference order
rank_drug_lvl["final_rank"] = \
rank_drug_lvl.groupby(['partial_dc_id', 'drug_id'])['index'].rank(
method='first', ascending=True)
rank_drug_lvl.drop('index', axis=1, inplace=True)
# ========================== D.TYPE LEVEL RANKING =========================
logger.info("DC-drug-type level ranking starts")
# select only relevant columns required for ranking
rank_drug_type_lvl = features[
['partial_dc_id', 'partial_distributor_id', 'drug_id', 'drug_type',
'margin', 'wtd_ff', 'dist_type_portfolio_size',
'partial_distributor_credit_period', 'request_volume_dc_dist']]
# group by dc-distributor-drug_type level and calculate features
rank_drug_type_lvl = rank_drug_type_lvl.groupby(
["partial_dc_id", "partial_distributor_id", "drug_type"],
as_index=False).agg({"margin": np.average, "wtd_ff": np.average,
"dist_type_portfolio_size": "first",
"partial_distributor_credit_period": "first",
"request_volume_dc_dist": "first"})
    # round features to 3 decimal places
rank_drug_type_lvl["margin"] = np.round(rank_drug_type_lvl["margin"], 3)
rank_drug_type_lvl["wtd_ff"] = np.round(rank_drug_type_lvl["wtd_ff"], 3)
rank_drug_type_lvl["request_volume_dc_dist"] = np.round(
rank_drug_type_lvl["request_volume_dc_dist"], 3)
    # rank each feature
rank_drug_type_lvl["rank_margin"] = \
rank_drug_type_lvl.groupby(['partial_dc_id', 'drug_type'])['margin'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_ff"] = \
rank_drug_type_lvl.groupby(['partial_dc_id', 'drug_type'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_type_lvl.groupby(['partial_dc_id', 'drug_type'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_type_lvl["rank_dc_dist_credit_period"] = \
rank_drug_type_lvl.groupby(['partial_dc_id'])[
'partial_distributor_credit_period'].rank(method='dense',
ascending=False)
rank_drug_type_lvl['rank_dc_dist_volume'] = \
rank_drug_type_lvl.groupby(['partial_dc_id'])[
'request_volume_dc_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin, ff & portfolio size
rank_drug_type_lvl["wtd_rank"] = (rank_drug_type_lvl["rank_margin"] *
weights_dc_type_lvl["margin"]) + \
(rank_drug_type_lvl["rank_ff"] *
weights_dc_type_lvl["ff"]) + \
(rank_drug_type_lvl["rank_dist_type_portfolio_size"] *
weights_dc_type_lvl["portfolio_size"])
rank_drug_type_lvl["wtd_rank"] = np.round(rank_drug_type_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["partial_dc_id", "drug_type"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_dc_dist_credit_period",
"rank_dc_dist_volume"]
sort_asc_order = group_col_sort_asc_order + [True, True, True]
rank_drug_type_lvl = rank_drug_type_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_type_lvl['index'] = rank_drug_type_lvl.index
# final ranking based on preference order
rank_drug_type_lvl["final_rank"] = \
rank_drug_type_lvl.groupby(['partial_dc_id', 'drug_type'])['index'].rank(
method='first', ascending=True)
rank_drug_type_lvl.drop('index', axis=1, inplace=True)
# ================== DISQUALIFY POOR DC-DRUG-DISTRIBUTORS =================
    # Disqualify distributors that rank 3 or worse for a dc-drug and are poor
    # in terms of wtd_ff / fulfilled requests. The vacated rank 3 slot is then
    # filled by the slot-filling logic with another distributor, which gets a
    # chance to fulfil the order. If that distributor performs well it will be
    # ranked better in subsequent resets; otherwise it gets disqualified the
    # same way later. This keeps the ranking constantly searching for better
    # distributors instead of locking onto the same poor distributor over and
    # over again.
disq_entries = rank_drug_lvl.merge(
features[["partial_dc_id", "partial_distributor_id", "drug_id", "ff_requests"]],
on=["partial_dc_id", "partial_distributor_id", "drug_id"], how="left")
# disqualify condition
disq_entries["disqualify"] = np.where(
(disq_entries["final_rank"] >= 3) &
((disq_entries["ff_requests"] == 0) | (disq_entries["wtd_ff"] < 0.4)),
1, 0)
disq_entries = disq_entries.loc[(disq_entries["disqualify"] == 1)]
disq_entries = disq_entries[["partial_dc_id", "partial_distributor_id",
"drug_id", "disqualify"]]
return rank_drug_lvl, rank_drug_type_lvl, disq_entries
def get_final_ranks_dc(rank_drug_lvl, rank_drug_type_lvl, disq_entries,
features, df_distributor_drugs, df_distributors,
df_dc_distributors_mapping, weights_dc_drug_lvl, logger):
"""
get final ranking format and apply slot filling logic to rank slots
which are empty.
"""
final_ranks = rank_drug_lvl[["partial_dc_id", "drug_id"]].drop_duplicates()
final_ranks = final_ranks.merge(
features[["drug_id", "drug_type"]].drop_duplicates(), on="drug_id",
how="left")
# remove disqualified entries
rank_drug_lvl = rank_drug_lvl.merge(
disq_entries, on=["partial_dc_id", "partial_distributor_id", "drug_id"],
how="left")
rank_drug_lvl = rank_drug_lvl.loc[rank_drug_lvl["disqualify"] != 1]
logger.info("Creating final df format")
# make final ranking df
for rank in [1, 2, 3]:
df_rank = rank_drug_lvl.loc[rank_drug_lvl["final_rank"] == rank]
df_rank = df_rank[
["partial_dc_id", "drug_id", "partial_distributor_id"]]
df_rank.rename({"partial_distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks = final_ranks.merge(df_rank,
on=["partial_dc_id", "drug_id"],
how="left")
final_ranks[f"distributor_rank_{rank}"] = final_ranks[
f"distributor_rank_{rank}"].astype(float)
# ================== FILL MISSING RANK SLOTS DC-DRUG LVL ==================
# get all dc-drug with missing slots
logger.info("Get allowable dc-drug-distributors to fill slots")
missing_rank_dc_drugs = final_ranks.loc[
(final_ranks["distributor_rank_2"].isna()) | (final_ranks["distributor_rank_3"].isna())]
missing_rank_dc_drugs = missing_rank_dc_drugs[["partial_dc_id", "drug_id", "drug_type"]]
# list all missing drugs
list_missing_rank_drugs = list(missing_rank_dc_drugs["drug_id"].unique())
# get all distributors with missing drugs in their portfolio
select_distributor_drugs = df_distributor_drugs.loc[
df_distributor_drugs["drug_id"].isin(list_missing_rank_drugs)]
# assign it to all dc
available_mappings = missing_rank_dc_drugs.merge(select_distributor_drugs,
on="drug_id", how="left")
# merge distributor details
available_mappings = available_mappings.merge(
df_distributors[["partial_distributor_id", "partial_distributor_credit_period"]].drop_duplicates(),
on="partial_distributor_id", how="left")
# calculate features on drug_type level for dc-distributors (margin & ff)
distributor_type_lvl_features = features.groupby(
["partial_dc_id", "partial_distributor_id", "drug_type"],
as_index=False).agg({"margin": np.average, "wtd_ff": np.average,
"request_volume_dc_dist": "first"})
available_mappings = available_mappings.merge(
distributor_type_lvl_features, on=["partial_dc_id",
"partial_distributor_id",
"drug_type"], how="left")
    # fill NAs and round to 3 decimal places
available_mappings["margin"] = available_mappings["margin"].fillna(0)
available_mappings["wtd_ff"] = available_mappings["wtd_ff"].fillna(0)
available_mappings["request_volume_dc_dist"] = available_mappings[
"request_volume_dc_dist"].fillna(0)
available_mappings["margin"] = np.round(available_mappings["margin"], 3)
available_mappings["wtd_ff"] = np.round(available_mappings["wtd_ff"], 3)
available_mappings["request_volume_dc_dist"] = np.round(
available_mappings["request_volume_dc_dist"], 3)
# remove inactive dc-distributors
available_mappings = available_mappings.merge(
df_dc_distributors_mapping, on=["partial_dc_id", "partial_distributor_id"],
how="inner")
# remove disqualified entries
available_mappings = available_mappings.merge(
disq_entries, on=["partial_dc_id", "partial_distributor_id", "drug_id"],
how="left")
available_mappings = available_mappings.loc[available_mappings["disqualify"] != 1]
# ranking distributors based on dc-drug level logic
logger.info("Ranking allowable dc-drug-distributors")
available_mapping_ranked = available_mappings.copy()
available_mapping_ranked["rank_margin"] = \
available_mapping_ranked.groupby(['partial_dc_id', 'drug_id'])[
'margin'].rank(method='dense', ascending=False)
available_mapping_ranked["rank_ff"] = \
available_mapping_ranked.groupby(['partial_dc_id', 'drug_id'])[
'wtd_ff'].rank(method='dense', ascending=False)
available_mapping_ranked["rank_dc_dist_credit_period"] = \
available_mapping_ranked.groupby(['partial_dc_id'])[
'partial_distributor_credit_period'].rank(method='dense',
ascending=False)
available_mapping_ranked['rank_dc_dist_volume'] = \
available_mapping_ranked.groupby(['partial_dc_id'])[
'request_volume_dc_dist'].rank(method='dense', ascending=False)
# calculate wtd.ranks
available_mapping_ranked["wtd_rank"] = (available_mapping_ranked["rank_margin"] *
weights_dc_drug_lvl["margin"]) + \
(available_mapping_ranked["rank_ff"] *
weights_dc_drug_lvl["ff"])
available_mapping_ranked["wtd_rank"] = np.round(
available_mapping_ranked["wtd_rank"], 1)
# set sorting order
group_cols = ["partial_dc_id", "drug_id"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_dc_dist_credit_period",
"rank_dc_dist_volume"]
sort_asc_order = group_col_sort_asc_order + [True, True, True]
available_mapping_ranked = available_mapping_ranked.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
available_mapping_ranked['index'] = available_mapping_ranked.index
# get final ranks
available_mapping_ranked["final_rank"] = \
available_mapping_ranked.groupby(['partial_dc_id', 'drug_id'])[
'index'].rank(method='first', ascending=True)
available_mapping_ranked.drop('index', axis=1, inplace=True)
pre_corr = final_ranks.copy() # to compare pre-post correction
# adding auxiliary ranking to empty slot dc-drugs
logger.info("Filling empty rank slots with ranked distributors")
for rank in [1, 2, 3]:
df_rank = available_mapping_ranked.loc[
available_mapping_ranked["final_rank"] == rank]
df_rank = df_rank[
["partial_dc_id", "drug_id", "partial_distributor_id"]]
df_rank.rename(
{"partial_distributor_id": f"aux_distributor_rank_{rank}"}, axis=1,
inplace=True)
final_ranks = final_ranks.merge(df_rank,
on=["partial_dc_id", "drug_id"],
how="left")
final_ranks[f"aux_distributor_rank_{rank}"] = final_ranks[
f"aux_distributor_rank_{rank}"].astype(float)
for index, row in final_ranks.iterrows():
# if rank 2 empty and aux_rank present
if math.isnan(row["distributor_rank_2"]) & \
(not math.isnan(row["aux_distributor_rank_1"])):
if row["aux_distributor_rank_1"] != row["distributor_rank_1"]:
final_ranks.loc[index, "distributor_rank_2"] = row[
"aux_distributor_rank_1"]
elif not math.isnan(row["aux_distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_2"] = row[
"aux_distributor_rank_2"]
for index, row in final_ranks.iterrows():
# if rank 1 & 2 filled, rank 3 empty and aux_ranks present
if (not math.isnan(row["distributor_rank_1"])) & \
(not math.isnan(row["distributor_rank_2"])) & \
(math.isnan(row["distributor_rank_3"])):
if (not math.isnan(row["aux_distributor_rank_1"])) & \
(row["aux_distributor_rank_1"] != row["distributor_rank_1"]) & \
(row["aux_distributor_rank_1"] != row["distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_3"] = row[
"aux_distributor_rank_1"]
elif (not math.isnan(row["aux_distributor_rank_2"])) & \
(row["aux_distributor_rank_2"] != row["distributor_rank_1"]) & \
(row["aux_distributor_rank_2"] != row["distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_3"] = row[
"aux_distributor_rank_2"]
elif (not math.isnan(row["aux_distributor_rank_3"])) & \
(row["aux_distributor_rank_3"] != row["distributor_rank_1"]) & \
(row["aux_distributor_rank_3"] != row["distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_3"] = row[
"aux_distributor_rank_3"]
final_ranks = final_ranks.drop(
["aux_distributor_rank_1", "aux_distributor_rank_2",
"aux_distributor_rank_3"], axis=1)
post_corr = final_ranks.copy() # to compare pre-post correction
# add correction flags where rank2 & rank3 slot filling took place
logger.info("Adding correction flags for filled rank slots")
final_ranks = add_corr_flag(final_ranks, pre_corr, post_corr,
col_to_compare="distributor_rank_2",
corr_flag="R2F",
group_cols=["partial_dc_id", "drug_id"])
final_ranks = add_corr_flag(final_ranks, pre_corr, post_corr,
col_to_compare="distributor_rank_3",
corr_flag="R3F",
group_cols=["partial_dc_id", "drug_id"])
# ================== COMBINE DC-DRUG LVL & DC-TYPE LVL ===================
# add dc-drug-type level ranking
logger.info("Adding dc-drug-type level ranking to final df")
final_ranks_type_lvl = rank_drug_type_lvl[
["partial_dc_id", "drug_type"]].drop_duplicates()
# create dc-type level final ranking format
for rank in [1, 2, 3]:
df_rank = rank_drug_type_lvl.loc[
rank_drug_type_lvl["final_rank"] == rank]
df_rank = df_rank[
["partial_dc_id", "drug_type", "partial_distributor_id"]]
df_rank.rename({"partial_distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks_type_lvl = final_ranks_type_lvl.merge(df_rank,
on=["partial_dc_id",
"drug_type"],
how="left")
final_ranks_type_lvl[f"distributor_rank_{rank}"] = final_ranks_type_lvl[
f"distributor_rank_{rank}"].astype(float)
# combine dc-drug lvl and dc-drug-type lvl
final_ranks = pd.concat([final_ranks, final_ranks_type_lvl], axis=0)
final_ranks["correction_flags"] = final_ranks["correction_flags"].fillna("")
return final_ranks
def calc_ranks_franchisee(features, weights_franchisee_drug_lvl,
weights_franchisee_type_lvl, logger):
# =========================== DRUG LEVEL RANKING ==========================
logger.info("Franchisee-store-drug level ranking starts")
# select only relevant columns required for ranking
    rank_drug_lvl = features[
        ['store_id', 'partial_distributor_id', 'drug_id', 'margin',
         'wtd_ff', 'dist_type_portfolio_size', 'partial_distributor_credit_period',
         'request_volume_store_dist']].copy()
    # round features with decimal points to 3 decimal places
rank_drug_lvl["margin"] = np.round(rank_drug_lvl["margin"], 3)
rank_drug_lvl["wtd_ff"] = np.round(rank_drug_lvl["wtd_ff"], 3)
rank_drug_lvl["request_volume_store_dist"] = np.round(
rank_drug_lvl["request_volume_store_dist"], 3)
    # rank each feature
rank_drug_lvl["rank_margin"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])['margin'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_ff"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_lvl["rank_store_dist_credit_period"] = \
rank_drug_lvl.groupby(['store_id'])[
'partial_distributor_credit_period'].rank(method='dense',
ascending=False)
    rank_drug_lvl['rank_store_dist_volume'] = rank_drug_lvl.groupby(['store_id'])[
        'request_volume_store_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin & ff
rank_drug_lvl["wtd_rank"] = (rank_drug_lvl["rank_margin"] *
weights_franchisee_drug_lvl["margin"]) + \
(rank_drug_lvl["rank_ff"] *
weights_franchisee_drug_lvl["ff"])
rank_drug_lvl["wtd_rank"] = np.round(rank_drug_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["store_id", "drug_id"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_store_dist_credit_period",
"rank_store_dist_volume",
"rank_dist_type_portfolio_size"]
sort_asc_order = group_col_sort_asc_order + [True, True, True, True]
rank_drug_lvl = rank_drug_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_lvl['index'] = rank_drug_lvl.index
# final ranking based on preference order
rank_drug_lvl["final_rank"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])['index'].rank(
method='first', ascending=True)
rank_drug_lvl.drop('index', axis=1, inplace=True)
# ========================== D.TYPE LEVEL RANKING =========================
logger.info("Franchisee-drug-type level ranking starts")
# select only relevant columns required for ranking
    rank_drug_type_lvl = features[
        ['store_id', 'partial_distributor_id', 'drug_id', 'drug_type',
         'margin', 'wtd_ff', 'dist_type_portfolio_size',
         'partial_distributor_credit_period', 'request_volume_store_dist']].copy()
    # group by store-distributor-drug_type level and calculate features
rank_drug_type_lvl = rank_drug_type_lvl.groupby(
["store_id", "partial_distributor_id", "drug_type"],
as_index=False).agg({"margin": np.average, "wtd_ff": np.average,
"dist_type_portfolio_size": "first",
"partial_distributor_credit_period": "first",
"request_volume_store_dist": "first"})
    # round features to 3 decimal places
rank_drug_type_lvl["margin"] = np.round(rank_drug_type_lvl["margin"], 3)
rank_drug_type_lvl["wtd_ff"] = np.round(rank_drug_type_lvl["wtd_ff"], 3)
rank_drug_type_lvl["request_volume_store_dist"] = np.round(
rank_drug_type_lvl["request_volume_store_dist"], 3)
    # rank each feature
rank_drug_type_lvl["rank_margin"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])['margin'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_ff"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_type_lvl["rank_store_dist_credit_period"] = \
rank_drug_type_lvl.groupby(['store_id'])[
'partial_distributor_credit_period'].rank(method='dense',
ascending=False)
rank_drug_type_lvl['rank_store_dist_volume'] = \
rank_drug_type_lvl.groupby(['store_id'])[
'request_volume_store_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin, ff & portfolio size
rank_drug_type_lvl["wtd_rank"] = (rank_drug_type_lvl["rank_margin"] *
weights_franchisee_type_lvl["margin"]) + \
(rank_drug_type_lvl["rank_ff"] *
weights_franchisee_type_lvl["ff"]) + \
(rank_drug_type_lvl["rank_dist_type_portfolio_size"] *
weights_franchisee_type_lvl["portfolio_size"])
rank_drug_type_lvl["wtd_rank"] = np.round(rank_drug_type_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["store_id", "drug_type"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_store_dist_credit_period",
"rank_store_dist_volume"]
sort_asc_order = group_col_sort_asc_order + [True, True, True]
rank_drug_type_lvl = rank_drug_type_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_type_lvl['index'] = rank_drug_type_lvl.index
# final ranking based on preference order
rank_drug_type_lvl["final_rank"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])['index'].rank(
method='first', ascending=True)
rank_drug_type_lvl.drop('index', axis=1, inplace=True)
return rank_drug_lvl, rank_drug_type_lvl
def get_final_ranks_franchisee(rank_drug_lvl, rank_drug_type_lvl, features,
logger):
"""
get final ranking format. no slot filling logic for franchisee stores.
"""
final_ranks = rank_drug_lvl[["store_id", "drug_id"]].drop_duplicates()
final_ranks = final_ranks.merge(
features[["drug_id", "drug_type"]].drop_duplicates(), on="drug_id",
how="left")
logger.info("Creating final df format")
# make final ranking df
for rank in [1, 2, 3]:
df_rank = rank_drug_lvl.loc[rank_drug_lvl["final_rank"] == rank]
df_rank = df_rank[
["store_id", "drug_id", "partial_distributor_id"]]
df_rank.rename({"partial_distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks = final_ranks.merge(df_rank,
on=["store_id", "drug_id"],
how="left")
final_ranks[f"distributor_rank_{rank}"] = final_ranks[
f"distributor_rank_{rank}"].astype(float)
# add franchisee-store-drug-type level ranking
logger.info("Adding franchisee-store-drug-typ level ranking to final df")
final_ranks_type_lvl = rank_drug_type_lvl[
["store_id", "drug_type"]].drop_duplicates()
# create store-type level final ranking format
for rank in [1, 2, 3]:
df_rank = rank_drug_type_lvl.loc[
rank_drug_type_lvl["final_rank"] == rank]
df_rank = df_rank[
["store_id", "drug_type", "partial_distributor_id"]]
df_rank.rename({"partial_distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks_type_lvl = final_ranks_type_lvl.merge(df_rank,
on=["store_id",
"drug_type"],
how="left")
final_ranks_type_lvl[f"distributor_rank_{rank}"] = final_ranks_type_lvl[
f"distributor_rank_{rank}"].astype(float)
# combine store-drug lvl and store-drug-type lvl
final_ranks = pd.concat([final_ranks, final_ranks_type_lvl], axis=0)
return final_ranks | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/distributor_ranking2/calculate_ranks.py | calculate_ranks.py |
import pandas as pd
import numpy as np
def post_process_ranking_dc(features, rank_drug_lvl, rank_drug_type_lvl,
final_ranks, weights_dc_drug_lvl, weights_dc_type_lvl):
# add drug_id dummy column in type lvl
rank_drug_type_lvl["drug_id"] = np.nan
# add weights column
rank_drug_type_lvl["weights"] = str(weights_dc_type_lvl)
rank_drug_lvl["weights"] = str(weights_dc_drug_lvl)
# additional details to be added
drugs_info = features[["drug_id", "drug_type", "drug_name"]].drop_duplicates()
dc_info = features[["partial_dc_id", "dc_name"]].drop_duplicates()
distributor_info = features[["partial_distributor_id", "partial_distributor_name"]].drop_duplicates()
# adding details into drug_lvl_df
rank_drug_lvl = rank_drug_lvl.merge(drugs_info, on="drug_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(dc_info, on="partial_dc_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(
distributor_info, on="partial_distributor_id", how="left")
# adding details into drug_type_lvl_df
rank_drug_type_lvl = rank_drug_type_lvl.merge(dc_info,
on="partial_dc_id", how="left")
rank_drug_type_lvl = rank_drug_type_lvl.merge(
distributor_info, on="partial_distributor_id", how="left")
# combine drug_lvl and drug_type_lvl df
ranked_features = pd.concat([rank_drug_lvl, rank_drug_type_lvl], axis=0)
# add details into final_ranks df
final_ranks = final_ranks.merge(dc_info, on="partial_dc_id", how="left")
final_ranks = final_ranks.merge(drugs_info[["drug_id", "drug_name"]], on="drug_id", how="left")
    # add placeholder franchisee columns because both dc & franchisee
    # features/ranks need to be written to the same table.
final_ranks["franchisee_id"] = 1 # zippin id
final_ranks["store_id"] = np.nan
final_ranks["store_name"] = ""
ranked_features["franchisee_id"] = 1 # zippin id
ranked_features["store_id"] = np.nan
ranked_features["store_name"] = ""
ranked_features["request_volume_store_dist"] = np.nan
ranked_features["rank_store_dist_credit_period"] = np.nan
ranked_features["rank_store_dist_volume"] = np.nan
return final_ranks, ranked_features
def post_process_ranking_franchisee(features, rank_drug_lvl, rank_drug_type_lvl,
final_ranks, weights_franchisee_drug_lvl,
weights_franchisee_type_lvl):
# add drug_id dummy column in type lvl
rank_drug_type_lvl["drug_id"] = np.nan
# add weights column
rank_drug_type_lvl["weights"] = str(weights_franchisee_type_lvl)
rank_drug_lvl["weights"] = str(weights_franchisee_drug_lvl)
# additional details to be added
drugs_info = features[["drug_id", "drug_type", "drug_name"]].drop_duplicates()
store_info = features[["store_id", "store_name", "franchisee_id"]].drop_duplicates()
distributor_info = features[["partial_distributor_id",
"partial_distributor_name"]].drop_duplicates()
# adding details into drug_lvl_df
rank_drug_lvl = rank_drug_lvl.merge(drugs_info, on="drug_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(store_info, on="store_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(
distributor_info, on="partial_distributor_id", how="left")
# adding details into drug_type_lvl_df
rank_drug_type_lvl = rank_drug_type_lvl.merge(store_info,
on="store_id",
how="left")
rank_drug_type_lvl = rank_drug_type_lvl.merge(
distributor_info, on="partial_distributor_id", how="left")
# combine drug_lvl and drug_type_lvl df
ranked_features = pd.concat([rank_drug_lvl, rank_drug_type_lvl], axis=0)
# add details into final_ranks df
final_ranks = final_ranks.merge(store_info, on="store_id", how="left")
final_ranks = final_ranks.merge(drugs_info[["drug_id", "drug_name"]],
on="drug_id", how="left")
    # add placeholder dc columns because both dc & franchisee
    # features/ranks need to be written to the same table.
final_ranks["partial_dc_id"] = np.nan
final_ranks["dc_name"] = ""
final_ranks["correction_flags"] = ""
ranked_features["partial_dc_id"] = np.nan
ranked_features["dc_name"] = ""
ranked_features["request_volume_dc_dist"] = np.nan
ranked_features["rank_dc_dist_credit_period"] = np.nan
ranked_features["rank_dc_dist_volume"] = np.nan
return final_ranks, ranked_features | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/distributor_ranking2/post_process_ranking.py | post_process_ranking.py |
from functools import reduce
import numpy as np
import pandas as pd
import datetime as dt
def calculate_features(df_features, reset_date, time_interval, logger, group_cols):
"""
DC-LEVEL: group_cols=['partial_dc_id','partial_distributor_id', 'drug_id']
FRANCHISEE-LEVEL: group_cols=['store_id','partial_distributor_id', 'drug_id']
"""
dfx = df_features[df_features['invoice_count'] != 0]
# ========================== MARGIN CALCULATION ==========================
# (follows same logic as in distributor ranking 1.0)
logger.info("Calculating margin")
df_margin = dfx.copy()
df_margin['margin'] = (df_margin['mrp'] -
df_margin['distributor_rate']) / df_margin[
'mrp']
df_margin = df_margin.groupby(group_cols).agg(
margin=('margin', 'mean')).reset_index()
# sanity check
assert df_margin.shape[0] == dfx[group_cols].drop_duplicates().shape[0]
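    # Illustrative margin calculation (hypothetical numbers): mrp = 100 and
    # distributor_rate = 80 give margin = (100 - 80) / 100 = 0.2 for that line
    # item; the mean of these line-item margins per dc-distributor-drug is the
    # margin feature used for ranking.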
# ====================== WTD.FULFILLMENT CALCULATION ======================
logger.info("Calculating wtd.ff")
# get length of 3 period split
period_length = round(time_interval / 3)
# p1 : t-1 (latest period)
# p2 : t-2 period
# p3 : t-3 period
p1_end = pd.Timestamp(reset_date - dt.timedelta(6))
p1_start = p1_end - dt.timedelta(period_length)
p2_end = p1_start
p2_start = p2_end - dt.timedelta(period_length)
p3_end = p2_start
p3_start = p3_end - dt.timedelta(period_length + 1)
df_ff_1 = ff_calc(dfx, group_cols, p_start=p1_start, p_end=p1_end, period_flag="p1")
df_ff_2 = ff_calc(dfx, group_cols, p_start=p2_start, p_end=p2_end, period_flag="p2")
df_ff_3 = ff_calc(dfx, group_cols, p_start=p3_start, p_end=p3_end, period_flag="p3")
df_ff_comb = pd.concat([df_ff_1, df_ff_2, df_ff_3], axis=0)
    # count, for each group, how many of the 3 periods have data present
df_ff_period_cnt = df_ff_comb.groupby(group_cols, as_index=False).agg(
{"period_flag": "count"})
df_ff_period_cnt.rename({"period_flag": "period_count"}, axis=1,
inplace=True)
# Cases with 3 periods present
# weighted by 0.5, 0.3, 0.2 for p1, p2, p3 respectively
df_3p = df_ff_period_cnt.loc[df_ff_period_cnt["period_count"] == 3][
group_cols]
df_ff_comb_3p = df_ff_comb.merge(df_3p, on=group_cols, how="inner")
    # derive the period number from the period flag ("p1" -> 1 etc.) so that
    # the weights below line up with the actual period of each row
    df_ff_comb_3p['period'] = df_ff_comb_3p['period_flag'].str[1].astype(int)
df_ff_comb_3p['weights'] = np.where(df_ff_comb_3p['period'] == 1, 0.5, 0)
df_ff_comb_3p['weights'] = np.where(df_ff_comb_3p['period'] == 2, 0.3,
df_ff_comb_3p['weights'])
df_ff_comb_3p['weights'] = np.where(df_ff_comb_3p['period'] == 3, 0.2,
df_ff_comb_3p['weights'])
df_ff_comb_3p["wtd_ff"] = df_ff_comb_3p["ff"] * df_ff_comb_3p["weights"]
df_ff_comb_3p = df_ff_comb_3p.groupby(group_cols, as_index=False).agg(
{"wtd_ff": "sum"})
# Cases with 2 periods present
# weighted by 0.6, 0.4 for latest, early respectively
df_2p = df_ff_period_cnt.loc[df_ff_period_cnt["period_count"] == 2][
group_cols]
df_ff_comb_2p = df_ff_comb.merge(df_2p, on=group_cols, how="inner")
    # derive the period number from the period flag and rank the two available
    # periods within each group by recency (lower period number = more recent)
    # so that 0.6 goes to the latest period and 0.4 to the earlier one
    df_ff_comb_2p['period'] = df_ff_comb_2p['period_flag'].str[1].astype(int)
    df_ff_comb_2p['recency'] = df_ff_comb_2p.groupby(group_cols)['period'].rank(
        method='first', ascending=True)
    df_ff_comb_2p['weights'] = np.where(df_ff_comb_2p['recency'] == 1, 0.6, 0.4)
df_ff_comb_2p["wtd_ff"] = df_ff_comb_2p["ff"] * df_ff_comb_2p["weights"]
df_ff_comb_2p = df_ff_comb_2p.groupby(group_cols, as_index=False).agg(
{"wtd_ff": "sum"})
# Cases with 1 period present
# weighted by 1 for whatever period present
df_1p = df_ff_period_cnt.loc[df_ff_period_cnt["period_count"] == 1][
group_cols]
df_ff_comb_1p = df_ff_comb.merge(df_1p, on=group_cols, how="inner")
df_ff_comb_1p = df_ff_comb_1p[group_cols + ["ff"]]
df_ff_comb_1p.rename({"ff": "wtd_ff"}, axis=1, inplace=True)
# combine all
df_ff_comb = pd.concat([df_ff_comb_3p, df_ff_comb_2p, df_ff_comb_1p],
axis=0)
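    # Worked example of the recency weighting (hypothetical fill rates): a
    # dc-distributor-drug with ff = 0.9 in p1, 0.6 in p2 and 0.3 in p3 gets
    # wtd_ff = 0.9*0.5 + 0.6*0.3 + 0.3*0.2 = 0.69; if only p1 and p3 had data,
    # the 0.6/0.4 split gives wtd_ff = 0.9*0.6 + 0.3*0.4 = 0.66; with a single
    # period the raw ff is used as-is.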
# ======================== DIST VOLUME CALCULATION ========================
if group_cols[0] == "partial_dc_id":
base_lvl = 'dc'
else:
base_lvl = 'store'
logger.info(f"Calculating {base_lvl}-distributor volume")
df_vol = df_features.groupby(group_cols).agg(
total_lost=('is_lost', 'sum'),
total_requests=('is_lost', 'count')).reset_index()
df_vol["ff_requests"] = df_vol["total_requests"] - df_vol["total_lost"]
df_vol["ff_requests"] = np.where(df_vol["ff_requests"] < 0, 0,
df_vol["ff_requests"])
df_vol.drop(["total_lost", "total_requests"], axis=1, inplace=True)
if base_lvl == 'dc':
# calculate request volume dc
request_volume_dc = df_vol.groupby("partial_dc_id", as_index=False).agg(
total_requests_dc=("ff_requests", "sum"))
request_volume_dc_dist = df_vol.groupby(
["partial_dc_id", "partial_distributor_id"], as_index=False).agg(
total_requests_dc_dist=("ff_requests", "sum"))
df_vol = df_vol.merge(request_volume_dc_dist,
on=["partial_dc_id", "partial_distributor_id"],
how="left")
df_vol = df_vol.merge(request_volume_dc, on="partial_dc_id", how="left")
df_vol["request_volume_dc_dist"] = df_vol["total_requests_dc_dist"] / \
df_vol["total_requests_dc"]
df_vol.drop(["total_requests_dc_dist", "total_requests_dc"], axis=1,
inplace=True)
else:
# calculate request volume store (franchisee)
request_volume_store = df_vol.groupby("store_id", as_index=False).agg(
total_requests_store=("ff_requests", "sum"))
request_volume_store_dist = df_vol.groupby(
["store_id", "partial_distributor_id"], as_index=False).agg(
total_requests_store_dist=("ff_requests", "sum"))
df_vol = df_vol.merge(request_volume_store_dist,
on=["store_id", "partial_distributor_id"],
how="left")
df_vol = df_vol.merge(request_volume_store, on="store_id", how="left")
df_vol["request_volume_store_dist"] = df_vol["total_requests_store_dist"] / \
df_vol["total_requests_store"]
df_vol.drop(["total_requests_store_dist", "total_requests_store"], axis=1,
inplace=True)
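    # Illustrative volume share (hypothetical counts): if a dc fulfilled 1000
    # requests in total and 250 of them came through a given distributor, that
    # distributor's request_volume_dc_dist is 250 / 1000 = 0.25; the analogous
    # store-level share is computed in the franchisee branch above.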
# =========================== COMPILE FEATURES ===========================
logger.info("Compiling all features")
meg_list = [df_margin, df_ff_comb, df_vol]
features = reduce(
lambda left, right: pd.merge(left, right,
on=group_cols,
how='outer'), meg_list)
    # rounding off to 3 decimal places
features["margin"] = np.round(features["margin"], 3)
features["wtd_ff"] = np.round(features["wtd_ff"], 3)
features[f"request_volume_{base_lvl}_dist"] = np.round(
features[f"request_volume_{base_lvl}_dist"], 3)
return features
def ff_calc(dfx, group_cols, p_start=None, p_end=None, period_flag="None"):
"""
Base FF calculation same as in distributor ranking 1.0
"""
# split base data by period
dfx = dfx.loc[
(dfx["original_created_at"] > p_start) &
(dfx["original_created_at"] < p_end)]
df_sorted = dfx.groupby(['short_book_1_id'], as_index=False).apply(
lambda x: x.sort_values(by=['partial_invoiced_at']))
# for multiple invoices, calculate cumulative fulfilled quantities
df_sorted = df_sorted.groupby(['short_book_1_id']).apply(
lambda x: x['partial_quantity'].cumsum()).reset_index().rename(
columns={'partial_quantity': 'cum_partial_quantity'})
df_sorted = df_sorted.set_index('level_1')
df_fulfillment = pd.merge(df_sorted, dfx, left_index=True,
right_index=True, how='left', suffixes=('', '_y'))
assert df_fulfillment['short_book_1_id'].equals(
df_fulfillment['short_book_1_id_y'])
df_fulfillment = df_fulfillment[
['short_book_1_id'] + group_cols + ['original_order', 'partial_quantity',
'cum_partial_quantity']]
# cum required quantity is quantity left after subtracting cum quantity from all previous invoices.
df_fulfillment['cum_required_quantity'] = df_fulfillment['original_order'] - \
df_fulfillment['cum_partial_quantity']
    # the real required quantity at the time an order is placed is the quantity
    # left unfulfilled by the previous invoice; hence the shift by 1
df_fulfillment['actual_required'] = df_fulfillment.groupby(
['short_book_1_id']).shift(1)['cum_required_quantity']
# fill single invoices with the original order
df_fulfillment['actual_required'] = df_fulfillment['actual_required'].fillna(
df_fulfillment['original_order'])
    # clamp actual_required to 0 when earlier invoices already covered the original order
df_fulfillment.loc[df_fulfillment['actual_required']
< 0, 'actual_required'] = 0
df_fulfillment['redundant_order_flag'] = np.where(
df_fulfillment['actual_required'] == 0, 1, 0)
df_fulfillment = df_fulfillment[['short_book_1_id'] + group_cols +
['original_order', 'partial_quantity',
'actual_required', 'redundant_order_flag']]
df_fulfillment['ff'] = df_fulfillment['partial_quantity'] / \
df_fulfillment['actual_required']
    # where nothing was actually required but quantity was still invoiced, cap ff at 1
    # (these redundant rows are dropped below anyway)
df_fulfillment.loc[(df_fulfillment['actual_required'] == 0) & (
df_fulfillment['partial_quantity'] > 0), 'ff'] = 1
df_fulfillment.loc[(df_fulfillment['ff'] > 1), 'ff'] = 1
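    # Worked example of the fulfilment math (hypothetical order): an order of
    # 10 units fulfilled by two invoices of 6 and 3 units gives, for the first
    # invoice, actual_required = 10 (the shift back-fills it with the original
    # order) and ff = 6 / 10 = 0.6, and for the second invoice
    # actual_required = 10 - 6 = 4 and ff = 3 / 4 = 0.75. ff is capped at 1 and
    # rows where nothing was actually required are flagged redundant and
    # dropped below.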
# removed redundant orders here.
df_ff = df_fulfillment[df_fulfillment['redundant_order_flag'] != 1].groupby(
group_cols).agg(ff=('ff', 'mean')).reset_index()
# add period_flag
df_ff["period_flag"] = period_flag
return df_ff | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/distributor_ranking2/calculate_features.py | calculate_features.py |
Q_FEATURES = """
select
sb.id as "short-book-1-id" ,
sb."ordered-distributor-id" as "short-book-distributor-id",
sb."drug-id",
coalesce(sb.quantity, 0) as "original-order",
sb."required-quantity" as "final-unfulfilled",
sb."created-at" as "original-created-at",
sb."re-ordered-at" as "original-created-at-2",
sbi."quantity" as "partial-quantity",
i.id as "invoice-id",
i."dc-id" as "partial-dc-id",
i."distributor-id" as "partial-distributor-id",
i."created-at" as "partial-created-at",
i."approved-at" as "partial-invoiced-at",
ii.id as "invoice-item-id",
ii."drug-id" as "invoice-items-drug-id",
inv.id as "inventory-id",
inv."invoice-item-id" as "inv-invoice-item-id",
inv."purchase-rate" as "distributor-rate",
inv."selling-rate",
inv."mrp",
d."drug-name",
d.type as "drug_type",
sdm."forward-dc-id",
s.name as "dc-name"
from
"{schema}"."short-book-1" sb
left join "{schema}"."short-book-invoices" sbi on
sbi."short-book-id" = sb.id
left join "{schema}".invoices i on
sbi."invoice-id" = i.id
left join "{schema}"."short-book-invoice-items" sbii on
sb.id = sbii."short-book-id"
left join "{schema}"."invoice-items" ii on
ii.id = sbii."invoice-item-id"
left join "{schema}"."invoice-items-1" ii1 on
ii1."invoice-item-reference" = ii.id
left join "{schema}"."inventory-1" inv on
inv."invoice-item-id" = ii1.id
left join "{schema}".drugs d on
sb."drug-id" = d.id
left join "{schema}".distributors dis on
dis.id = sb."ordered-distributor-id"
left join "{schema}"."store-dc-mapping" sdm on
sb."store-id" = sdm."store-id"
and dis.type = sdm."drug-type"
left join "{schema}".stores s on
i."dc-id" = s.id
where
DATEDIFF(day, date(sb."created-at"), '{reset_date}') <= {time_interval}
and DATEDIFF(day, date(sb."created-at"), '{reset_date}') >= 7
and sb."quantity" > 0
and sb."ordered-distributor-id" != 76
and sb."ordered-distributor-id" != 5000
and sb."ordered-distributor-id" != 8105
and i."distributor-id" != 8105
and sb.status != 'deleted'
and sb."store-id" in (2,4,7,16,54,82,231,234,244,278,297,23,28,39,216,218,235,229,280,8,13,21,26,31,45,188,208,221,222,230,241,260,264,20,36,61,134,160,184,195,215,224,226,245,252,273,281)
"""
Q_FEATURES_FRANCHISEE = """
select
sb.id as "short-book-1-id" ,
sb."ordered-distributor-id" as "short-book-distributor-id",
sb."store-id",
ss."franchisee-id",
sb."drug-id",
coalesce(sb.quantity, 0) as "original-order",
sb."required-quantity" as "final-unfulfilled",
sb."created-at" as "original-created-at",
sb."re-ordered-at" as "original-created-at-2",
sbi."quantity" as "partial-quantity",
i.id as "invoice-id",
i."distributor-id" as "partial-distributor-id",
i."created-at" as "partial-created-at",
i."approved-at" as "partial-invoiced-at",
ii.id as "invoice-item-id",
ii."drug-id" as "invoice-items-drug-id",
inv.id as "inventory-id",
inv."invoice-item-id" as "inv-invoice-item-id",
inv."purchase-rate" as "distributor-rate",
inv."selling-rate",
inv."mrp",
d."drug-name",
d.type as "drug_type",
ss."name" as "store-name"
from
"{schema}"."short-book-1" sb
left join "{schema}"."short-book-invoices" sbi on
sbi."short-book-id" = sb.id
left join "{schema}".invoices i on
sbi."invoice-id" = i.id
left join "{schema}"."short-book-invoice-items" sbii on
sb.id = sbii."short-book-id"
left join "{schema}"."invoice-items" ii on
ii.id = sbii."invoice-item-id"
left join "{schema}"."invoice-items-1" ii1 on
ii1."invoice-item-reference" = ii.id
left join "{schema}"."inventory-1" inv on
inv."invoice-item-id" = ii1.id
left join "{schema}".drugs d on
sb."drug-id" = d.id
left join "{schema}".distributors dis on
dis.id = sb."ordered-distributor-id"
left join "{schema}".stores s on
i."dc-id" = s.id
left join "{schema}".stores ss on
sb."store-id" = ss.id
where
DATEDIFF(day, date(sb."created-at"), '{reset_date}') <= {time_interval}
and DATEDIFF(day, date(sb."created-at"), '{reset_date}') >= 7
and sb."quantity" > 0
and sb."ordered-distributor-id" != 76
and sb."ordered-distributor-id" != 5000
and sb."ordered-distributor-id" != 8105
and i."distributor-id" != 8105
and sb.status != 'deleted'
and ss."franchisee-id" != 1
{franchisee_stores_execute_query}
"""
Q_DISTRIBUTORS = """
select db.id as "partial-distributor-id",
db.name as "partial-distributor-name",
db."credit-period" as "partial-distributor-credit-period",
d."type" as "drug-type", count(distinct dd."drug-id") as "dist-type-portfolio-size"
from "{schema}".distributors db
left join "{schema}"."distributor-drugs" dd on db.id = dd."distributor-id"
left join "{schema}".drugs d on dd."drug-id" = d.id
group by "partial-distributor-id", "partial-distributor-name",
"partial-distributor-credit-period", "drug-type"
"""
Q_DC_DISTRIBUTOR_MAPPING = """
select "dc-id" as "partial-dc-id", "distributor-id" as "partial-distributor-id"
from "{schema}"."dc-distributor-mapping" ddm
where "is-active" = 1
group by "dc-id" , "distributor-id"
"""
Q_DISTRIBUTOR_DRUGS = """
select "distributor-id" as "partial-distributor-id" , "drug-id"
from "{schema}"."distributor-drugs" dd
group by "distributor-id" , "drug-id"
"""
def pull_data_dc(reset_date, time_interval, db, schema):
df_features = db.get_df(Q_FEATURES.format(
reset_date=reset_date, time_interval=time_interval, schema=schema))
df_features.columns = [c.replace('-', '_') for c in df_features.columns]
df_distributors = db.get_df(Q_DISTRIBUTORS.format(schema=schema))
df_distributors.columns = [c.replace('-', '_') for c in df_distributors.columns]
df_distributors = df_distributors.dropna()
df_distributors = df_distributors.loc[df_distributors["drug_type"] != '']
df_dc_distributors_mapping = db.get_df(Q_DC_DISTRIBUTOR_MAPPING.format(schema=schema))
df_dc_distributors_mapping.columns = [c.replace('-', '_') for c in
df_dc_distributors_mapping.columns]
df_distributor_drugs = db.get_df(Q_DISTRIBUTOR_DRUGS.format(schema=schema))
df_distributor_drugs.columns = [c.replace('-', '_') for c in
df_distributor_drugs.columns]
df_distributor_drugs.drop_duplicates(inplace=True)
# ensure data types
df_features["distributor_rate"] = df_features["distributor_rate"].astype(float)
df_features["selling_rate"] = df_features["selling_rate"].astype(float)
df_features["mrp"] = df_features["mrp"].astype(float)
return df_features, df_distributors, df_dc_distributors_mapping, df_distributor_drugs
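# Usage sketch (illustrative; `rs_db` and the schema name below are
# assumptions, not part of this module):
#
# df_features, df_distributors, df_dc_distributors_mapping, df_distributor_drugs = \
#     pull_data_dc(reset_date="2022-06-01", time_interval=90,
#                  db=rs_db, schema="prod2-generico")
#
# `time_interval` is the look-back window in days ending 7 days before
# `reset_date`, as encoded in the DATEDIFF filters of Q_FEATURES.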
def pull_data_franchisee(reset_date, time_interval, franchisee_stores,
db, schema):
if franchisee_stores == [0]:
franchisee_stores_execute_query = ""
else:
franchisee_stores_execute_query = f"""
and sb."store-id" in {str(franchisee_stores).replace('[', '(').replace(']',')')}
"""
df_features = db.get_df(Q_FEATURES_FRANCHISEE.format(
reset_date=reset_date, time_interval=time_interval,
franchisee_stores_execute_query=franchisee_stores_execute_query,
schema=schema))
df_features.columns = [c.replace('-', '_') for c in df_features.columns]
df_distributors = db.get_df(Q_DISTRIBUTORS.format(schema=schema))
df_distributors.columns = [c.replace('-', '_') for c in df_distributors.columns]
df_distributors = df_distributors.dropna()
df_distributor_drugs = db.get_df(Q_DISTRIBUTOR_DRUGS.format(schema=schema))
df_distributor_drugs.columns = [c.replace('-', '_') for c in
df_distributor_drugs.columns]
df_distributor_drugs.drop_duplicates(inplace=True)
# ensure data types
df_features["distributor_rate"] = df_features["distributor_rate"].astype(float)
df_features["selling_rate"] = df_features["selling_rate"].astype(float)
df_features["mrp"] = df_features["mrp"].astype(float)
return df_features, df_distributors, df_distributor_drugs | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/distributor_ranking2/pull_data.py | pull_data.py |
import numpy as np
# Global Queries
Q_REPEATABLE = """
SELECT
id AS "drug-id",
"is-repeatable"
FROM
"{schema}".drugs
WHERE
"is-repeatable" = 1
"""
Q_PTR = """
select
"drug-id",
AVG(ptr) as ptr
FROM
"{schema}"."inventory-1"
GROUP BY
"drug-id"
"""
Q_STORES = """
select
id as "store-id",
name as "store-name"
FROM
"{schema}".stores
"""
Q_DRUG_INFO = """
select
id as "drug-id",
"drug-name",
type,
category
FROM
"{schema}".drugs
"""
# Queries with parameters
def prep_data_from_sql(query_pass, db):
data_fetched = db.get_df(query_pass)
data_fetched.columns = [c.replace('-', '_') for c in data_fetched.columns]
return data_fetched
def query_drug_grade(store_id, schema):
query = """
SELECT
"drug-id",
"drug-grade"
FROM
"{schema}"."drug-order-info"
WHERE
"store-id" = {0}
""".format(store_id, schema=schema)
return query
def query_max_zero(store_id, schema):
query = """
SELECT
"store-id",
"drug-id"
FROM
"{schema}"."drug-order-info"
WHERE
"store-id" = {0}
and max = 0
""".format(store_id, schema=schema)
return query
def query_inventory(store_id, schema):
query = """
SELECT
"store-id",
"drug-id",
SUM(quantity) AS "current-inventory"
FROM
"{schema}"."inventory-1"
WHERE
"store-id" = {0}
GROUP BY
"store-id",
"drug-id"
""".format(store_id, schema=schema)
return query
def get_drug_info(store_id, db, schema):
# Inventory and PTR info for order value
# Also, drug-type and drug-grade
q_inv = query_inventory(store_id, schema)
data_inv = prep_data_from_sql(q_inv, db)
data_ptr = prep_data_from_sql(Q_PTR.format(schema=schema), db)
data_ptr["ptr"] = data_ptr["ptr"].astype(float)
data_drug_info = prep_data_from_sql(Q_DRUG_INFO.format(schema=schema), db)
q_drug_grade = query_drug_grade(store_id, schema)
data_drug_grade = prep_data_from_sql(q_drug_grade, db)
data_stores = prep_data_from_sql(Q_STORES.format(schema=schema), db)
return data_inv, data_ptr, data_drug_info, data_drug_grade, data_stores
def order_value_report(ss_drug_sales):
ss_drug_sales['to_order_quantity'] = np.where(
ss_drug_sales['current_inventory'] < ss_drug_sales['safety_stock'],
ss_drug_sales['max'] - ss_drug_sales['current_inventory'], 0
)
ss_drug_sales['to_order_value'] = (
ss_drug_sales['to_order_quantity'] * ss_drug_sales['ptr'])
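    # Illustrative numbers: with current_inventory = 2, safety_stock = 4 and
    # max = 10, to_order_quantity = 10 - 2 = 8; at ptr = 35.0 that contributes
    # 8 * 35.0 = 280.0 to to_order_value for its type/store/grade group.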
order_value = ss_drug_sales.groupby(
['type', 'store_name', 'drug_grade']). \
agg({'to_order_quantity': 'sum', 'to_order_value': 'sum'}). \
reset_index()
return order_value | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/new_stores/helper_functions.py | helper_functions.py |
from datetime import datetime
from datetime import timedelta
from zeno_etl_libs.db.db import PostGre
from scipy.stats import norm
from zeno_etl_libs.utils.new_stores.new_store_stock_triggers import *
from zeno_etl_libs.utils.new_stores.helper_functions import *
def new_stores_ss_calc(store_id, run_date, db, schema, logger):
#####################################################
# Final function for new stores (1-3 months) safety stock
# Combines base and triggers algorithm
#####################################################
# Get demand
data_demand = get_demand(store_id, db, schema)
# Get lead time
data_lt, lt_store_mean, lt_store_std = get_lead_time(store_id, run_date)
# Service level - hardcoded
service_level = 0.95
z = norm.ppf(service_level)
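    # e.g. norm.ppf(0.95) ~= 1.645, i.e. safety stock is sized to cover demand
    # over lead time in roughly 95% of replenishment cycles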
#####################################################
# SS calculation - Base algo
#####################################################
data = ss_calc(data_demand, data_lt, lt_store_mean, lt_store_std, z, db, schema)
data['algo_type'] = 'base'
logger.info("Length Base algo data {}".format(len(data)))
# Max>0
data_forecast_pos = data[data['max'] > 0].copy()
logger.info("Length Base algo forecast positive - data {}".format(len(data_forecast_pos)))
#####################################################
# Triggers
#####################################################
# Put max==0 logic here, and pass those drug-ids, for given store
data_algo_max0 = data[data['max'] == 0][['drug_id']].drop_duplicates()
data_algo_max0_list = data_algo_max0['drug_id'].to_list()
logger.info("Max 0 drugs from base algo, length is {}".format(len(data_algo_max0_list)))
    # But this is max 0 from the base algo; there may be other max 0 drugs in drug-order-info
# Fetching them
# Formatted SQL queries
q_max0 = query_max_zero(store_id, schema)
data_doi_max0 = prep_data_from_sql(q_max0, db)
data_doi_max0 = data_doi_max0[['drug_id']].drop_duplicates()
logger.info("Max 0 drugs from mysql drug-order-info, length is {}".format(len(data_doi_max0)))
# Remove drugs for which forecast is already positive
data_forecast_pos_list = data_forecast_pos['drug_id'].drop_duplicates().to_list()
data_doi_max0_forecast0 = data_doi_max0[~data_doi_max0['drug_id'].isin(data_forecast_pos_list)]
logger.info("Max 0 drugs from mysql drug-order-info, after removing forecast positive,"
"length is {}".format(len(data_doi_max0_forecast0)))
# Append both and take unique
data_doi_max0_forecast0_append = data_doi_max0_forecast0[~data_doi_max0_forecast0['drug_id'].isin(
data_algo_max0_list)]
logger.info("Max 0 drugs from mysql drug-order-info, non overlapping with forecast 0, "
"length is {}".format(len(data_doi_max0_forecast0_append)))
max0_drugs_df = data_algo_max0.append(data_doi_max0_forecast0_append)
max0_drugs_df = max0_drugs_df.drop_duplicates(subset='drug_id')
logger.info("Final Max 0 drugs, length is {}".format(len(max0_drugs_df)))
triggers_data, triggers_summary, \
triggers_store_report = triggers_combined(store_id, run_date,
max0_drugs_df, db, schema)
triggers_data = triggers_data[['drug_id', 'min', 'safety_stock', 'max']]
triggers_data['algo_type'] = 'non_sales_triggers'
# Output to s3 bucket
# triggers_summary.to_csv(output_dir_path + f'triggers_summary_{store_id}_{run_date}.csv',
# index=False)
# triggers_store_report.to_csv(output_dir_path +
# f'triggers_store_report_{store_id}_{run_date}.csv', index=False)
logger.info("Length Triggers algo data raw {}".format(len(triggers_data)))
# Remove those that are already part of base algo and already max>0
drugs_base = data_forecast_pos['drug_id'].drop_duplicates().to_list()
# Overlapping
triggers_data_overlap = triggers_data[triggers_data['drug_id'].isin(drugs_base)]
logger.info("Length triggers algo data overlapping {}".format(len(triggers_data_overlap)))
triggers_data_append = triggers_data[~triggers_data['drug_id'].isin(drugs_base)]
logger.info("Length triggers algo data non-overlapping {}".format(len(triggers_data_append)))
# Append base algo, and triggers algo output
data_final = data_forecast_pos.append(triggers_data_append)
logger.info("Length data final {}".format(len(data_final)))
# Put store id
data_final['store_id'] = store_id
# Final schema
data_final = data_final[['store_id', 'drug_id', 'min', 'safety_stock', 'max', 'algo_type']]
return data_final
def get_demand(store_id, db, schema):
# sales query
q_sales = f"""
select "store-id", "drug-id", date("created-at") as "sales-date",
sum("net-quantity") as "net-sales-quantity"
from "{schema}".sales s
where "store-id" = {store_id}
group by "store-id", "drug-id", "sales-date"
"""
data_s = db.get_df(q_sales)
data_s.columns = [c.replace('-', '_') for c in data_s.columns]
data_s['sales_date'] = pd.to_datetime(data_s['sales_date'])
# cfr pr loss
q_cfr_pr = f"""
select "store-id", "drug-id",
"attributed-loss-date" as "sales-date",
sum("loss-quantity") as "loss-quantity"
from "{schema}"."cfr-patient-request"
where "store-id" = {store_id}
and "drug-id" > 0
and "loss-quantity" > 0
group by "store-id", "drug-id", "attributed-loss-date"
"""
data_cfr_pr = db.get_df(q_cfr_pr)
data_cfr_pr["loss-quantity"] = data_cfr_pr["loss-quantity"].astype(float)
data_cfr_pr['sales-date'] = pd.to_datetime(data_cfr_pr['sales-date'])
data_cfr_pr.columns = [c.replace('-', '_') for c in data_cfr_pr.columns]
# Merge
merge_data = data_s.merge(data_cfr_pr, how='outer', on=['store_id', 'drug_id', 'sales_date'])
for i in ['net_sales_quantity', 'loss_quantity']:
merge_data[i] = merge_data[i].fillna(0)
merge_data['demand_quantity'] = merge_data['net_sales_quantity'] + merge_data['loss_quantity']
data_demand = merge_data.groupby(['drug_id', 'sales_date'])['demand_quantity'].sum().reset_index()
data_demand = data_demand.sort_values(by=['drug_id', 'sales_date'])
return data_demand
def get_lead_time(store_id, run_date):
# Shortbook is normally created after some delay, of actual trigger event
sb_creation_delay_ethical = 1
sb_creation_delay_other = 1
sb_creation_delay_generic = 2
# Fetch data last 'N' days
end_date = str(datetime.strptime(run_date, '%Y-%m-%d') - timedelta(7))
begin_date = str(datetime.strptime(run_date, '%Y-%m-%d') - timedelta(97))
# ==== TEMP READ FROM PG ====
pg = PostGre()
pg.open_connection()
# ===========================
lead_time_query = '''
select
store_id,
drug_id,
drug_type,
status,
created_at,
received_at
from
ops_fulfillment
where
request_type = 'Auto Short'
and store_id = {store_id}
and created_at <= '{end_date}'
and created_at >= '{begin_date}'
and status not in ('failed', 'deleted')
'''.format(
store_id=store_id, end_date=end_date, begin_date=begin_date)
lead_time = pd.read_sql_query(lead_time_query, pg.connection)
# Convert null received at, to true null
lead_time['created_at'] = pd.to_datetime(lead_time['created_at'])
lead_time['received_at'].replace({'0000-00-00 00:00:00': ''}, inplace=True)
lead_time['received_at'] = pd.to_datetime(lead_time['received_at'])
# Calculate lead time
lead_time['lead_time'] = (lead_time['received_at'] -
lead_time['created_at']).astype('timedelta64[h]') / 24
# Missing value impute
lead_time['lead_time'].fillna(7, inplace=True)
# Incorporate delay values
lead_time['lead_time'] = np.select(
[lead_time['drug_type'] == 'generic',
lead_time['drug_type'] == 'ethical'],
[lead_time['lead_time'] + sb_creation_delay_generic,
lead_time['lead_time'] + sb_creation_delay_ethical],
default=lead_time['lead_time'] + sb_creation_delay_other
)
# Store averages
lt_store_mean = round(lead_time.lead_time.mean(), 2)
lt_store_std = round(lead_time.lead_time.std(ddof=0), 2)
# Summarize at drug level
lt_drug = lead_time.groupby('drug_id'). \
agg({'lead_time': [np.mean, np.std]}).reset_index()
lt_drug.columns = ['drug_id', 'lead_time_mean', 'lead_time_std']
# Impute for std missing
lt_drug['lead_time_std'] = np.where(
lt_drug['lead_time_std'].isin([0, np.nan]),
lt_store_std, lt_drug['lead_time_std']
)
# ===== CLOSE PG =====
pg.close_connection()
# ====================
return lt_drug, lt_store_mean, lt_store_std
def ss_calc(data_demand, data_lt, lt_store_mean, lt_store_std, z, db, schema):
# Drug type restrictions if any
q_drugs = f"""
select
id as "drug-id", type
from "{schema}".drugs
"""
# where `type` in ('ethical','generic')
data_drugs = db.get_df(q_drugs)
data_drugs.columns = [c.replace('-', '_') for c in data_drugs.columns]
    # Average and standard deviation of demand
data_demand_min_date = data_demand['sales_date'].min()
data_demand_max_date = data_demand['sales_date'].max()
# Create full demand list, across all calendar dates, drug_id level
drugs = data_demand[['drug_id']].drop_duplicates()
dates = pd.DataFrame({'sales_date': pd.date_range(data_demand_min_date, data_demand_max_date, freq='D')})
drugs['key'] = 0
dates['key'] = 0
drug_dates = drugs[['drug_id', 'key']].merge(dates, on='key', how='outer')[['drug_id', 'sales_date']]
data_demand_all = drug_dates.merge(data_demand, how='left', on=['drug_id', 'sales_date'])
data_demand_all['demand_quantity'] = data_demand_all['demand_quantity'].fillna(0)
# Merge with drugs master
data_demand_all = data_demand_all.merge(data_drugs, how='left', on='drug_id')
# Treat outliers
'''
data_demand_all['demand_quantity'] = np.where(data_demand_all['demand_quantity'] > 20,
np.log(data_demand_all['demand_quantity']),
data_demand_all['demand_quantity'])
'''
# Calculate demand mean and std
data_demand_all_mean_std = data_demand_all.groupby(['drug_id', 'type'])['demand_quantity'].agg(
['mean', 'std']).reset_index()
data_demand_all_mean_std = data_demand_all_mean_std.rename(columns={'mean': 'demand_mean',
'std': 'demand_std'})
# Merge with lead time mean and std
data = data_demand_all_mean_std.merge(data_lt, how='left', on='drug_id')
data['lead_time_mean'] = data['lead_time_mean'].fillna(lt_store_mean)
data['lead_time_std'] = data['lead_time_std'].fillna(lt_store_std)
# Safety stock calculation
data['safety_stock'] = np.round(z * np.sqrt(data['lead_time_mean'] * np.square(data['demand_std'])
+ np.square(data['demand_mean']) * np.square(data['lead_time_std'])))
data['reorder_point'] = np.round(data['lead_time_mean'] * data['demand_mean'] + data['safety_stock'])
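    # Worked example with hypothetical inputs: demand_mean = 2/day,
    # demand_std = 1, lead_time_mean = 7 days, lead_time_std = 2 days and
    # z ~= 1.645 give
    # safety_stock ~= round(1.645 * sqrt(7*1 + 4*4)) = round(1.645 * sqrt(23)) = 8
    # and reorder_point = round(7*2 + 8) = 22.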
    # Keep 30 days of stock by default
data['order_upto_point'] = np.round(data['demand_mean'] * 30)
# Adjustment for ethical
data['order_upto_point'] = np.round(np.where(data['type'].isin(['ethical', 'high-value-ethical']),
data['order_upto_point'] * (1 / 2),
data['order_upto_point'] * (2 / 3)))
# Sanity check, order_upto_point (max) to be not less than reorder point
data['order_upto_point'] = np.round(np.where(data['order_upto_point'] < data['reorder_point'],
data['reorder_point'], data['order_upto_point']))
# Where re-order point is 1,2,3 and is same as order_upto_point (max) then do max = max+1
data['order_upto_point'] = np.round(np.where(((data['reorder_point'].isin([1, 2, 3])) &
(data['order_upto_point'] == data['reorder_point'])),
data['order_upto_point'] + 1, data['order_upto_point']))
# order-upto-point 1,2,3 corrections
# Source - ops/ipc/safety_stock
one_index = data[
data['order_upto_point'].isin([1])].index
data.loc[one_index, 'safety_stock'] = 0
data.loc[one_index, 'reorder_point'] = 1
data.loc[one_index, 'order_upto_point'] = 2
two_index = data[
data['order_upto_point'].isin([2])].index
data.loc[two_index, 'safety_stock'] = 0
data.loc[two_index, 'reorder_point'] = 1
data.loc[two_index, 'order_upto_point'] = 2
three_index = data[
data['order_upto_point'].isin([3])].index
data.loc[three_index, 'safety_stock'] = 1
data.loc[three_index, 'reorder_point'] = 2
data.loc[three_index, 'order_upto_point'] = 3
# Where re-order point is >=4 and is same as order_upto_point (max) then do max = 1.5*max
data['order_upto_point'] = np.round(np.where(((data['reorder_point'] >= 4) &
(data['order_upto_point'] == data['reorder_point'])),
data['order_upto_point'] * 1.5, data['order_upto_point']))
# Sanity check for max again
data['order_upto_point'] = np.round(np.where(data['order_upto_point'] < data['reorder_point'],
data['reorder_point'], data['order_upto_point']))
data = data.rename(columns={'safety_stock': 'min',
'reorder_point': 'safety_stock',
'order_upto_point': 'max'})
data = data[['drug_id', 'min', 'safety_stock', 'max']]
return data | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/new_stores/new_stores_ipc.py | new_stores_ipc.py |
from zeno_etl_libs.utils.new_stores.helper_functions import *
import pandas as pd
import numpy as np
def query_search(store_id, schema):
query = f"""
SELECT
id,
"store-id",
"drug-id",
"created-at"
FROM
"{schema}".searches
WHERE
"store-id" = {store_id}
"""
return query
def query_patient_request(store_id, schema):
query = f"""
SELECT
id,
"store-id",
"drug-id",
"quantity",
"created-at"
FROM
"{schema}"."short-book-1"
WHERE
"auto-short" = 0
and "auto-generated" = 0
and "store-id" = {store_id}
"""
return query
def query_manual_short(store_id, schema):
query = f"""
SELECT
id,
"store-id",
"drug-id",
"quantity",
"created-at"
FROM
"{schema}"."short-book-1"
WHERE
"auto-short" = 1
and "home-delivery" = 0
and "patient-id" != 4480
and "store-id" = {store_id}
"""
return query
def query_local_purchase(store_id, schema):
query = f"""
SELECT
i."store-id",
i."drug-id",
i."created-at",
ii."invoice-item-reference",
ii."actual-quantity" as quantity,
ii."net-value" as "lp-value"
FROM
"{schema}"."inventory-1" i
LEFT JOIN
"{schema}"."invoice-items-1" ii ON ii.id = i."invoice-item-id"
WHERE
i."store-id" = {store_id}
AND ii."invoice-item-reference" IS NULL
"""
return query
def query_stock_transfer(store_id, schema):
query = f"""
SELECT
a."source-store",
a."destination-store",
b."inventory-id",
c."drug-id",
b.quantity,
b."received-at"
FROM
"{schema}"."stock-transfers-1" a
INNER JOIN "{schema}"."stock-transfer-items-1" b
on a.id = b."transfer-id"
LEFT JOIN "{schema}"."inventory-1" c
on b."inventory-id" = c.id
WHERE
a."destination-store" = {store_id}
"""
return query
def triggers_combined(store_id, run_date, max0_drugs_df, db, schema):
#############################
# Main consolidated function, for triggers
#############################
# Get formatted SQL queries
q_search = query_search(store_id, schema)
q_pr = query_patient_request(store_id, schema)
q_ms = query_manual_short(store_id, schema)
q_lp = query_local_purchase(store_id, schema)
q_st = query_stock_transfer(store_id, schema)
# Data prep, using SQL
data_merge_c = data_prep_triggers(q_search, q_pr, q_ms, q_lp, q_st, run_date, db, schema)
# Augment with max0 info, current inventory info, ptr info
data_merge_c = data_augment_doi_inv(data_merge_c, store_id, max0_drugs_df, db, schema)
# Rule for which drugs to set max for
data_merge_c = make_keep_col(data_merge_c)
# Some extra filters and final df
# max_set_final is the final df at drug level
max_set_final, max_set_summary, max_set_f_store_summary = final_reset_sku(data_merge_c, db, schema)
return max_set_final, max_set_summary, max_set_f_store_summary
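# Illustrative usage sketch (not part of the original pipeline). Assumes a live
# DB handle `db`, a schema name, and a max-0 drug list prepared upstream; the
# values below are placeholders:
#   max0_df = pd.DataFrame({'drug_id': [123, 456]})
#   max_set_final, max_set_summary, max_set_f_store_summary = triggers_combined(
#       store_id=101, run_date='2022-07-01', max0_drugs_df=max0_df,
#       db=db, schema=schema)
# max_set_final holds one row per (store_id, drug_id) with min/safety_stock/max
# set to 0/0/1 for the drugs selected by the trigger rules.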
def pre_trigger_data_prep_c(data_pass, run_date):
data = data_pass.copy()
data['created_at'] = pd.to_datetime(data['created_at'])
data_merge = data.copy()
data_merge['day_diff_current'] = (data_merge['created_at'] - pd.to_datetime(run_date)).dt.days
    # Last 84 days (12 weeks) of history
data_merge_before = data_merge[data_merge['day_diff_current'].between(-84, -1)].copy()
data_merge_before['trigger_date'] = data_merge_before['created_at'].dt.date
# Group, to calculate unique days of trigger, and trigger quantity
data_merge_before_grp = data_merge_before.groupby(['store_id',
'drug_id']).agg({'created_at': 'count',
'trigger_date': 'nunique',
'quantity': 'sum'}).reset_index()
# Rename columns
data_merge_before_grp = data_merge_before_grp.rename(columns={'created_at': 'times_trigger',
'trigger_date': 'days_trigger'})
# Change to integer
for i in ['drug_id', 'quantity']:
data_merge_before_grp[i] = data_merge_before_grp[i].astype(int)
return data_merge_before_grp
def data_prep_triggers(q_search, q_pr, q_ms, q_lp, q_st, run_date, db, schema):
########################################
# Search
########################################
data_search_c = prep_data_from_sql(q_search, db)
data_search_c['quantity'] = 1
data_search_grp_c = pre_trigger_data_prep_c(data_search_c, run_date)
data_search_grp_c = data_search_grp_c.rename(columns={'times_trigger': 'times_searched',
'days_trigger': 'days_searched',
'quantity': 'quantity_searched'})
########################################
# PR
########################################
data_pr_c = prep_data_from_sql(q_pr, db)
data_pr_grp_c = pre_trigger_data_prep_c(data_pr_c, run_date)
data_pr_grp_c = data_pr_grp_c.rename(columns={'times_trigger': 'times_pr',
'days_trigger': 'days_pr',
'quantity': 'quantity_pr'})
########################################
# MS
########################################
data_ms_c = prep_data_from_sql(q_ms, db)
data_ms_grp_c = pre_trigger_data_prep_c(data_ms_c, run_date)
data_ms_grp_c = data_ms_grp_c.rename(columns={'times_trigger': 'times_ms',
'days_trigger': 'days_ms',
'quantity': 'quantity_ms'})
########################################
# LP
########################################
data_lp_c = prep_data_from_sql(q_lp, db)
data_lp_grp_c = pre_trigger_data_prep_c(data_lp_c, run_date)
data_lp_grp_c = data_lp_grp_c.rename(columns={'times_trigger': 'times_lp',
'days_trigger': 'days_lp',
'quantity': 'quantity_lp'})
########################################
# Stock transfer
########################################
data_st_c = prep_data_from_sql(q_st, db)
data_st_c['received_at'] = pd.to_datetime(data_st_c['received_at'], errors='coerce')
data_st_c = data_st_c[~data_st_c['received_at'].isnull()]
# Exclude central stores from source-stores
data_st_c = data_st_c[~data_st_c['source_store'].isin([52, 60, 92, 111])]
data_st_c['store_id'] = data_st_c['destination_store']
data_st_c['created_at'] = data_st_c['received_at']
data_st_grp_c = pre_trigger_data_prep_c(data_st_c, run_date)
data_st_grp_c = data_st_grp_c.rename(columns={'times_trigger': 'times_st',
'days_trigger': 'days_st',
'quantity': 'quantity_st'})
########################################
# Merge all
########################################
data_merge_c = data_search_grp_c.merge(data_pr_grp_c, how='outer', on=['store_id', 'drug_id'])
data_merge_c = data_merge_c.merge(data_ms_grp_c, how='outer', on=['store_id', 'drug_id'])
data_merge_c = data_merge_c.merge(data_lp_grp_c, how='outer', on=['store_id', 'drug_id'])
data_merge_c = data_merge_c.merge(data_st_grp_c, how='outer', on=['store_id', 'drug_id'])
# Fill missing values with 0
data_merge_c = data_merge_c.fillna(0).astype(int)
# Binary columns, which will be used later
for i in ['times_searched', 'times_pr', 'times_ms', 'times_lp', 'times_st']:
data_merge_c[i + '_b'] = np.where(data_merge_c[i] > 0, 1, 0)
# Aggregate
data_merge_c['num_triggers'] = (data_merge_c['times_searched_b'] + data_merge_c['times_pr_b'] +
data_merge_c['times_ms_b'] + data_merge_c['times_lp_b'] +
data_merge_c['times_st_b'])
# Repeatable info merge
data_r = prep_data_from_sql(Q_REPEATABLE.format(schema=schema), db)
data_merge_c = data_merge_c.merge(data_r, how='left', on='drug_id')
data_merge_c['is_repeatable'] = data_merge_c['is_repeatable'].fillna(0)
# Columns about repeat event flags
for i in ['days_searched', 'days_pr', 'days_ms', 'days_lp', 'days_st']:
data_merge_c[i + '_r'] = np.where(data_merge_c[i] > 1, 1, 0)
# Number of repeat triggers sum
data_merge_c['num_repeat_triggers'] = (data_merge_c['days_searched_r'] + data_merge_c['days_pr_r'] +
data_merge_c['days_ms_r'] + data_merge_c['days_lp_r'] +
data_merge_c['days_st_r'])
    # Number of repeat triggers, excluding search
data_merge_c['num_repeat_triggers_non_search'] = (data_merge_c['days_pr_r'] + data_merge_c['days_ms_r'] +
data_merge_c['days_lp_r'] + data_merge_c['days_st_r'])
return data_merge_c
def data_augment_doi_inv(data_pass, store_id, max0_drugs_df, db, schema):
# Formatted SQL queries
# q_max0 = query_max_zero(store_id)
q_inv = query_inventory(store_id, schema)
data_merge_c = data_pass.copy()
########################################
# Max0 drugs
########################################
# connection = current_config.mysql_conn()
# data_max0 = prep_data_from_sql(q_max0, connection)
# data_max0['max0'] = 1
# Take max0 from df passed
data_max0 = max0_drugs_df.copy()
data_max0['store_id'] = store_id
data_max0['max0'] = 1
########################################
# Current inventory
########################################
q_inv = query_inventory(store_id, schema=schema)
data_inv = prep_data_from_sql(q_inv, db)
data_inv['curr_inv0'] = np.where(data_inv['current_inventory'] == 0, 1, 0)
########################################
# PTR
########################################
# SQL
data_ptr = prep_data_from_sql(Q_PTR.format(schema=schema), db)
data_ptr["ptr"] = data_ptr["ptr"].astype(float)
# Merge Max info, and impute if not present
data_merge_c = data_merge_c.merge(data_max0, how='inner', on=['store_id', 'drug_id'])
data_merge_c['max0'] = data_merge_c['max0'].fillna(0)
# Merge inventory and impute if not present
data_merge_c = data_merge_c.merge(data_inv, how='left', on=['store_id', 'drug_id'])
data_merge_c['curr_inv0'] = data_merge_c['curr_inv0'].fillna(1)
# Merge PTR and impute an average value if null
data_merge_c = data_merge_c.merge(data_ptr, how='left', on=['drug_id'])
data_merge_c['ptr'] = data_merge_c['ptr'].fillna(67)
# Max0, inv0 both
data_merge_c['max0_inv0'] = data_merge_c['max0'] * data_merge_c['curr_inv0']
return data_merge_c
def make_keep_col(data_pass):
data = data_pass.copy()
    # Rule for deciding whether to set a max for the drug
data['keep'] = np.where(((data['num_triggers'] >= 4) |
((data['num_triggers'] == 3) & (data['num_repeat_triggers'] >= 1)) |
((data['num_triggers'] == 3) & (data['num_repeat_triggers'] == 0) & (
data['is_repeatable'] == 1)) |
((data['num_triggers'] == 2) & (data['num_repeat_triggers'] >= 2)) |
((data['num_triggers'] == 2) & (data['num_repeat_triggers'] == 1) & (
data['is_repeatable'] == 1)) |
((data['num_triggers'] == 2) & (data['num_repeat_triggers'] == 1) & (
data['num_repeat_triggers_non_search'] == 1)) |
((data['num_triggers'] == 1) & (data['num_repeat_triggers'] == 1) & (
data['is_repeatable'] == 1)) |
((data['num_triggers'] == 1) & (data['num_repeat_triggers'] == 1) & (
data['num_repeat_triggers_non_search'] == 1))
),
1, 0)
# Rounding off to 2 decimals
for i in ['max0', 'curr_inv0', 'max0_inv0']:
data[i] = np.round(data[i], 2)
# Columns for order information
data['sku'] = 1
data['keep_sku'] = (data['sku'] * data['keep'] * data['max0']).astype(int)
data['order_sku'] = (data['sku'] * data['keep'] * data['max0_inv0']).astype(int)
data['max_value'] = data['keep_sku'] * data['ptr']
data['order_value'] = data['order_sku'] * data['ptr']
return data
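# Illustrative reading of the rule above (hypothetical drug): a drug searched on
# two different days and raised as a patient request once has num_triggers = 2,
# num_repeat_triggers = 1 (from search) and no non-search repeats, so it is kept
# only if it is repeatable; if the patient request itself repeats on two days
# (a non-search repeat), the drug is kept regardless of repeatability.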
def final_reset_sku(data_pass, db, schema):
data_merge_c = data_pass.copy()
########################################
# Some hardcoded decisions, to control inventory rise
########################################
# Should be revisited later
max_set = data_merge_c[(data_merge_c['keep_sku'] == 1)].copy()
# Summary by triggers
max_set_summary = max_set.groupby(['num_triggers',
'num_repeat_triggers',
'num_repeat_triggers_non_search',
'is_repeatable']).agg({'drug_id': 'count',
'max0': 'mean',
'curr_inv0': 'mean',
'max0_inv0': 'mean'}).reset_index()
max_set_summary = max_set_summary.rename(columns={'drug_id': 'drugs'})
max_set_summary['is_repeatable'] = max_set_summary['is_repeatable'].astype('int')
max_set_summary = max_set_summary.sort_values(by=['num_triggers',
'num_repeat_triggers',
'is_repeatable',
'num_repeat_triggers_non_search'],
ascending=(False, False, False, False))
    # Drop high-value drugs (PTR > 300), which can inflate order value
max_set_f1 = max_set[max_set['ptr'] <= 300].copy()
# Keep only 2+ triggers for now
max_set_f2 = max_set_f1[max_set_f1['num_triggers'] >= 2].copy()
# Stores info merge
# SQL
stores = prep_data_from_sql(Q_STORES.format(schema=schema), db)
max_set_f = max_set_f2.merge(stores, how='left', on='store_id')
# Order summary for store
    max_set_f_store_summary = max_set_f.groupby(['store_id', 'store_name'])[
        ['keep_sku', 'order_sku', 'max_value', 'order_value']].sum().reset_index()
for i in ['max_value', 'order_value']:
max_set_f_store_summary[i] = np.round(max_set_f_store_summary[i], 0).astype(int)
# Min, SS, Max to be set as 0,0,1
# Can be revisited later if policy change or more confidence
max_set_final = max_set_f[['store_id', 'drug_id']].drop_duplicates()
max_set_final['min'] = 0
max_set_final['safety_stock'] = 0
max_set_final['max'] = 1
# 'max_set_final' is the final df, at drug level
# Rest data-frames are summary data-frames
return max_set_final, max_set_summary, max_set_f_store_summary | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/new_stores/new_store_stock_triggers.py | new_store_stock_triggers.py |
import numpy as np
import pandas as pd
def get_ga_composition_sku(db, schema, substition_type=['generic'], logger=None):
    '''Get GoodAid SKUs, top-selling generic SKUs and the remaining SKUs for active GoodAid compositions'''
# Good Aid SKU list
ga_sku_query = """
select wh."drug-id" , d.composition
from "{schema}"."wh-sku-subs-master" wh
left join "{schema}".drugs d
on d.id = wh."drug-id"
where wh."add-wh" = 'Yes'
and d."company-id" = 6984
and d.type in {0}
""".format(str(substition_type).replace('[', '(').replace(']', ')'),
schema=schema)
ga_sku = db.get_df(ga_sku_query)
ga_sku.columns = [c.replace('-', '_') for c in ga_sku.columns]
logger.info('GoodAid SKU list ' + str(ga_sku.shape[0]))
# ga_sku_query = '''
# select drug_id, composition
# from good_aid_substitution_sku
# where start_date <= '{}'
# '''.format(current_date)
# pg_connection = current_config.data_science_postgresql_conn()
# ga_sku = pd.read_sql_query(ga_sku_query, pg_connection)
# pg_connection.close()
# logger.info('GoodAid SKU list ' + str(ga_sku.shape[0]))
# Generic Top SKU
ga_active_composition = tuple(ga_sku['composition'].values)
top_sku_query = """
select wh."drug-id" , d.composition
from "{schema}"."wh-sku-subs-master" wh
left join "{schema}".drugs d
on d.id = wh."drug-id"
where wh."add-wh" = 'Yes'
and d."company-id" != 6984
and d.type in {0}
and d.composition in {1}
""".format(str(substition_type).replace('[', '(').replace(']', ')'),
str(ga_active_composition), schema=schema)
top_sku = db.get_df(top_sku_query)
top_sku.columns = [c.replace('-', '_') for c in top_sku.columns]
logger.info('GoodAid comp Top SKU list ' + str(top_sku.shape[0]))
# ga_active_composition = tuple(ga_sku['composition'].values)
# top_sku_query = '''
# select drug_id, composition
# from good_aid_generic_sku
# where active_flag = 'YES'
# and composition in {}
# '''.format(str(ga_active_composition))
# pg_connection = current_config.data_science_postgresql_conn()
# top_sku = pd.read_sql_query(top_sku_query, pg_connection)
# pg_connection.close()
# logger.info('GoodAid comp Top SKU list ' + str(top_sku.shape[0]))
    # SS substitution for other drugs
rest_sku_query = """
select id as drug_id, composition
from "{schema}".drugs
where composition in {0}
and id not in {1}
and type in {2}
""".format(str(ga_active_composition),
str(tuple(top_sku['drug_id'].values)),
str(substition_type).replace('[', '(').replace(']', ')'),
schema=schema)
rest_sku = db.get_df(rest_sku_query)
logger.info('GoodAid comp rest SKU list ' + str(rest_sku.shape[0]))
return ga_sku, top_sku, rest_sku
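# Illustrative usage sketch (not part of the original module; `db`, `schema` and
# `logger` are assumed to come from the calling ETL job):
#   ss_df, ga_log = update_ga_ss(safety_stock_df, store_id=101, db=db,
#       schema=schema, ga_inv_weight=0.5, rest_inv_weight=0, top_inv_weight=1,
#       logger=logger)
# With these default weights, GoodAid SKUs are raised to at least 50% of the
# composition-level min/ss/max, top generic SKUs keep 100% of their own values,
# and the remaining generic SKUs of the same composition are forced to zero.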
def update_ga_ss(safety_stock_df, store_id, db, schema, ga_inv_weight=0.5,
rest_inv_weight=0, top_inv_weight=1,
substition_type=['generic'], min_column='safety_stock',
ss_column='reorder_point', max_column='order_upto_point',
logger=None):
    '''Update safety stock / reorder point / max for GoodAid substitution'''
# good aid ss log
good_aid_ss_log = pd.DataFrame()
pre_max_qty = safety_stock_df[max_column].sum()
# get drug list
logger.info('Getting SKU list')
ga_sku, top_sku, rest_sku = get_ga_composition_sku(db, schema,
substition_type,
logger)
# get composition level ss numbers
logger.info('Aggregating composition level SS')
ga_composition = pd.concat([ga_sku, top_sku, rest_sku], axis=0)
columns_list = ['drug_id', 'composition',
min_column, ss_column, max_column]
ga_composition_ss = ga_composition.merge(
safety_stock_df, on='drug_id')[columns_list]
    ga_composition_ss_agg = ga_composition_ss.groupby(
        ['composition'])[[min_column, ss_column, max_column]].sum(). \
        reset_index()
# get index for different drug lists
rest_sku_index = safety_stock_df[
safety_stock_df['drug_id'].isin(rest_sku['drug_id'])].index
top_sku_index = safety_stock_df[
safety_stock_df['drug_id'].isin(top_sku['drug_id'])].index
ga_sku_index = safety_stock_df[
safety_stock_df['drug_id'].isin(ga_sku['drug_id'])].index
logger.info('Updating safety stock')
# logging rest SKU ss from algo
prev_rest_sku_ss = safety_stock_df.loc[rest_sku_index]. \
merge(rest_sku)[columns_list]
prev_rest_sku_ss['sku_type'] = 'rest generic'
good_aid_ss_log = good_aid_ss_log.append(prev_rest_sku_ss)
# setting rest SKU ss
safety_stock_df.loc[rest_sku_index, min_column] = np.round(
rest_inv_weight * safety_stock_df.loc[rest_sku_index, min_column])
safety_stock_df.loc[rest_sku_index, ss_column] = np.round(
rest_inv_weight * safety_stock_df.loc[rest_sku_index, ss_column])
safety_stock_df.loc[rest_sku_index, max_column] = np.round(
rest_inv_weight * safety_stock_df.loc[rest_sku_index, max_column])
    # logging top SKU ss from algo
prev_top_sku_ss = safety_stock_df.loc[top_sku_index]. \
merge(top_sku)[columns_list]
prev_top_sku_ss['sku_type'] = 'top generic'
good_aid_ss_log = good_aid_ss_log.append(prev_top_sku_ss)
    # setting top SKU ss
safety_stock_df.loc[top_sku_index, min_column] = np.round(
top_inv_weight * safety_stock_df.loc[top_sku_index, min_column])
safety_stock_df.loc[top_sku_index, ss_column] = np.round(
top_inv_weight * safety_stock_df.loc[top_sku_index, ss_column])
safety_stock_df.loc[top_sku_index, max_column] = np.round(
top_inv_weight * safety_stock_df.loc[top_sku_index, max_column])
# logging goodaid SKU ss from algo
prev_ga_sku_ss = safety_stock_df.loc[ga_sku_index]. \
merge(ga_sku)[columns_list]
prev_ga_sku_ss['sku_type'] = 'good aid'
good_aid_ss_log = good_aid_ss_log.append(prev_ga_sku_ss)
# setting goodaid SKU ss
ga_sku_ss = ga_composition_ss_agg.merge(ga_sku)[columns_list]
ga_sku_ss[min_column] = np.round(ga_inv_weight * ga_sku_ss[min_column])
ga_sku_ss[ss_column] = np.round(ga_inv_weight * ga_sku_ss[ss_column])
ga_sku_ss[max_column] = np.round(ga_inv_weight * ga_sku_ss[max_column])
ss_df_columns = safety_stock_df.columns
safety_stock_df = safety_stock_df.merge(
ga_sku_ss, how='left', on=['drug_id'])
safety_stock_df[min_column] = np.max(
safety_stock_df[[min_column + '_y', min_column + '_x']], axis=1)
safety_stock_df[ss_column] = np.max(
safety_stock_df[[ss_column + '_y', ss_column + '_x']], axis=1)
safety_stock_df[max_column] = np.max(
safety_stock_df[[max_column + '_y', max_column + '_x']], axis=1)
safety_stock_df = safety_stock_df[ss_df_columns]
# updating new good aid skus
ga_sku_new_entries = ga_sku_ss.loc[
~ga_sku_ss['drug_id'].isin(safety_stock_df['drug_id'])]
if len(ga_sku_new_entries) > 0:
ga_sku_new_entries_drug_list = str(
list(ga_sku_new_entries['drug_id'])
).replace('[', '(').replace(']', ')')
ga_sku_drug_info_query = """
select d.id as drug_id, "drug-name" as drug_name, type,
coalesce(doi."drug-grade", 'NA') as drug_grade
from "{schema}".drugs d left join "{schema}"."drug-order-info" doi
on d.id = doi."drug-id"
where d.id in {0}
and doi."store-id" = {1}
""".format(ga_sku_new_entries_drug_list, store_id, schema=schema)
ga_sku_drug_info = db.get_df(ga_sku_drug_info_query)
ga_sku_new_entries = ga_sku_new_entries.merge(ga_sku_drug_info)
# filling the relevant columns
ga_sku_new_entries['model'] = 'NA'
ga_sku_new_entries['bucket'] = 'NA'
ga_sku_new_entries['fcst'] = 0
ga_sku_new_entries['std'] = 0
ga_sku_new_entries['lead_time_mean'] = 0
ga_sku_new_entries['lead_time_std'] = 0
ga_sku_new_entries['correction_flag'] = 'N'
safety_stock_df = safety_stock_df.append(
ga_sku_new_entries)[ss_df_columns]
good_aid_ss_log.insert(
loc=0, column='store_id', value=store_id)
# renaming min/ss/max column name according to the table
good_aid_ss_log.rename(
columns={min_column: 'safety_stock',
ss_column: 'reorder_point',
max_column: 'order_upto_point'},
inplace=True)
post_max_qty = safety_stock_df[max_column].sum()
print('Reduction in max quantity:',
str(round(100 * (1 - post_max_qty / pre_max_qty), 2)) + '%')
return safety_stock_df, good_aid_ss_log | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/goodaid_substitution.py | goodaid_substitution.py |
import numpy as np
import math
import datetime as dt
"""
Steps -
1. Get Auto short total time -> from creation to received at store
2. If marked as lost make it 7 days
3. Max LT capped at 7 days
Recent corrections:
1. AS & MS added (earlier only AS)
2. In case of no history in past 90 days, set default store_lt = 4 days
"""
def lead_time(store_id, cal_sales, reset_date, db, schema, logger=None):
sb_creation_delay_ethical = 1
sb_creation_delay_other = 1
sb_creation_delay_generic = 2
end_date = str((
dt.datetime.strptime(reset_date, '%Y-%m-%d') -
dt.timedelta(7)).date())
begin_date = str(cal_sales.date.dt.date.max() - dt.timedelta(97))
logger.info("Lead Time Calculation Starts")
logger.info(f"SB Begin Date: {begin_date}, SB End Date: {end_date}")
lead_time_query = f"""
select "store-id" , "drug-id" , "type" , status , "created-to-delivery-hour" as "lt-hrs"
from "{schema}"."as-ms" am
where "as-ms" in ('AS', 'MS')
and "store-id" = {store_id}
and date("created-at") <= '{end_date}'
and date("created-at") >= '{begin_date}'
and status not in ('failed', 'deleted')
"""
lead_time = db.get_df(lead_time_query)
lead_time.columns = [c.replace('-', '_') for c in lead_time.columns]
# classify all types into generic, ethical & others
lead_time["type"] = np.where(
lead_time["type"].isin(['ethical', 'high-value-ethical']), 'ethical',
lead_time["type"])
lead_time["type"] = np.where(lead_time["type"].isin(['ethical', 'generic']),
lead_time["type"], 'others')
lead_time["lt_days"] = lead_time["lt_hrs"] / 24
lead_time["lt_days"] = lead_time["lt_days"].fillna(7)
lead_time["lt_days"] = np.where(lead_time["lt_days"] > 7, 7, lead_time["lt_days"])
# add SB creation delay
lead_time['lt_days'] = np.select(
[lead_time['type'] == 'generic',
lead_time['type'] == 'ethical'],
[lead_time['lt_days'] + sb_creation_delay_generic,
lead_time['lt_days'] + sb_creation_delay_ethical],
default=lead_time['lt_days'] + sb_creation_delay_other)
lt_store_mean = round(lead_time.lt_days.mean(), 2)
lt_store_std = round(lead_time.lt_days.std(ddof=0), 2)
# to handle cases where no AS,MS history in past 90 days
if math.isnan(lt_store_mean):
lt_store_mean = 4
if math.isnan(lt_store_std):
lt_store_std = 0
lt_drug = lead_time.groupby('drug_id'). \
agg({'lt_days': [np.mean, np.std]}).reset_index()
lt_drug.columns = ['drug_id', 'lead_time_mean', 'lead_time_std']
lt_drug['lead_time_std'] = np.where(
lt_drug['lead_time_std'].isin([0, np.nan]),
lt_store_std, lt_drug['lead_time_std'])
logger.info("Lead Time Calculation Completed")
return lt_drug, lt_store_mean, lt_store_std | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/lead_time.py | lead_time.py |
import numpy as np
def generic_portfolio(safety_stock_df, db, schema, logger=None):
"""
    To keep at least 1 drug in every active generic composition
"""
comp_drugs_to_keep = get_preference_drugs(db, schema, logger)
# get compositions of all generic drugs in store with OUP>0
all_drugs = tuple(safety_stock_df.loc[
safety_stock_df["order_upto_point"] > 0][
"drug_id"].unique())
q_gen_drugs = f"""
select id as "drug-id", composition
from "{schema}".drugs d
where id in {all_drugs}
and "type" = 'generic'
"""
df_gen_drugs = db.get_df(q_gen_drugs)
df_gen_drugs.columns = [c.replace('-', '_') for c in df_gen_drugs.columns]
df_gen_drugs = df_gen_drugs.loc[df_gen_drugs["composition"] != '']
compostitions_in_store = list(df_gen_drugs["composition"].unique())
# get additional composition-drugs to add
compositon_not_in_store = comp_drugs_to_keep.loc[
~comp_drugs_to_keep["composition"].isin(compostitions_in_store)]
logger.info(f"To keep {compositon_not_in_store.shape[0]} additional "
f"composition-drugs in store")
# drugs to add in current ss table
drugs_to_add = compositon_not_in_store[["drug_id", "std_qty"]]
final_df = safety_stock_df.merge(drugs_to_add, on="drug_id",
how="outer")
# handle NaN columns for additional drugs
final_df["model"] = final_df["model"].fillna('NA')
final_df["bucket"] = final_df["bucket"].fillna('NA')
final_df['fcst'] = final_df['fcst'].fillna(0)
final_df['std'] = final_df['std'].fillna(0)
final_df['lead_time_mean'] = final_df['lead_time_mean'].fillna(0)
final_df['lead_time_std'] = final_df['lead_time_std'].fillna(0)
final_df["safety_stock"] = final_df["safety_stock"].fillna(0)
final_df["reorder_point"] = final_df["reorder_point"].fillna(0)
final_df["order_upto_point"] = final_df["order_upto_point"].fillna(0)
# set OUP=STD_QTY for added drugs
final_df["order_upto_point"] = np.where(final_df["std_qty"].notna(),
final_df["std_qty"],
final_df["order_upto_point"])
final_df = final_df.drop("std_qty", axis=1)
return final_df
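# Illustrative usage sketch (not part of the original module; assumes the
# safety-stock frame produced by the IPC reset and a live DB handle):
#   ss_df = generic_portfolio(safety_stock_df, db=db, schema=schema, logger=logger)
# Compositions already covered by an in-store generic drug with OUP > 0 are left
# untouched; for uncovered compositions the preferred drug is appended with
# order_upto_point equal to its std_qty (defaulting to 1).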
def get_preference_drugs(db, schema, logger=None):
"""
    Get all active generic compositions in the WH and the preferred drug for
    each composition. The preference order is as follows:
* Choose GAID if available
* Else choose highest selling drug in past 90 days at system level
"""
q_wh_gen_sku = f"""
select wssm."drug-id" , d.composition , d."company-id"
from "{schema}"."wh-sku-subs-master" wssm
left join "{schema}".drugs d on wssm."drug-id" = d.id
where "add-wh" = 'Yes'
and d."type" = 'generic'
"""
df_wh_gen_sku = db.get_df(q_wh_gen_sku)
df_wh_gen_sku.columns = [c.replace('-', '_') for c in df_wh_gen_sku.columns]
# clear drugs with no composition present
df_wh_gen_sku = df_wh_gen_sku.loc[df_wh_gen_sku["composition"] != '']
logger.info(f"Distinct generic compositions in WH: {len(df_wh_gen_sku.composition.unique())}")
logger.info(f"Distinct generic drugs in WH: {df_wh_gen_sku.shape[0]}")
drug_ids = tuple(df_wh_gen_sku.drug_id.unique())
    # get past 90 days system-level sales of the candidate WH generic drugs
q_sales = f"""
select "drug-id" , sum("revenue-value") as "gross-sales"
from "{schema}".sales s
where "drug-id" in {drug_ids}
and datediff('day', date("created-at"), CURRENT_DATE ) < 90
and "bill-flag" = 'gross'
group by "drug-id"
"""
df_sales = db.get_df(q_sales)
df_sales.columns = [c.replace('-', '_') for c in df_sales.columns]
df_wh_gen_sku = df_wh_gen_sku.merge(df_sales, on="drug_id", how="left")
df_wh_gen_sku["gross_sales"] = df_wh_gen_sku["gross_sales"].fillna(0)
df_wh_gen_sku["is_gaid"] = np.where(df_wh_gen_sku["company_id"] == 6984, 1, 0)
# order priority: GA, Sales
df_wh_gen_sku = df_wh_gen_sku.sort_values(
by=['composition', 'is_gaid', 'gross_sales'],
ascending=False)
# choose the first preference for every composition
comp_drug_list = df_wh_gen_sku.groupby('composition', as_index=False).agg(
{'drug_id': 'first'})
# get std-qty to keep
q_drug_std_info = f"""
select "drug-id" , "std-qty"
from "{schema}"."drug-std-info" dsi
"""
df_drug_std_info = db.get_df(q_drug_std_info)
df_drug_std_info.columns = [c.replace('-', '_') for c in df_drug_std_info.columns]
comp_drug_list = comp_drug_list.merge(df_drug_std_info, on="drug_id",
how="left")
# fill NA values with defaults
comp_drug_list["std_qty"] = comp_drug_list["std_qty"].fillna(1)
return comp_drug_list | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/store_portfolio_additions.py | store_portfolio_additions.py |
import pandas as pd
import numpy as np
import time
from zeno_etl_libs.django.api import Sql
from zeno_etl_libs.db.db import MySQL
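# Illustrative usage sketch (not part of the original module). `data` is the
# corrected safety-stock frame from the IPC reset and `type_list` is a SQL-ready
# tuple string; the values below are placeholders:
#   data = pd.DataFrame({'store_id': [101], 'drug_id': [123],
#                        'corr_min': [2], 'corr_ss': [4], 'corr_max': [8]})
#   new_drugs, missed = doid_update(data, type_list="('generic', 'ethical')",
#                                   db=db, schema=schema, logger=logger)
# Rows whose min/safe-stock/max already match production are skipped; rows with
# no existing drug-order-info entry are returned in new_drugs.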
def doid_update(data, type_list, db, schema, logger=None, gaid_omit=True):
# GA skus to be omitted
ga_sku_query = f"""
select "drug-id" as drug_id
from "{schema}"."wh-sku-subs-master" wh
left join "{schema}".drugs d
on d.id = wh."drug-id"
where d."company-id" = 6984
"""
ga_sku = db.get_df(ga_sku_query)
ga_sku_list = tuple(ga_sku['drug_id'])
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
data = data[['store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']]
data = data.rename(columns={
'corr_min': 'min', 'corr_ss': 'safe_stock', 'corr_max': 'max'})
    # if GAID drugs are not to be omitted, neutralize the exclusion list
    if not gaid_omit:
        ga_sku_list = (0, 0)
data = data[~data['drug_id'].isin(ga_sku_list)]
mysql = MySQL()
sql = Sql()
for store_id in data['store_id'].unique():
current_ss_query = """
SELECT doid.id, doid.`store-id` , doid.`drug-id` , doid.min,
doid.`safe-stock` , doid.max
FROM `drug-order-info-data` doid
left join drugs d
on d.id = doid.`drug-id`
where doid.`store-id` = {store_id}
and d.`type` in {type_list}
and d.id not in {ga_sku_list}
""".format(store_id=store_id,
type_list=type_list,
ga_sku_list=ga_sku_list,
schema=schema)
mysql.open_connection()
current_ss = pd.read_sql(current_ss_query, mysql.connection)
mysql.close()
current_ss.columns = [c.replace('-', '_') for c in current_ss.columns]
data_store = data.loc[
data['store_id'] == store_id,
['store_id', 'drug_id', 'min', 'safe_stock', 'max']]
        # Do not let the code erroneously force non-GAID drugs to zero
how = 'outer'
if not gaid_omit:
how = 'right'
ss_joined = current_ss.merge(
data_store, on=['store_id', 'drug_id'], how=how,
suffixes=('_old', ''))
ss_joined['min'].fillna(0, inplace=True)
ss_joined['safe_stock'].fillna(0, inplace=True)
ss_joined['max'].fillna(0, inplace=True)
        new_drug_entries = new_drug_entries.append(
            ss_joined[ss_joined['id'].isna()])
        logger.info('Mysql upload for store ' + str(store_id))
        # log the new-entry count before those rows are dropped from ss_joined
        logger.info('New entries ' + str(
            ss_joined[ss_joined['id'].isna()].shape[0]))
        ss_joined = ss_joined[~ss_joined['id'].isna()]
ss_joined['flag'] = np.where(
(ss_joined['min_old'] == ss_joined['min']) &
(ss_joined['safe_stock_old'] == ss_joined['safe_stock']) &
(ss_joined['max_old'] == ss_joined['max']),
'values same', 'values changed'
)
ss_to_upload = ss_joined.loc[
ss_joined['flag'] == 'values changed',
['id', 'min', 'safe_stock', 'max']]
logger.info('SS to update only for ' + str(
ss_joined[ss_joined['flag'] != 'values same'].shape[0]))
data_to_be_updated_list = list(ss_to_upload.apply(dict, axis=1))
if len(data_to_be_updated_list) > 0:
chunk_size = 1000
for i in range(0, len(data_to_be_updated_list), chunk_size):
status, msg = sql.update(
{'table': 'DrugOrderInfoData',
'data_to_be_updated': data_to_be_updated_list[i:i+chunk_size]}, logger)
logger.info(f"DrugOrderInfoData update API "
f"count: {min(i+chunk_size, len(data_to_be_updated_list))}, status: {status}, msg: {msg}")
drug_list = str(list(ss_joined.loc[
ss_joined['flag'] == 'values changed', 'drug_id'].unique())
).replace('[', '(').replace(']', ')')
update_test_query = """
SELECT `store-id` , `drug-id` , min , `safe-stock` , max
from `drug-order-info-data` doid
where `store-id` = {store_id}
and `drug-id` in {drug_list}
""".format(store_id=store_id,
drug_list=drug_list,
schema=schema)
time.sleep(15)
mysql.open_connection()
update_test = pd.read_sql(update_test_query, mysql.connection)
mysql.close()
update_test.columns = [c.replace('-', '_') for c in update_test.columns]
update_test = ss_joined.loc[
ss_joined['flag'] == 'values changed',
['store_id', 'drug_id', 'min', 'safe_stock', 'max']].merge(
update_test, on=['store_id', 'drug_id'],
suffixes=('_new', '_prod'))
update_test['mismatch_flag'] = np.where(
(update_test['min_new'] == update_test['min_prod']) &
(update_test['safe_stock_new'] == update_test[
'safe_stock_prod']) &
(update_test['max_new'] == update_test['max_prod']),
'updated', 'not updated'
)
missed_entries = missed_entries.append(
update_test[update_test['mismatch_flag'] == 'not updated'])
logger.info(
'Entries updated successfully: ' +
str(update_test[
update_test['mismatch_flag'] == 'updated'].shape[0]))
logger.info(
'Entries not updated successfully: ' +
str(update_test[
update_test['mismatch_flag'] == 'not updated'].shape[
0]))
return new_drug_entries, missed_entries | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/doid_update_ss.py | doid_update_ss.py |
import numpy as np
def post_processing(store_id, drug_class, weekly_fcst, safety_stock_df,
db, schema, logger):
    '''Fetch drug name, type, grade and store name for the forecast output'''
drug_id_list = tuple(drug_class.drug_id.unique())
drug_info_query = """
select d.id as drug_id, "drug-name" as drug_name, type,
coalesce(doi."drug-grade", 'NA') as drug_grade
from "{schema}".drugs d
left join "{schema}"."drug-order-info" doi
on d.id = doi."drug-id"
where d.id in {0}
and doi."store-id" = {1}
""".format(str(drug_id_list), store_id, schema=schema)
drug_info = db.get_df(drug_info_query)
q_store_name = f""" select name from "{schema}".stores where id = {store_id} """
store_name = db.get_df(q_store_name)['name'][0]
safety_stock_df['store_id'] = store_id
safety_stock_df['store_name'] = store_name
safety_stock_df = safety_stock_df.merge(
drug_info, on='drug_id', how='left')
safety_stock_df['drug_grade'].fillna('NA', inplace=True)
safety_stock_df = safety_stock_df[[
'store_id', 'store_name', 'model', 'drug_id', 'drug_name', 'type',
'drug_grade', 'bucket', 'percentile', 'fcst', 'std',
'lead_time_mean', 'lead_time_std', 'safety_stock', 'reorder_point',
'order_upto_point', 'safety_stock_days', 'reorder_days',
'order_upto_days', 'fptr', 'curr_inventory', 'max_value','correction_flag']]
weekly_fcst['store_id'] = store_id
weekly_fcst['store_name'] = store_name
weekly_fcst = weekly_fcst[['store_id', 'store_name', 'model',
'drug_id', 'date', 'fcst', 'std']]
drug_class['store_id'] = store_id
drug_class['store_name'] = store_name
drug_class = drug_class.merge(
drug_info[['drug_id', 'drug_grade', 'type']], on='drug_id', how='left')
drug_class['drug_grade'].fillna('NA', inplace=True)
drug_class = drug_class[['store_id', 'store_name', 'drug_id', 'drug_grade',
'type', 'net_sales', 'sales_std_dev', 'sales_cov',
'bucket_abc', 'bucket_xyz']]
'''Getting order value'''
safety_stock_df['to_order_quantity'] = np.where(
safety_stock_df['curr_inventory'] <= safety_stock_df['reorder_point'],
safety_stock_df['order_upto_point'] - safety_stock_df['curr_inventory'], 0)
safety_stock_df['to_order_value'] = (
safety_stock_df['to_order_quantity'] * safety_stock_df['fptr'])
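    # Worked example (illustrative numbers): with curr_inventory = 3,
    # reorder_point = 5, order_upto_point = 12 and fptr = 50, the drug is below
    # ROP, so to_order_quantity = 12 - 3 = 9 and to_order_value = 450; if current
    # inventory were above the reorder point, nothing would be ordered.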
order_value = safety_stock_df.pivot_table(
index=['type', 'store_name', 'drug_grade'],
values=['to_order_quantity', 'to_order_value'], aggfunc='sum',
margins=True, margins_name='Total').reset_index()
return drug_class, weekly_fcst, safety_stock_df, order_value | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/post_processing.py | post_processing.py |
import numpy as np
def abc_xyz_classification(cal_drug_sales_monthly, logger=None):
cut_cov = (0.3, 1.0)
cut_sales = (4, 30)
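    # Interpretation of the cut-offs (see the bucket assignment below): average
    # monthly quantity <= 4 -> C, 4-30 -> B, > 30 -> A; coefficient of variation
    # <= 0.3 -> X, 0.3-1.0 -> Y, > 1.0 -> Z. For example, a drug averaging
    # 10 units/month with a CoV of 0.5 lands in bucket B-Y.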
# taking last 12 months data only for classification
n = 12
prev_n_month_dt = cal_drug_sales_monthly[
['month_begin_dt']].drop_duplicates().\
sort_values('month_begin_dt', ascending=False)['month_begin_dt'].\
head(n)
cal_drug_sales_classification = cal_drug_sales_monthly[
cal_drug_sales_monthly.month_begin_dt.isin(prev_n_month_dt)]
print(len(cal_drug_sales_classification))
# monthly averages for classification
drug_class = cal_drug_sales_classification.\
groupby('drug_id').agg({'net_sales_quantity': [np.mean, np.std]}).\
reset_index()
drug_class.columns = ['drug_id', 'net_sales', 'sales_std_dev']
drug_class = drug_class[drug_class['net_sales'] >= 0]
drug_class['sales_cov'] = (
drug_class['sales_std_dev'] /
drug_class['net_sales'])
# assertion error to check all sales positive
assert len(drug_class[
drug_class['net_sales'] < 0]) == 0
# handling infs
drug_class['sales_cov'] = np.where(
drug_class['sales_cov'] == np.inf,
drug_class['sales_std_dev'],
drug_class['sales_cov']
)
# assigning buckets
drug_class['bucket_abc'] = np.select(
[(drug_class['net_sales'] <= cut_sales[0]),
(drug_class['net_sales'] > cut_sales[0]) &
(drug_class['net_sales'] <= cut_sales[1]),
(drug_class['net_sales'] > cut_sales[1])],
['C', 'B', 'A'],
default='NA')
drug_class['bucket_xyz'] = np.select(
[drug_class['sales_cov'] <= cut_cov[0],
(drug_class['sales_cov'] > cut_cov[0]) &
(drug_class['sales_cov'] <= cut_cov[1]),
drug_class['sales_cov'] > cut_cov[1]],
['X', 'Y', 'Z'],
default='NA')
print(drug_class.drug_id.nunique())
# summary
bucket_sales = drug_class.groupby(
['bucket_abc', 'bucket_xyz']).agg(
{'drug_id': 'count', 'net_sales': ['sum', 'mean'],
'sales_cov': 'mean'}).reset_index()
bucket_sales.columns = ['bucket_abc', 'bucket_xyz', 'drug_id', 'net_sales',
'avg_sales_per_drug', 'sales_cov']
bucket_sales['net_sales_frac'] = round(
100*bucket_sales['net_sales']/drug_class.net_sales.sum(), 2)
bucket_sales['drug_frac'] = round(
100*bucket_sales['drug_id']/drug_class.drug_id.nunique(), 2)
bucket_sales['avg_sales_per_drug'] = (
bucket_sales['net_sales']/bucket_sales['drug_id'])
print(bucket_sales)
return drug_class, bucket_sales | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/item_classification.py | item_classification.py |
import datetime
import numpy as np
import pandas as pd
def forecast_data_prep(store_id_list, type_list, reset_date, db, schema,
logger=None, last_date=None, is_wh='N'):
''' FETCHING HISTORICAL SALES AND SALES LOSS DATA '''
if last_date is None:
last_date = datetime.date(day=1, month=4, year=2019)
print('Date range', str(last_date), str(reset_date))
# store list
if type(store_id_list) is not list:
store_id_list = [store_id_list]
store_id_list = str(store_id_list).replace('[', '(').replace(']', ')')
# drug list
drug_list_query = """
select id as drug_id from "{schema}".drugs where type in {0}
""".format(type_list, schema=schema)
drug_list = db.get_df(drug_list_query)
# sales query
sales_query = """
select date("created-at") as "sales-date", "drug-id" ,
sum("net-quantity") as "net-sales-quantity"
from "{schema}".sales s
where "store-id" in {store_id_list}
and date("created-at") >= '{last_date}'
and date("created-at") < '{reset_date}'
group by "sales-date", "drug-id"
""".format(
store_id_list=store_id_list, last_date=last_date,
reset_date=reset_date, schema=schema)
sales_history = db.get_df(sales_query)
sales_history.columns = [c.replace('-', '_') for c in sales_history.columns]
calendar_query = """
select date, year, month, "week-of-year", "day-of-week"
from "{schema}".calendar
""".format(schema=schema)
calendar = db.get_df(calendar_query)
calendar.columns = [c.replace('-', '_') for c in calendar.columns]
sales_history = sales_history.merge(drug_list, how='inner', on='drug_id')
# cfr pr loss
cfr_pr_query = f"""
select "attributed-loss-date", "drug-id",
sum("loss-quantity") as "loss-quantity"
from "{schema}"."cfr-patient-request"
where "shortbook-date" >= '{last_date}'
and "shortbook-date" < '{reset_date}'
and "drug-id" <> -1
and ("drug-category" = 'chronic' or "repeatability-index" >= 40)
and "loss-quantity" > 0
and "drug-type" in {type_list}
and "store-id" in {store_id_list}
group by "attributed-loss-date", "drug-id"
"""
cfr_pr = db.get_df(cfr_pr_query)
cfr_pr["loss-quantity"] = cfr_pr["loss-quantity"].astype(float)
cfr_pr.columns = [c.replace('-', '_') for c in cfr_pr.columns]
print(sales_history.sales_date.max())
print(cfr_pr.attributed_loss_date.max())
sales_history = sales_history.groupby(
['sales_date', 'drug_id']).sum().reset_index()
# imputing days with no sales with zero sales
sales_history['sales_date'] = pd.to_datetime(sales_history['sales_date'])
sales_history = get_formatted_data(sales_history, 'drug_id', 'sales_date', 'net_sales_quantity')
cfr_pr['attributed_loss_date'] = pd.to_datetime(cfr_pr['attributed_loss_date'])
# total demand merge
sales = sales_history.merge(
cfr_pr, left_on=['sales_date', 'drug_id'],
right_on=['attributed_loss_date', 'drug_id'], how='left')
sales['sales_date'] = sales['sales_date'].combine_first(
sales['attributed_loss_date'])
sales['net_sales_quantity'].fillna(0, inplace=True)
sales['loss_quantity'].fillna(0, inplace=True)
sales['net_sales_quantity'] += sales['loss_quantity']
sales.drop(['attributed_loss_date', 'loss_quantity'], axis=1, inplace=True)
print(sales.drug_id.nunique())
    # daily demand standard deviation per drug, over the last ~4 weeks
demand_daily_deviation = sales[sales['sales_date'] > pd.to_datetime(reset_date) - datetime.timedelta(days = 29)]
demand_daily_deviation = demand_daily_deviation.groupby('drug_id').std().reset_index()
demand_daily_deviation = demand_daily_deviation.rename(columns={'net_sales_quantity': 'demand_daily_deviation'})
'''
CREATING DAY-DRUG SALES CROSS TABLE
'''
calendar['date'] = pd.to_datetime(calendar['date'])
sales['sales_date'] = pd.to_datetime(sales['sales_date'])
print('Distinct drug count', sales.drug_id.nunique())
print('No of days', sales.sales_date.nunique())
cal_sales_weekly = calendar.loc[
(pd.to_datetime(calendar['date']) >= sales.sales_date.min()) &
(calendar['date'] <= sales.sales_date.max())]
cal_sales_monthly = calendar.loc[
(pd.to_datetime(calendar['date']) >= sales.sales_date.min()) &
(calendar['date'] <= sales.sales_date.max())]
# removing the first week if it has less than 7 days
min_year = cal_sales_weekly.year.min()
x = cal_sales_weekly.loc[(cal_sales_weekly.year == min_year)]
min_month = x.month.min()
x = x.loc[(x.month == min_month)]
min_week = x.week_of_year.min()
if x.loc[x.week_of_year == min_week].shape[0] < 7:
print('removing dates for', min_year, min_month, min_week)
cal_sales_weekly = cal_sales_weekly.loc[
~((cal_sales_weekly.week_of_year == min_week) &
(cal_sales_weekly.year == min_year))]
# removing the latest week if it has less than 7 days
max_year = cal_sales_weekly.year.max()
x = cal_sales_weekly.loc[(cal_sales_weekly.year == max_year)]
max_month = x.month.max()
x = x.loc[(x.month == max_month)]
max_week = x.week_of_year.max()
if x.loc[x.week_of_year == max_week].shape[0] < 7:
print('removing dates for', max_year, max_month, max_week)
cal_sales_weekly = cal_sales_weekly.loc[
~((cal_sales_weekly.week_of_year == max_week) &
(cal_sales_weekly.year == max_year))]
# adding week begin date
cal_sales_weekly['week_begin_dt'] = cal_sales_weekly.apply(
lambda x: x['date'] - datetime.timedelta(x['day_of_week']), axis=1)
cal_sales_weekly['month_begin_dt'] = cal_sales_weekly.apply(
lambda x: x['date'] - datetime.timedelta(x['date'].day - 1), axis=1)
cal_sales_monthly['week_begin_dt'] = cal_sales_monthly.apply(
lambda x: x['date'] - datetime.timedelta(x['day_of_week']), axis=1)
cal_sales_monthly['month_begin_dt'] = cal_sales_monthly.apply(
lambda x: x['date'] - datetime.timedelta(x['date'].day - 1), axis=1)
drugs = sales[['drug_id']].drop_duplicates()
drugs['key'] = 1
cal_sales_weekly['key'] = 1
cal_drug_w = drugs.merge(cal_sales_weekly, on='key', how='inner')
cal_drug_w.drop('key', axis=1, inplace=True)
cal_drug_sales_w = cal_drug_w.merge(
sales, left_on=['drug_id', 'date'], right_on=['drug_id', 'sales_date'],
how='left')
del cal_drug_w
cal_drug_sales_w.drop('sales_date', axis=1, inplace=True)
cal_drug_sales_w.net_sales_quantity.fillna(0, inplace=True)
cal_sales_monthly['key'] = 1
cal_drug_m = drugs.merge(cal_sales_monthly, on='key', how='inner')
cal_drug_m.drop('key', axis=1, inplace=True)
cal_drug_sales_m = cal_drug_m.merge(
sales, left_on=['drug_id', 'date'], right_on=['drug_id', 'sales_date'],
how='left')
del cal_drug_m
cal_drug_sales_m.drop('sales_date', axis=1, inplace=True)
cal_drug_sales_m.net_sales_quantity.fillna(0, inplace=True)
# assertion test to check no of drugs * no of days equals total entries
drug_count = cal_drug_sales_w.drug_id.nunique()
day_count = cal_drug_sales_w.date.nunique()
print('Distinct no of drugs', drug_count)
print('Distinct dates', day_count)
print('DF shape', cal_drug_sales_w.shape[0])
# assert drug_count*day_count == cal_drug_sales.shape[0]
# checking for history available and store opening date
first_bill_query = """
select min(date("created-at")) as bill_date from "{schema}"."bills-1"
where "store-id" in {store_id_list}
""".format(schema=schema, store_id_list=store_id_list)
first_bill_date = db.get_df(first_bill_query).values[0][0]
print(first_bill_date)
cal_drug_sales_w = cal_drug_sales_w.query(
'date >= "{}"'.format(first_bill_date))
cal_drug_sales_m = cal_drug_sales_m.query(
'date >= "{}"'.format(first_bill_date))
'''
AGGREGATION AT WEEKLY LEVEL
'''
cal_drug_sales_weekly = cal_drug_sales_w.groupby(
['drug_id', 'week_begin_dt', 'week_of_year']
)['net_sales_quantity'].sum().reset_index()
del cal_drug_sales_w
print(cal_drug_sales_weekly.drug_id.nunique())
    # getting drug ids that haven't been sold in the last 26 weeks
n = 26
prev_n_week_dt = (
cal_drug_sales_weekly.week_begin_dt.max() - datetime.timedelta(n * 7))
prev_n_week_sales = cal_drug_sales_weekly[
cal_drug_sales_weekly['week_begin_dt'] > prev_n_week_dt]. \
groupby('drug_id')['net_sales_quantity'].sum().reset_index()
prev_no_sales_drug_weekly = prev_n_week_sales.loc[
prev_n_week_sales['net_sales_quantity'] <= 0, 'drug_id'].values
cal_drug_sales_weekly = cal_drug_sales_weekly[
~cal_drug_sales_weekly.drug_id.isin(prev_no_sales_drug_weekly)]
print(cal_drug_sales_weekly.drug_id.nunique())
cal_drug_sales_weekly.rename(
columns={'week_begin_dt': 'date'}, inplace=True)
validation_week = 4
validation_weeks = cal_drug_sales_weekly['date'].drop_duplicates(). \
nlargest(validation_week)
print(validation_weeks)
cal_drug_sales_weekly['sample_flag'] = np.where(
cal_drug_sales_weekly['date'].isin(validation_weeks),
'validation', 'insample')
'''
AGGREGATION AT MONTHLY LEVEL
'''
cal_drug_sales_monthly = cal_drug_sales_m.groupby(
['drug_id', 'month_begin_dt', 'year', 'month']
)['net_sales_quantity'].sum().reset_index()
del cal_drug_sales_m
if is_wh == 'N':
# removing incomplete month's sales
cal_drug_sales_monthly = cal_drug_sales_monthly[
cal_drug_sales_monthly.month_begin_dt != max(
cal_drug_sales_monthly.month_begin_dt)]
    # getting drug ids that haven't been sold in the last 6 months
print(cal_drug_sales_monthly.drug_id.nunique())
n = 6
prev_n_month_dt = cal_drug_sales_monthly[
['month_begin_dt']].drop_duplicates(). \
sort_values('month_begin_dt', ascending=False
)['month_begin_dt'].head(n - 1)
prev_n_month_sales = cal_drug_sales_monthly[
cal_drug_sales_monthly['month_begin_dt'].isin(prev_n_month_dt)]. \
groupby('drug_id')['net_sales_quantity'].sum().reset_index()
prev_no_sales_drug_monthly = prev_n_month_sales.loc[
prev_n_month_sales['net_sales_quantity'] <= 0, 'drug_id'].values
# removing such drugs
cal_drug_sales_monthly = cal_drug_sales_monthly[
(~cal_drug_sales_monthly.drug_id.isin(prev_no_sales_drug_monthly))
]
print(cal_drug_sales_monthly.drug_id.nunique())
if is_wh == 'Y':
return cal_drug_sales_weekly, cal_drug_sales_monthly, cal_sales_weekly, demand_daily_deviation
else:
return cal_drug_sales_weekly, cal_drug_sales_monthly, cal_sales_weekly
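# Illustrative, self-contained sketch (not part of the original pipeline) showing
# what get_formatted_data (defined below) does: it zero-fills missing dates per
# drug, starting from each drug's first sale date.
def _example_get_formatted_data():
    df = pd.DataFrame({
        'drug_id': [1, 1, 2],
        'sales_date': pd.to_datetime(['2022-01-01', '2022-01-03', '2022-01-02']),
        'net_sales_quantity': [5, 3, 2],
    })
    out = get_formatted_data(df, 'drug_id', 'sales_date', 'net_sales_quantity')
    # drug 1 gets a zero-filled row for 2022-01-02; drug 2 starts on 2022-01-02,
    # so no earlier dates are back-filled for it
    return out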
def get_formatted_data(df, key_col, date_col, target_col):
df_start = df.groupby([key_col])[date_col].min().reset_index().rename(columns={date_col: 'sales_start'})
df = df[[key_col, date_col, target_col]]
min_date = df[date_col].dropna().min()
end_date = df[date_col].dropna().max()
    date_range = pd.date_range(
        start=min_date,
        end=end_date,
        freq='d'
    )
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = np.nan
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = pd.merge(df, df_start, how='left', on=key_col)
df = df[df[date_col] >= df['sales_start']]
return df | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/data_prep.py | data_prep.py |
import pandas as pd
import numpy as np
import time
from scipy.stats import norm
from zeno_etl_libs.utils.ipc.data_prep import forecast_data_prep
from zeno_etl_libs.utils.ipc.item_classification import abc_xyz_classification
from zeno_etl_libs.utils.ipc.forecasting_modules.helper_functions import sum_std,\
applyParallel, applyParallel_lstm
from zeno_etl_libs.utils.ipc.forecasting_modules.lstm import lstm_forecast
from zeno_etl_libs.utils.ipc.forecasting_modules.moving_average import moving_average
from zeno_etl_libs.utils.ipc.forecasting_modules.prophet import prophet_weekly_predict
from zeno_etl_libs.utils.ipc.lead_time import lead_time
from zeno_etl_libs.utils.ipc.safety_stock import safety_stock_calc
def ipc_forecast_reset(
store_id, type_list, reset_date, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, db, schema,
drug_type_list_v4, v5_active_flag, v6_active_flag, v6_type_list,
v6_ptr_cut_off, chronic_max_flag='N', logger=None):
    '''DATA PREPARATION'''
cal_drug_sales_weekly, cal_drug_sales_monthly,\
cal_sales = forecast_data_prep(store_id, type_list, reset_date,
db, schema, logger)
'''ITEM CLASSIFICATION'''
drug_class, bucket_sales = abc_xyz_classification(
cal_drug_sales_monthly, logger)
'''FORECASTING'''
forecast_horizon = 4
# LSTM
week_in = 8
week_out = 4
epochs = 200
n_neurons = 8
use_dropout = 0.2
error_factor = 2
lstm_drug_list = drug_class.loc[
(drug_class['bucket_abc'] == 'A') & (drug_class['bucket_xyz'] == 'X') |
(drug_class['bucket_abc'] == 'A') & (drug_class['bucket_xyz'] == 'Y') |
(drug_class['bucket_abc'] == 'B') & (drug_class['bucket_xyz'] == 'X'),
'drug_id']
lstm_data_weekly = cal_drug_sales_weekly.loc[
cal_drug_sales_weekly['drug_id'].isin(lstm_drug_list)]
start = time.time()
lstm_weekly_fcst = applyParallel_lstm(
lstm_data_weekly.groupby('drug_id'), lstm_forecast,
n_neurons=n_neurons, week_in=week_in, week_out=week_out,
forecast_horizon=forecast_horizon, epochs=epochs,
use_dropout=use_dropout, error_factor=error_factor).\
reset_index(drop=True)
end = time.time()
print('Run time ', end-start)
# MOVING AVERAGES
ma_drug_list = drug_class.loc[
(drug_class['bucket_abc'] == 'B') & (drug_class['bucket_xyz'] == 'Y') |
(drug_class['bucket_abc'] == 'B') & (drug_class['bucket_xyz'] == 'Z') |
(drug_class['bucket_abc'] == 'C') & (drug_class['bucket_xyz'] == 'X'),
'drug_id']
ma_data_weekly = cal_drug_sales_weekly.loc[
cal_drug_sales_weekly['drug_id'].isin(ma_drug_list)]
start = time.time()
ma_weekly_fcst = ma_data_weekly.groupby('drug_id').\
apply(moving_average).reset_index(drop=True)
end = time.time()
print('Run time ', end-start)
# PROPHET
prophet_drug_list = drug_class.loc[
(drug_class['bucket_abc'] == 'C') & (drug_class['bucket_xyz'] == 'Y') |
(drug_class['bucket_abc'] == 'C') & (drug_class['bucket_xyz'] == 'Z') |
(drug_class['bucket_abc'] == 'A') & (drug_class['bucket_xyz'] == 'Z'),
'drug_id']
prophet_data_weekly = cal_drug_sales_weekly.loc[
cal_drug_sales_weekly['drug_id'].isin(prophet_drug_list)]
start = time.time()
prophet_weekly_fcst = applyParallel(
prophet_data_weekly.groupby('drug_id'), prophet_weekly_predict).\
reset_index(drop=True)
end = time.time()
print('Run time ', end-start)
'''COMPILING OUTPUT AND PERCENTILE FORECAST'''
columns = ['model', 'drug_id', 'date', 'fcst', 'std']
ma_weekly_fcst['model'] = 'MA'
ma_weekly_fcst = ma_weekly_fcst[columns]
prophet_weekly_fcst['model'] = 'Prophet'
prophet_weekly_fcst = prophet_weekly_fcst[columns]
lstm_weekly_fcst['model'] = 'LSTM'
lstm_weekly_fcst = lstm_weekly_fcst[columns]
weekly_fcst = pd.concat(
[ma_weekly_fcst, prophet_weekly_fcst, lstm_weekly_fcst], axis=0)
percentile_bucket_dict = {
'AX': 0.5, 'AY': 0.5, 'AZ': 0.5,
'BX': 0.5, 'BY': 0.6, 'BZ': 0.6,
'CX': 0.5, 'CY': 0.6, 'CZ': 0.6}
print(weekly_fcst.drug_id.nunique())
weekly_fcst = weekly_fcst.merge(
drug_class[['drug_id', 'bucket_abc', 'bucket_xyz']],
on='drug_id', how='inner')
weekly_fcst['bucket'] = (
weekly_fcst['bucket_abc'] + weekly_fcst['bucket_xyz'])
weekly_fcst.drop(['bucket_abc', 'bucket_xyz'], axis=1, inplace=True)
for key in percentile_bucket_dict.keys():
print(key, percentile_bucket_dict[key])
indexs = weekly_fcst[weekly_fcst.bucket == key].index
weekly_fcst.loc[indexs, 'percentile'] = percentile_bucket_dict[key]
weekly_fcst.loc[indexs, 'fcst'] = np.round(
weekly_fcst.loc[indexs, 'fcst'] +
norm.ppf(percentile_bucket_dict[key]) *
weekly_fcst.loc[indexs, 'std'])
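        # e.g. for bucket 'BY' the percentile is 0.6 and norm.ppf(0.6) ~ 0.253, so
        # the forecast is bumped up by ~0.25 * std; for the 0.5 buckets
        # norm.ppf(0.5) = 0 and the point forecast is left unchanged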
agg_fcst = weekly_fcst.groupby(
['model', 'drug_id', 'bucket', 'percentile']).\
agg({'fcst': 'sum', 'std': sum_std}).reset_index()
'''LEAD TIME CALCULATION'''
lt_drug, lt_store_mean, lt_store_std = lead_time(
store_id, cal_sales, reset_date, db, schema, logger)
'''SAFETY STOCK CALCULATION'''
safety_stock_df, df_corrections, df_corrections_111, \
drugs_max_to_lock_ipcv6, drug_rejects_ipcv6 = safety_stock_calc(
agg_fcst, store_id, forecast_horizon, lt_drug,
lt_store_mean, lt_store_std, reset_date, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, chronic_max_flag,
v5_active_flag, v6_active_flag, v6_type_list,
v6_ptr_cut_off, drug_type_list_v4, db, schema, logger)
return drug_class, weekly_fcst, safety_stock_df, df_corrections, \
df_corrections_111, drugs_max_to_lock_ipcv6, drug_rejects_ipcv6 | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/forecast_reset.py | forecast_reset.py |
import pandas as pd
import numpy as np
from scipy.stats import norm
from datetime import datetime
from zeno_etl_libs.utils.ipc.heuristics.base import base_heuristics
from zeno_etl_libs.utils.ipc.heuristics.ipcv4_heuristics import ipcv4_heuristics
from zeno_etl_libs.utils.ipc.heuristics.ipcv5_heuristics import v5_corrections
# from scripts.ops.ipc.heuristics.ipcv6_heuristics import v6_corrections
'''
service level - 95%
safety stock = z * sqrt(avg_lead_time * std_daily_demand^2 +
                        avg_daily_demand^2 * std_lead_time^2)
re-order point = avg_lead_time * avg_daily_demand + safety stock
'''
def safety_stock_calc(agg_fcst, store_id, forecast_horizon, lt_drug,
lt_store_mean, lt_store_std, reset_date,
corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
chronic_max_flag, v5_active_flag, v6_active_flag,
v6_type_list, v6_ptr_cut_off,
drug_type_list_v4, db, schema, logger):
service_level = 0.95
fcst_weeks = 4
order_freq = 4
z = norm.ppf(service_level)
print(lt_store_mean, lt_store_std)
safety_stock_df = agg_fcst.merge(
lt_drug[['drug_id', 'lead_time_mean', 'lead_time_std']],
how='left', on='drug_id')
safety_stock_df['lead_time_mean'].fillna(lt_store_mean, inplace=True)
safety_stock_df['lead_time_std'].fillna(lt_store_std, inplace=True)
# heuristics #1
safety_stock_df['lead_time_std'] = np.where(
safety_stock_df['lead_time_std'] < 1,
lt_store_std, safety_stock_df['lead_time_std'])
    # safety stock value - variation in demand & lead time
safety_stock_df['safety_stock'] = safety_stock_df.apply(
lambda row: np.round(z * np.sqrt(
(row['lead_time_mean'] * np.square(
row['std'] / np.sqrt(fcst_weeks * 7)) +
np.square(row['lead_time_std'] * row['fcst'] / fcst_weeks / 7))
)), axis=1)
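    # Worked example (illustrative numbers): with a 95% service level (z ~ 1.645),
    # a 4-week forecast of 28 units (1/day), std = 7 over the horizon
    # (daily sigma = 7/sqrt(28)), lead_time_mean = 3 days and lead_time_std = 1 day:
    #   safety_stock  = round(1.645 * sqrt(3 * (7/sqrt(28))^2 + (1 * 1)^2))
    #                 = round(1.645 * sqrt(6.25)) = round(4.11) = 4
    #   reorder_point = round(3 * 28 / 4 / 7) + 4 = 3 + 4 = 7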
safety_stock_df['safety_stock'] = np.where(
safety_stock_df['fcst'] == 0, 0, safety_stock_df['safety_stock'])
# consider avg fulfillment times
safety_stock_df['reorder_point'] = safety_stock_df.apply(
lambda row: np.round(
row['lead_time_mean'] * row['fcst'] / fcst_weeks / 7),
axis=1) + safety_stock_df['safety_stock']
    # add order_freq (4) days of demand cover on top of the reorder point
    # heuristics #2
safety_stock_df['order_upto_point'] = (
safety_stock_df['reorder_point'] +
np.round(
np.where(
                # if rounding gives 0, increase it to the 4-week forecast
(safety_stock_df['reorder_point'] +
safety_stock_df[
'fcst'] * order_freq / fcst_weeks / 7 < 0.5) &
(safety_stock_df['fcst'] > 0),
safety_stock_df['fcst'],
safety_stock_df['fcst'] * order_freq / fcst_weeks / 7))
)
# correction for negative forecast
safety_stock_df['safety_stock'] = np.where(
safety_stock_df['safety_stock'] < 0,
0, safety_stock_df['safety_stock'])
safety_stock_df['reorder_point'] = np.where(
safety_stock_df['reorder_point'] < 0,
0, safety_stock_df['reorder_point'])
safety_stock_df['order_upto_point'] = np.where(
safety_stock_df['order_upto_point'] < 0,
0, safety_stock_df['order_upto_point'])
safety_stock_df['safety_stock_days'] = np.round(
7 * forecast_horizon * safety_stock_df['safety_stock'] /
safety_stock_df['fcst'])
safety_stock_df['reorder_days'] = np.round(
7 * forecast_horizon * safety_stock_df['reorder_point'] /
safety_stock_df['fcst'])
safety_stock_df['order_upto_days'] = np.round(
7 * forecast_horizon * safety_stock_df['order_upto_point'] /
safety_stock_df['fcst'])
# heuristics #3
safety_stock_df['order_upto_point'] = np.where(
safety_stock_df['order_upto_days'] < 14,
np.round(14 * safety_stock_df['fcst'] / fcst_weeks / 7),
safety_stock_df['order_upto_point']
)
safety_stock_df['order_upto_days'] = np.round(
7 * forecast_horizon * safety_stock_df['order_upto_point'] /
safety_stock_df['fcst'])
# recent actuals base adjustments
safety_stock_df = base_heuristics(
store_id, safety_stock_df, reset_date, db, schema, logger)
# getting order value
drug_list = list(safety_stock_df['drug_id'].unique())
print(len(drug_list))
drug_str = str(drug_list).replace('[', '(').replace(']', ')')
fptr_query = """
select "drug-id" , avg(ptr) as fptr, sum(quantity) as curr_inventory
from "{schema}"."inventory-1" i
where "store-id" = {store_id}
and "drug-id" in {drug_str}
group by "drug-id"
""".format(store_id=store_id, drug_str=drug_str, schema=schema)
fptr = db.get_df(fptr_query)
fptr.columns = [c.replace('-', '_') for c in fptr.columns]
fptr["fptr"] = fptr["fptr"].astype(float)
final_pred_ss_df = safety_stock_df.merge(fptr, on='drug_id', how='left')
final_pred_ss_df['fptr'].fillna(100, inplace=True)
final_pred_ss_df['max_value'] = (
final_pred_ss_df['fptr'] * final_pred_ss_df['order_upto_point'])
print(final_pred_ss_df.groupby('bucket')['max_value'].sum().reset_index())
print(28 * final_pred_ss_df['order_upto_point'].sum() /
final_pred_ss_df['fcst'].sum())
print(final_pred_ss_df['max_value'].sum())
# correction plugin - Start
if corrections_flag:
final_pred_ss_df['correction_flag'] = 'N'
final_pred_ss_df['store_id'] = store_id
print("corrections code is running now:")
q_prob = f"""select * from "{schema}"."ipc-corrections-rest-cases" """
q_prob_111 = f"""select * from "{schema}"."ipc-corrections-111-cases" """
prob_matrix = db.get_df(q_prob)
df_111 = db.get_df(q_prob_111)
prob_matrix.columns = [c.replace('-', '_') for c in prob_matrix.columns]
df_111.columns = [c.replace('-', '_') for c in df_111.columns]
        # list of drugs for which corrections are required, i.e. max value 0
df_corrections_list = final_pred_ss_df[
final_pred_ss_df['order_upto_point'] == 0][['store_id', 'drug_id']]
df_corrections = pd.merge(
df_corrections_list, prob_matrix, how='inner',
on=['store_id', 'drug_id'], validate='one_to_one')
df_corrections = df_corrections.drop(columns={'corrected_max'})
df_corrections['order_upto_point'] = np.round(
df_corrections['current_ma_3_months'])
df_corrections_1 = df_corrections[
(df_corrections['cumm_prob'] >=
corrections_cumulative_probability_cutoff['ma_less_than_2']) &
(df_corrections['current_flag_ma_less_than_2'] == 1)]
df_corrections_2 = df_corrections[
(df_corrections['cumm_prob'] >=
corrections_cumulative_probability_cutoff['ma_more_than_2']) &
(df_corrections['current_flag_ma_less_than_2'] == 0)]
df_corrections_1 = df_corrections_1[
(df_corrections_1['selling_probability'] >=
corrections_selling_probability_cutoff['ma_less_than_2']) &
(df_corrections_1['current_flag_ma_less_than_2'] == 1)]
df_corrections_2 = df_corrections_2[
(df_corrections_2['selling_probability'] >=
corrections_selling_probability_cutoff['ma_more_than_2']) &
(df_corrections_2['current_flag_ma_less_than_2'] == 0)]
df_corrections = pd.concat(
[df_corrections_1, df_corrections_2]).reset_index(drop=True)
df_corrections_final = df_corrections.copy()[
['store_id', 'drug_id', 'current_bucket', 'selling_probability',
'cumm_prob', 'current_flag_ma_less_than_2',
'avg_ptr', 'current_ma_3_months']]
# adding run time current inventory
df_corrections_final = pd.merge(
df_corrections_final,
final_pred_ss_df[['store_id', 'drug_id', 'curr_inventory']],
on=['store_id', 'drug_id'], how='left', validate='one_to_one')
df_corrections = df_corrections[
['store_id', 'drug_id', 'order_upto_point']]
df_corrections['reorder_point'] = np.floor(
df_corrections['order_upto_point'] / 2)
df_corrections['safety_stock'] = np.floor(
df_corrections['order_upto_point'] / 4)
df_corrections['correction_flag'] = 'Y'
df_corrections['is_ipc'] = 'Y'
df_corrections = df_corrections.set_index(['store_id', 'drug_id'])
final_pred_ss_df = final_pred_ss_df.set_index(['store_id', 'drug_id'])
final_pred_ss_df.update(df_corrections)
final_pred_ss_df = final_pred_ss_df.reset_index()
df_corrections = df_corrections.reset_index()
df_corrections = pd.merge(
df_corrections, df_corrections_final, on=['store_id', 'drug_id'],
how='left', validate='one_to_one')
# update 111 cases here.
df_corrections_111 = pd.merge(
df_corrections_list, df_111, how='inner',
on=['store_id', 'drug_id'], validate='one_to_one')
df_corrections_111 = df_corrections_111.drop(
columns={'current_inventory', 'original_max', 'corrected_max',
'inv_impact', 'max_impact'}, axis=1)
df_corrections_111['order_upto_point'] = np.round(
df_corrections_111['ma_3_months'])
df_corrections_111['reorder_point'] = np.floor(
df_corrections_111['order_upto_point'] / 2)
df_corrections_111['safety_stock'] = np.floor(
df_corrections_111['order_upto_point'] / 4)
df_corrections_111['correction_flag'] = 'Y'
df_corrections_111['is_ipc'] = 'Y'
# adding run time current inventory
df_corrections_111 = pd.merge(
df_corrections_111,
final_pred_ss_df[['store_id', 'drug_id', 'curr_inventory']],
on=['store_id', 'drug_id'], how='left', validate='one_to_one')
df_corrections_111 = df_corrections_111.set_index(
['store_id', 'drug_id'])
final_pred_ss_df = final_pred_ss_df.set_index(['store_id', 'drug_id'])
final_pred_ss_df.update(df_corrections_111)
final_pred_ss_df = final_pred_ss_df.reset_index()
df_corrections_111 = df_corrections_111.reset_index()
# set reset date
curr_date = str(datetime.now())
df_corrections['reset_date'] = curr_date
df_corrections_111['reset_date'] = curr_date
else:
print('corrections block skipped :')
final_pred_ss_df["store_id"] = store_id
final_pred_ss_df["correction_flag"] = 'N'
df_corrections = pd.DataFrame()
df_corrections_111 = pd.DataFrame()
# Correction plugin - End #
final_pred_ss_df = final_pred_ss_df.drop(['store_id'], axis=1)
# Chronic drug changes
if chronic_max_flag == 'Y':
# based on ME OOS feedback - keep chronic drugs
drug_max_zero = tuple(
final_pred_ss_df.query('order_upto_point == 0')['drug_id'])
# reading chronic drug list
drug_chronic_max_zero_query = '''
select id as drug_id from "{schema}".drugs
where category = 'chronic'
and id in {0}
'''.format(str(drug_max_zero), schema=schema)
drug_chronic_max_zero = db.get_df(drug_chronic_max_zero_query)['drug_id']
# setting non zero max for such drugs
final_pred_ss_df.loc[
(final_pred_ss_df['drug_id'].isin(drug_chronic_max_zero)) &
(final_pred_ss_df['order_upto_point'] == 0),
'order_upto_point'] = 1
final_pred_ss_df.loc[
(final_pred_ss_df['drug_id'].isin(drug_chronic_max_zero)) &
(final_pred_ss_df['order_upto_point'] == 0),
'correction_flag'] = 'Y_chronic'
# Min/SS/Max overlap correction
final_pred_ss_df['safety_stock_days'].fillna(0, inplace=True)
final_pred_ss_df['reorder_days'].fillna(0, inplace=True)
final_pred_ss_df['order_upto_days'].fillna(0, inplace=True)
final_pred_ss_df = ipcv4_heuristics(final_pred_ss_df, drug_type_list_v4, db, schema)
if v5_active_flag == "Y":
logger.info("IPC V5 Correction Starts")
        final_pred_ss_df = v5_corrections(store_id, final_pred_ss_df, db, schema, logger)
logger.info("IPC V5 Correction Successful")
# if v6_active_flag == "Y":
# logger.info("IPC V6 Correction Starts")
# final_pred_ss_df, drugs_max_to_lock_ipcv6, drug_rejects_ipcv6 = \
# v6_corrections(store_id, final_pred_ss_df, reset_date, v6_type_list,
# v6_ptr_cut_off, logger)
#
# # add algo name to v6 write table
# drugs_max_to_lock_ipcv6["algo"] = 'ipc'
# drug_rejects_ipcv6["algo"] = 'ipc'
# logger.info("IPC V6 Correction Successful")
# else:
drugs_max_to_lock_ipcv6 = pd.DataFrame()
drug_rejects_ipcv6 = pd.DataFrame()
return final_pred_ss_df, df_corrections, df_corrections_111, \
        drugs_max_to_lock_ipcv6, drug_rejects_ipcv6

 | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/safety_stock.py | safety_stock.py |
import datetime
import pandas as pd
import numpy as np
import keras.backend as K
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM
import tensorflow as tf
tf.__version__
def ae_weight_calc(y_true, y_pred, pos_error_weight):
# dim of y_pred, y_true [n_batch, output var]
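    # asymmetric MAE: under-forecast errors (y_true > y_pred) are weighted by
    # (1 + pos_error_weight), over-forecast errors by pos_error_weight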
error = y_true - y_pred
greater = K.greater(error, 0)
# 0 for y pred is more, 1 for y_pred is less
greater = K.cast(greater, K.floatx())
greater = greater + pos_error_weight
error = K.abs(error)
error = K.mean(error*greater, axis=1)
return error
def custom_loss(pos_error_weight):
def ae_specific_loss(y_true, y_pred):
return ae_weight_calc(y_true, y_pred, pos_error_weight)
# Returns the (y_true, y_pred) loss function
return ae_specific_loss
# create a differenced series for stationarity
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return pd.Series(diff)
def series_to_supervised(df, n_in=1, n_out=1, dropnan=True):
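    """
    Frame a univariate series as a supervised learning problem: lag columns
    var1(t-n_in) ... var1(t-1) as inputs and var1(t) ... var1(t+n_out-1) as
    outputs, e.g. n_in=2, n_out=2 -> var1(t-2), var1(t-1), var1(t), var1(t+1).
    """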
if type(df) == pd.DataFrame:
data = df[['net_sales_quantity']].values
else:
data = df
data_df = pd.DataFrame(data)
n_vars = 1
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(data_df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(data_df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
def prepare_data(df, n_test=1, n_in=1, n_out=1):
np.random.seed(1234)
# transform into lag and lead
supervised_df = series_to_supervised(df, n_in=n_in, n_out=n_out)
date_df = df[['date']].reset_index(drop=True)
supervised_df = supervised_df.merge(
date_df, how='inner', left_index=True, right_index=True)
# marking test and train
supervised_df['sample_flag'] = ''
    supervised_df.loc[supervised_df.index[0:-n_test], 'sample_flag'] = 'train'
    supervised_df.loc[supervised_df.index[-n_test:], 'sample_flag'] = 'validation'
# transform data to be stationary
raw_values = df[['net_sales_quantity']].values
diff_series = difference(raw_values, 1)
diff_values = diff_series.values
diff_values = diff_values.reshape(len(diff_values), 1)
# rescale values to -1, 1
scaler = MinMaxScaler(feature_range=(-1, 1))
scaled_values = scaler.fit_transform(diff_values)
scaled_values = scaled_values.reshape(len(scaled_values), 1)
# transform into supervised learning problem X, y
supervised_scaled_df = series_to_supervised(
scaled_values, n_in=n_in, n_out=n_out)
supervised_scaled_df = supervised_scaled_df.merge(
date_df, how='inner', left_index=True, right_index=True)
# marking test and train for scaled version
supervised_scaled_df['sample_flag'] = ''
    supervised_scaled_df.loc[supervised_scaled_df.index[0:-n_test], 'sample_flag'] = 'train'
    supervised_scaled_df.loc[supervised_scaled_df.index[-n_test:], 'sample_flag'] = 'validation'
return supervised_df, supervised_scaled_df, scaler
# fit an LSTM network to training data
def fit_lstm(
X, y, n_in=1, n_out=1, n_batch=1, nb_epoch=1000,
n_neurons=4, use_dropout=False, error_factor=1):
# reshape training into [samples, timesteps, features]
X = X.reshape(X.shape[0], 1, X.shape[1])
# design network
model = Sequential()
model.add(LSTM(
n_neurons, batch_input_shape=(n_batch, X.shape[1], X.shape[2]),
stateful=True))
if use_dropout is not False:
model.add(Dropout(use_dropout))
model.add(Dense(y.shape[1]))
loss = custom_loss(error_factor)
model.compile(loss=loss, optimizer='adam')
# print(model.summary())
# model.compile(loss='mean_squared_error', optimizer='adam')
# fit network
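    # stateful LSTM: train one epoch at a time without shuffling and reset the
    # cell state between epochs, so state is only carried across batches
    # within an epoch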
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=n_batch, verbose=0, shuffle=False)
model.reset_states()
return model
# make one forecast with an LSTM,
def forecast_lstm(model, X, n_batch):
# reshape input pattern to [samples, timesteps, features]
forecasts = []
# make forecast
for i in range(X.shape[0]):
X_input = X[i, :].reshape(1, n_batch, X.shape[1])
forecast = model.predict(X_input, batch_size=n_batch)
# convert to array
forecasts.append(list(forecast.reshape(forecast.shape[1])))
return forecasts
def inverse_transform(data_df, scaler, undifferenced_df, col_names):
undifferenced_df = undifferenced_df.loc[data_df.index]
for col in undifferenced_df.columns:
if (data_df[col].dtype == float):
data_df[col] = scaler.inverse_transform(data_df[[col]])
data_df[col] += undifferenced_df[col]
col_names = ['var1(t-1)'] + col_names
for i in list((range(1, len(col_names)))):
data_df[col_names[i]] = scaler.inverse_transform(
data_df[[col_names[i]]])
data_df[col_names[i]] += data_df[col_names[i - 1]]
data_df[col_names] = np.round(data_df[col_names])
return data_df
def lstm_horizon_ape(df, col_names):
predicted = df[col_names].sum(axis=1)
actual = df[[x.replace('_hat', '') for x in col_names]].sum(axis=1)
return abs(predicted - actual)/actual
def lstm_forecast(
df, n_neurons=1, week_in=1, week_out=1, forecast_horizon=4, epochs=90,
use_dropout=False, n_batch=1, error_factor=1):
drug_id = df['drug_id'].unique()[0]
start_date = df.date.max()
date_list = [
start_date + datetime.timedelta(days=d*7)
for d in range(1, forecast_horizon+1)]
fcst = [0] * forecast_horizon
# setting seed for reproducibility
np.random.seed(1234)
tf.random.set_seed(1234)
supervised_df, supervised_scaled_df, scaler = prepare_data(
df, n_test=forecast_horizon, n_in=week_in, n_out=4)
train = supervised_scaled_df
_, test, _ = prepare_data(
df, n_test=forecast_horizon, n_in=week_in, n_out=0)
variable_name = list(train.columns)
variable_name = variable_name[:-2]
X_train, y_train = (
train[variable_name].values[:, 0:week_in],
train[variable_name].values[:, week_in:])
X_test = test[variable_name[:week_in]].iloc[-1]
X_test = np.reshape(np.ravel(X_test), (1, X_test.shape[0]))
model = fit_lstm(
X_train, y_train, n_in=week_in, n_out=week_out, n_batch=n_batch,
nb_epoch=epochs, n_neurons=n_neurons, use_dropout=use_dropout,
error_factor=error_factor)
hat_col = variable_name[week_in:]
hat_col = [x + '_hat' for x in hat_col]
scaler_test_fcst = forecast_lstm(model, X_test, n_batch=n_batch)
test_fcst = scaler.inverse_transform(scaler_test_fcst)
test_fcst = np.ravel(test_fcst)
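    # undo the first-order differencing: add the cumulative sum of predicted
    # differences to the last observed value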
    for i in range(len(test_fcst)):
        fcst[i] = df.net_sales_quantity.iloc[-1] + np.sum(test_fcst[:i + 1])
if fcst[i] < 0:
fcst[i] = 0
fcst_df = pd.DataFrame({
'drug_id': drug_id, 'date': date_list, 'fcst': np.round(fcst),
'std': np.round(df.net_sales_quantity.iloc[-8:].std())})
return fcst_df
def lstm_wide_long(df, supervised_hat, hat_col):
drug_id = df['drug_id'].values[0]
supervised_hat = supervised_hat[supervised_hat['drug_id'] == drug_id]
return_df = df.copy()
fcst = (
list(supervised_hat.iloc[:-1][hat_col[0]].values) +
list(supervised_hat.iloc[-1][hat_col].values))
return_df.loc[-len(fcst):, 'fcst'] = pd.Series(
fcst, index=df.index[-len(fcst):])
return return_df
def hinge_error(error):
    return sum(error < 0)

 | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/forecasting_modules/lstm.py | lstm.py |
from ast import literal_eval
import pandas as pd
import numpy as np
def ipcv4_heuristics(final_pred_ss_df, drug_type_list_v4, db, schema):
    ''' drug_type_list_v4 variable has format as
drug_type_list_v4 = {'generic':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',
'ethical':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',
'others':'{0:[0,0,0], 1:[0,0,2], 2:[0,1,2],3:[1,2,3]}'}
final_pred_ss_df has the following format Index(['drug_id', 'model', 'bucket', 'percentile', 'fcst', 'std',
'lead_time_mean', 'lead_time_std', 'safety_stock', 'reorder_point',
'order_upto_point', 'safety_stock_days', 'reorder_days',
'order_upto_days', 'fptr', 'curr_inventory', 'max_value',
'correction_flag'],
dtype='object')
'''
q_drug_type_info = f""" select id as drug_id, "type" as drug_type from "{schema}".drugs """
drug_type_info = db.get_df(q_drug_type_info)
#convert drug types which are not generic or ethical as 'others'
drug_type_info['drug_type'] = np.where(
(drug_type_info['drug_type'] == 'ethical') | (drug_type_info['drug_type'] == 'generic'),
drug_type_info['drug_type'],'others')
final_pred_ss_df_v4 = pd.merge(final_pred_ss_df, drug_type_info, on=['drug_id'], how='left')
for drug_type in drug_type_list_v4.keys():
for max_value, ops_value in literal_eval(drug_type_list_v4[drug_type]).items():
safety_stock = ops_value[0] # min value
reorder_point = ops_value[1] # ss value
order_upto_point = ops_value[2] # max value
index_list = final_pred_ss_df_v4[
(final_pred_ss_df_v4['order_upto_point'].isin([max_value])) & (
final_pred_ss_df_v4['drug_type'] == drug_type)].index
final_pred_ss_df_v4.loc[index_list, 'safety_stock'] = safety_stock
final_pred_ss_df_v4.loc[index_list, 'reorder_point'] = reorder_point
final_pred_ss_df_v4.loc[index_list, 'order_upto_point'] = order_upto_point
print('Cases with {0} max: {1} for drug_type:{2} '.format(max_value, len(index_list), drug_type))
#remove the drug type column that was previously added
final_pred_ss_df_v4 = final_pred_ss_df_v4.drop(['drug_type'], axis = 1)
    return final_pred_ss_df_v4

 | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/heuristics/ipcv4_heuristics.py | ipcv4_heuristics.py |
import numpy as np
from datetime import datetime, timedelta
def get_demand_heuristics(start_date, end_date, drug_list, store_id,
db, schema, logger):
# sales query
print('getting data for store', store_id)
sales_query = f"""
select date("created-at") as "sales-date", "drug-id" ,
sum("net-quantity") as "net-sales-quantity"
from "{schema}".sales s
where "store-id" = {store_id}
and date("created-at") >= '{start_date}'
and date("created-at") < '{end_date}'
and "drug-id" in {drug_list}
group by "sales-date", "drug-id"
"""
sales_history = db.get_df(sales_query)
sales_history.columns = [c.replace('-', '_') for c in sales_history.columns]
# cfr pr loss
cfr_pr_query = f"""
select "attributed-loss-date", "drug-id",
sum("loss-quantity") as "loss-quantity"
from "{schema}"."cfr-patient-request"
where "shortbook-date" >= '{start_date}'
and "shortbook-date" < '{end_date}'
and "drug-id" <> -1
and ("drug-category" = 'chronic' or "repeatability-index" >= 40)
and "loss-quantity" > 0
and "drug-id" in {drug_list}
and "store-id" = {store_id}
group by "attributed-loss-date", "drug-id"
"""
cfr_pr = db.get_df(cfr_pr_query)
cfr_pr["loss-quantity"] = cfr_pr["loss-quantity"].astype(float)
cfr_pr.columns = [c.replace('-', '_') for c in cfr_pr.columns]
# total demand merge
demand = sales_history.merge(
cfr_pr, left_on=['sales_date', 'drug_id'],
right_on=['attributed_loss_date', 'drug_id'], how='left')
demand['sales_date'] = demand['sales_date'].combine_first(
demand['attributed_loss_date'])
demand['net_sales_quantity'].fillna(0, inplace=True)
demand['loss_quantity'].fillna(0, inplace=True)
demand['net_sales_quantity'] += demand['loss_quantity']
demand.drop(
['attributed_loss_date', 'loss_quantity'], axis=1, inplace=True)
# aggregating demand at level
demand_agg = demand.groupby(
['drug_id'])['net_sales_quantity'].sum().reset_index()
demand_agg.columns = ['drug_id', 'historical_demand']
# getting drug type
drug_type_query = """
select id as drug_id, type as drug_type
from "{schema}".drugs
where id in {0}
""".format(drug_list, schema=schema)
drug_type = db.get_df(drug_type_query)
demand_agg = demand_agg.merge(drug_type, on=['drug_id'], how='left')
return demand_agg
def base_heuristics(
store_id, safety_stock_df, reset_date, db, schema, logger=None,
raf_range=(0.25, 0.75), corr_raf=0.5):
# getting time period for last 4 weeks
date = datetime.strptime(reset_date, '%Y-%m-%d')
end_date = (date - timedelta(days=date.weekday())).date()
start_date = end_date - timedelta(days=28)
end_date = str(end_date)
start_date = str(start_date)
    logger.info(
        'Getting last 4 week data for base heuristic from ' + start_date +
        ' to ' + end_date)
# getting demand for heuristics - A/B class only
bucket_class_list = ['AX', 'AY', 'AZ', 'BX', 'BY', 'BZ']
drug_list = tuple(list(safety_stock_df.loc[
safety_stock_df.bucket.isin(bucket_class_list),
'drug_id']))
demand = get_demand_heuristics(
start_date, end_date, drug_list, store_id, db, schema, logger)
safety_stock_adj = safety_stock_df.merge(
demand, how='left', on=['drug_id'])
safety_stock_adj['historical_demand'].fillna(0, inplace=True)
# RAF factor calculation
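    # RAF = order_upto_point / last-4-week demand (0.5 when there is no
    # demand history); values outside raf_range trigger an adjustment of the
    # order-upto point towards corr_raf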
safety_stock_adj['raf'] = np.select(
[safety_stock_adj['historical_demand'] == 0],
[0.5],
default=safety_stock_adj['order_upto_point'] /
safety_stock_adj['historical_demand'])
# adjustment using RAF: for low
low_raf_index = safety_stock_adj[
(safety_stock_adj['bucket'].isin(bucket_class_list)) &
(safety_stock_adj['raf'] < raf_range[0])
].index
safety_stock_adj.loc[low_raf_index, 'order_upto_point'] = np.round(
np.where(
safety_stock_adj.loc[low_raf_index, 'order_upto_point'] == 0,
safety_stock_adj.loc[low_raf_index, 'historical_demand']*corr_raf,
(safety_stock_adj.loc[low_raf_index, 'order_upto_point']*corr_raf /
safety_stock_adj.loc[low_raf_index, 'raf'])
))
safety_stock_adj.loc[low_raf_index, 'reorder_point'] = np.round(
safety_stock_adj.loc[low_raf_index, 'order_upto_point']/2)
safety_stock_adj.loc[low_raf_index, 'safety_stock'] = np.round(
safety_stock_adj.loc[low_raf_index, 'reorder_point']/2)
# print(safety_stock_adj.head())
# adjustment using RAF: for high
high_raf_index = safety_stock_adj[
(safety_stock_adj['bucket'].isin(bucket_class_list)) &
(safety_stock_adj['raf'] > raf_range[1])
].index
safety_stock_adj.loc[high_raf_index, 'order_upto_point'] = np.round(
safety_stock_adj.loc[high_raf_index, 'order_upto_point'] *
corr_raf / safety_stock_adj['raf'])
safety_stock_adj.loc[high_raf_index, 'reorder_point'] = np.round(
safety_stock_adj.loc[high_raf_index, 'order_upto_point']/2)
safety_stock_adj.loc[high_raf_index, 'safety_stock'] = np.round(
safety_stock_adj.loc[high_raf_index, 'reorder_point']/2)
logger.info(
'Out of total line items ' + str(len(safety_stock_adj)) + '\n' +
'Decreased: Total ' + str(len(high_raf_index)) + '\n' +
'Decreased: Generic ' +
str(len(safety_stock_adj.iloc[high_raf_index].
query('drug_type == "generic"'))) + '\n' +
'Decreased: Ethical ' +
str(len(safety_stock_adj.iloc[high_raf_index].
query('drug_type == "ethical"'))) + '\n' +
'Increased: Total ' + str(len(low_raf_index)) + '\n' +
'Increased: Generic ' +
str(len(safety_stock_adj.iloc[low_raf_index].
query('drug_type == "generic"'))) + '\n' +
'Increased: Ethical ' +
str(len(safety_stock_adj.iloc[low_raf_index].
query('drug_type == "ethical"')))
)
    return safety_stock_adj[safety_stock_df.columns]

 | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/heuristics/base.py | base.py |
import pandas as pd
import numpy as np
import datetime as dt
def v5_corrections(store_id, safety_stock_df, db, schema, logger):
"""
    Main function to perform V5 corrections.

    For drugs repeat-purchased in the last 90 days, ensure the reorder point
    is at least the drug's standard quantity (largest modal billed quantity
    per patient; drugs with std_qty > 10 are skipped as outliers). Corrected
    SS/ROP/OUP are derived from it and a pre/post impact summary is logged.
    """
# Get Drug STD Qty and list of repeatable drug_ids
df_3m_drugs, unique_drugs_3m = get_3m_drug_std_qty(store_id, db, schema, logger)
# Locate drugs to perform correction check
df_std_check = safety_stock_df.loc[safety_stock_df["drug_id"].isin(
unique_drugs_3m)][["drug_id", "fcst", "safety_stock", "reorder_point", "order_upto_point"]]
# Drugs not forecasted by IPC
drugs_3m_not_set = list(set(unique_drugs_3m) ^ set(df_std_check["drug_id"].unique()))
logger.info(f"Number of drugs not forecasted: {len(drugs_3m_not_set)}")
# Merge STD Qty with SS table and find drugs correction areas
df_std_check = df_3m_drugs.merge(df_std_check, on="drug_id", how="left")
df_std_check = df_std_check.dropna()
df_std_check["rop>=std_qty"] = np.where(
df_std_check["reorder_point"] >= df_std_check["std_qty"], "Y", "N")
tot_rep_drugs = df_std_check.shape[0]
corr_req = df_std_check.loc[df_std_check['rop>=std_qty'] == 'N'].shape[0]
corr_not_req = df_std_check.loc[df_std_check['rop>=std_qty'] == 'Y'].shape[0]
logger.info(f"Number of repeatable drugs: {tot_rep_drugs}")
logger.info(f"Number of repeatable drugs corrections required: {corr_req}")
logger.info(f"Number of repeatable drugs corrections not required: {corr_not_req}")
# CORRECTION STARTS
order_freq = 4
column_order = list(df_std_check.columns)
column_order += ["corr_ss", "corr_rop", "corr_oup"]
# CASE1: No changes required
df_no_change = df_std_check.loc[df_std_check["rop>=std_qty"] == "Y"].copy()
df_no_change["corr_ss"] = df_no_change["safety_stock"].astype(int)
df_no_change["corr_rop"] = df_no_change["reorder_point"].astype(int)
df_no_change["corr_oup"] = df_no_change["order_upto_point"].astype(int)
# CASE2: SS & ROP & OUP is Non Zero
df_change1 = df_std_check.loc[(df_std_check["rop>=std_qty"] == "N") &
(df_std_check["safety_stock"] != 0) &
(df_std_check["reorder_point"] != 0) &
(df_std_check["order_upto_point"] != 0)].copy()
df_change1["mul_1"] = df_change1["reorder_point"] / df_change1["safety_stock"]
df_change1["mul_2"] = df_change1["order_upto_point"] / df_change1["reorder_point"]
df_change1["corr_rop"] = df_change1["std_qty"]
df_change1["corr_ss"] = np.ceil(df_change1["corr_rop"] / df_change1["mul_1"]).astype(int)
# If ROP >= OUP, then in those cases, increase OUP.
df_change11 = df_change1.loc[
df_change1["corr_rop"] >= df_change1["order_upto_point"]].copy()
df_change12 = df_change1.loc[
df_change1["corr_rop"] < df_change1["order_upto_point"]].copy()
df_change11["corr_oup"] = np.ceil(df_change11["corr_rop"] + (
df_change11["fcst"] * order_freq / 28)).astype(int)
df_change12["corr_oup"] = np.ceil(df_change12["corr_rop"] + (
df_change12["fcst"] * order_freq / 28)).astype(int)
df_change1 = df_change11.append(df_change12)
df_change1 = df_change1[column_order]
# CASE3: Any of SS & ROP & OUP is Zero
df_change2 = df_std_check.loc[(df_std_check["rop>=std_qty"] == "N")].copy()
df_change2 = df_change2.loc[~((df_change2["safety_stock"] != 0) &
(df_change2["reorder_point"] != 0) &
(df_change2["order_upto_point"] != 0))].copy()
df_change2["corr_rop"] = df_change2["std_qty"].astype(int)
df_change2["corr_ss"] = np.floor(df_change2["corr_rop"] / 2).astype(int)
# If ROP >= OUP, then in those cases, increase OUP.
df_change21 = df_change2.loc[
df_change2["corr_rop"] >= df_change2["order_upto_point"]].copy()
df_change22 = df_change2.loc[
df_change2["corr_rop"] < df_change2["order_upto_point"]].copy()
df_change21["corr_oup"] = np.ceil(df_change21["corr_rop"] + (
df_change21["fcst"] * order_freq / 28)).astype(int)
df_change22["corr_oup"] = np.ceil(df_change22["corr_rop"] + (
df_change22["fcst"] * order_freq / 28)).astype(int)
df_change2 = df_change21.append(df_change22)
df_change2 = df_change2[column_order]
# Combine all 3 cases
df_corrected = df_no_change.append(df_change1)
df_corrected = df_corrected.append(df_change2)
df_corrected = df_corrected.sort_index(ascending=True)
# Get DF of corrected drugs and merge with input DF
df_corrected_to_merge = df_corrected.loc[df_corrected["rop>=std_qty"] == "N"][
["drug_id", "corr_ss", "corr_rop", "corr_oup"]]
corr_safety_stock_df = safety_stock_df.merge(df_corrected_to_merge,
on="drug_id", how="left")
# Make corrections for required drugs
corr_safety_stock_df["safety_stock"] = np.where(
corr_safety_stock_df["corr_ss"] >= 0, corr_safety_stock_df["corr_ss"],
corr_safety_stock_df["safety_stock"])
corr_safety_stock_df["reorder_point"] = np.where(
corr_safety_stock_df["corr_rop"] >= 0, corr_safety_stock_df["corr_rop"],
corr_safety_stock_df["reorder_point"])
corr_safety_stock_df["order_upto_point"] = np.where(
corr_safety_stock_df["corr_oup"] >= 0, corr_safety_stock_df["corr_oup"],
corr_safety_stock_df["order_upto_point"])
corr_safety_stock_df.drop(["corr_ss", "corr_rop", "corr_oup"], axis=1, inplace=True)
corr_safety_stock_df["max_value"] = corr_safety_stock_df["order_upto_point"] * \
corr_safety_stock_df["fptr"]
assert safety_stock_df.shape == corr_safety_stock_df.shape
# Evaluate PRE and POST correction
pre_post_metrics = {
"metric": ["pre_corr", "post_corr"],
"ss_qty": [safety_stock_df["safety_stock"].sum(),
corr_safety_stock_df["safety_stock"].sum()],
"ss_val": [round((safety_stock_df["safety_stock"] * safety_stock_df["fptr"]).sum(), 2),
round((corr_safety_stock_df["safety_stock"] * corr_safety_stock_df["fptr"]).sum(), 2)],
"rop_qty": [safety_stock_df["reorder_point"].sum(), corr_safety_stock_df["reorder_point"].sum()],
"rop_val": [round((safety_stock_df["reorder_point"] * safety_stock_df["fptr"]).sum(), 2),
round((corr_safety_stock_df["reorder_point"] * corr_safety_stock_df["fptr"]).sum(), 2)],
"oup_qty": [safety_stock_df["order_upto_point"].sum(), corr_safety_stock_df["order_upto_point"].sum()],
"oup_val": [round((safety_stock_df["order_upto_point"] * safety_stock_df["fptr"]).sum(), 2),
round((corr_safety_stock_df["order_upto_point"] * corr_safety_stock_df["fptr"]).sum(), 2)]
}
pre_post_metics_df = pd.DataFrame.from_dict(pre_post_metrics).set_index('metric').T
pre_post_metics_df["delta"] = pre_post_metics_df["post_corr"] - pre_post_metics_df["pre_corr"]
pre_post_metics_df["change%"] = round((pre_post_metics_df["delta"] / pre_post_metics_df["pre_corr"]) * 100, 2)
logger.info(f"\n{str(pre_post_metics_df)}")
return corr_safety_stock_df
def max_mode(pd_series):
return int(max(pd_series.mode()))
def get_3m_drug_std_qty(store_id, db, schema, logger):
"""
To fetch repeatable patient-drug qty from past 90days and calculate
standard drug qty.
"""
start_date = (dt.date.today() - dt.timedelta(days=90)).strftime("%Y-%m-%d")
end_date = dt.date.today().strftime("%Y-%m-%d")
q_3m = """
select "patient-id" , "old-new" , "drug-id" ,
date("created-at") as "on-date", quantity as "tot-qty"
from "{schema}".sales
where "store-id" = {0}
and "is-repeatable" = 1
and "bill-flag" = 'gross'
and "created-at" > '{1} 00:00:00' and "created-at" < '{2} 00:00:00'
""".format(store_id, start_date, end_date, schema=schema)
df_3m = db.get_df(q_3m)
df_3m.columns = [c.replace('-', '_') for c in df_3m.columns]
# Get patient-drug-level STD Qty
df_3m["3m_bills"] = 1
df_3m["std_qty"] = df_3m["tot_qty"]
df_3m_patient = df_3m.groupby(["patient_id", "drug_id"],
as_index=False).agg(
{"3m_bills": "sum", "tot_qty": "sum", "std_qty": max_mode})
logger.info(f"Total repeatable patients: {len(df_3m_patient.patient_id.unique())}")
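    # per patient-drug, std_qty is the largest modal billed quantity over the
    # last 90 days; the drug-level std_qty below takes the max across patients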
# Get drug-level STD Qty
df_3m_drugs = df_3m_patient.groupby("drug_id", as_index=False).agg(
{"std_qty": "max"})
# STD Qty > 10 is considered outliers, to drop.
drug_count_before = df_3m_drugs.shape[0]
df_3m_drugs = df_3m_drugs.loc[df_3m_drugs["std_qty"] <= 10]
drug_count_after = df_3m_drugs.shape[0]
logger.info(f"Number of outlier drugs STD Qty: {drug_count_before-drug_count_after}")
# Repeatable drugs STD Qty to check against IPC set ROP
unique_drugs_3m = list(df_3m_drugs["drug_id"].unique())
    return df_3m_drugs, unique_drugs_3m

 | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc/heuristics/ipcv5_heuristics.py | ipcv5_heuristics.py |
all_secrets = {
"development/etl": {
"ENV": "dev",
"REDSHIFT_DB": "prod2-generico",
"REDSHIFT_HOST": "localhost",
"REDSHIFT_USER": "admin",
"REDSHIFT_PASSWORD": "vJF1TFxZaq9hkubMuQmpgA==",
"REDSHIFT_PORT": "5460",
"REDSHIFT_SSL": "0",
"REDSHIFT_WRITE_DB": "prod2-generico",
"REDSHIFT_WRITE_HOST": "localhost",
"REDSHIFT_WRITE_USER": "admin",
"REDSHIFT_WRITE_PASSWORD": "vJF1TFxZaq9hkubMuQmpgA==",
"REDSHIFT_WRITE_PORT": "5460",
"REDSHIFT_WRITE_SSL": "0",
"MS_DB": "prod2-generico",
"MS_HOST": "localhost",
"MS_USER": "ro",
"MS_PASSWORD": "obahegen63",
"MS_PORT": "3311",
"MYSQL_WRITE_HOST": "localhost",
"MYSQL_WRITE_DATABASE": "test-generico",
"MYSQL_WRITE_USER": "server",
"MYSQL_WRITE_PASSWORD": "dev-server",
"MYSQL_WRITE_PORT": "3306",
"PG_HOST": "127.0.0.1",
"PG_USER": "data_science_admin",
"PG_PASSWORD": "T+ckpmRHASGShsi1W2BPBAZbx4=",
"PG_PORT": "5435",
"PG_DB": "datascience_generico",
"INTERNAL_PG_HOST": "127.0.0.1",
"INTERNAL_PG_USER": "ds_read_only",
"INTERNAL_PG_PASSWORD": "HaE5mOxoZ0rjezcZ+gujzA==",
"INTERNAL_PG_PORT": "5435",
"INTERNAL_PG_DB": "generico_internal",
"MONGO_HOST": "localhost",
"MONGO_DB": "generico-crm",
"MONGO_USER": "crm_generico",
"MONGO_PASSWORD": "Ivynwg+lNAbwc39jECd+o3ZGe/I=",
"MONGO_PORT": 27017,
"WH_MSSQL_HOST": "127.0.0.1",
"WH_MSSQL_PORT": "1433",
"WH_MSSQL_DATABASE": "esdata",
"WH_MSSQL_USER": "rodata",
"WH_MSSQL_PASSWORD": "icanonlyread",
"AWS_ACCESS_KEY_ID": "AKIA5NJ64OJ5UEZ4LS5P",
"AWS_SECRET_ACCESS_KEY_ID": "Ps7rjzBURYi3T74WTFHDfLGwfdjoo9CvYojaaD7O",
"AWS_REGION": "ap-south-1",
"AWS_ACCOUNT_ID": "921939243643",
"CLEVERTAP_ACCOUNT_ID": "R5Z-K95-RZ6Z",
"CLEVERTAP_PASSCODE": 'ETK-RUZ-MHUL',
"NODE_NOTIFICATION_BASE_URL": "http://stag-node.generico.in:3001/",
"NOTIFICATION_AUTH_TOKEN": "bearer secret_c28365c7515cb33b7f4b585ad8b0dc59724d0e68b73119df038f1a1b91d63d0871bcf97e8d3992c54f135fd3e364eb49d12b0609a0733dbbd095f40301005e28639182ba1c44b56a7de6fde1d4a01b17fa8d945f3922a04d4cf2994c0a564026dd403f1879a1db9064625410b647fa3ff6e07a8b93cf3ffabfb91c2f9c65975df66384630ea2113de9c9dd77d9aec6367287570e2abee975d5924f56267fab32d3a11ac0e6a8f311a0f35f174ca55bff0b30908fa3f4428c433cf3c33b001409497a488d1ad86a290e86af1310f41b3df161dc2250063fbbe55ce5db8097364005b4d86b787c5407613085fb261567adbb7b0ddd5c994ab31afbd477a6a7288a",
"LOGGER_LEVEL": "INFO",
"GMAPS_API_KEY": "AIzaSyAM4MXbhAnt3GcADbHDQ251ULnUCGj6M7s",
"FRESHSERVICE_API": "nIIbufKTxrqLDyobzwgT",
"LEARNTRON_API_TOKEN": "wv7qC9K6/zeXtDUhDJVRo1fGs99YVC/A+k15+PQJHVcFP2LbPJYplcdvt+B0RD6H",
"DJANGO_ACCOUNTS_BASE_URL": "https://stag-accounts.generico.in/",
"DJANGO_OAUTH_TOKEN": "GPCFDazdfYFWTXF3qrWIawlLhygQEj",
"FLASK_DS_APP_BASE_URL": "https://rcbrrvicsg.execute-api.ap-south-1.amazonaws.com/staging",
"FLASK_DS_APP_X_API_KEY": 'o783ilbugb7j3My5hT9mC5sWu4qcPOWs7JVDEYHY',
"CLUSTER_IDENTIFIER": "stag-mysql-redshift-cluster-1",
"MANUAL_SNAPSHOT_RETENTION_PERIOD": "5",
"ZENO_WEBSOC_API_TOKEN": "12345678",
"ZENO_WEBSOC_BASE_URL": "https://dev-websoc.generico.in",
"DATALAKE_DATABASE": "development"
}
}

 | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/secret/zeno_secrets.py | zeno_secrets.py |
```
!pip install zeno_etl_libs==1.0.69
"""main wrapper for distributor ranking algorithm"""
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime as dt
from dateutil.tz import gettz
from fractions import Fraction
sys.path.append('../../../..')
from zeno_etl_libs.utils.distributor_ranking.distributor_ranking_calc import ranking_calc_dc, ranking_calc_franchisee
from zeno_etl_libs.utils.distributor_ranking.ranking_intervention import ranking_override_dc, ranking_override_franchisee
from zeno_etl_libs.utils.distributor_ranking.postprocess_ranking import postprocess_ranking_dc, postprocess_ranking_franchisee
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
```
## Main Function
```
def main(debug_mode, weights_as, weights_pr, as_low_volume_cutoff,
pr_low_volume_cutoff, low_volume_cutoff_franchisee, volume_fraction,
time_interval, time_interval_franchisee, rank_override_dc_active,
rank_override_franchisee_active, db_read, db_write, read_schema,
write_schema, s3, logger):
mysql_write = MySQL(read_only=False)
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
reset_date = dt.date.today()
# weights format is [lead time, margin, bounce rate, ff, lost recency, success recency]
weights_as = [float(Fraction(i)) for i in list(weights_as.values())]
weights_pr = [float(Fraction(i)) for i in list(weights_pr.values())]
# define empty variable in case of code fail
dc_evaluated = []
franchisee_stores_evaluated = []
logger.info("Checking input weights")
try:
        # compare with a tolerance since the fractions were converted to floats
        assert abs(sum(weights_as) - 1) < 1e-6
        assert abs(sum(weights_pr) - 1) < 1e-6
logger.info("Weight inputs summing up to one")
except:
logger.info("Weights not summing up to one, reverting to defaults")
weights_as = [2 / 13, 1 / 13, 4 / 13, 4 / 13, 1 / 13, 1 / 13]
weights_pr = [6 / 15, 1 / 15, 3 / 15, 3 / 15, 1 / 15, 1 / 15]
try:
# calculate ranks
logger.info("Calculating Zippin DC-level Ranking")
features_rank_dc = ranking_calc_dc(
time_interval=time_interval, weights_as=weights_as,
weights_pr=weights_pr, as_low_volume_cutoff=as_low_volume_cutoff,
pr_low_volume_cutoff=pr_low_volume_cutoff,
volume_fraction=volume_fraction,
db=db_read, read_schema=read_schema, logger=logger)
logger.info("Completed Zippin DC-level Ranking")
logger.info("Calculating Franchisee Store-level Ranking")
features_rank_franchisee = ranking_calc_franchisee(
time_interval=time_interval_franchisee,
weights_as=weights_as, weights_pr=weights_pr,
low_volume_cutoff=low_volume_cutoff_franchisee,
volume_fraction=volume_fraction,
db=db_read, read_schema=read_schema, logger=logger)
logger.info("Completed Franchisee Store-level Ranking")
logger.info('Number of dc-drug_id combinations evaluated :' +
str(features_rank_dc[features_rank_dc['request_type'] == 'AS/MS'].shape[0]))
logger.info('Number of franchisee store-drug_id combinations evaluated :' +
str(features_rank_franchisee[features_rank_franchisee['request_type'] == 'AS/MS'].shape[0]))
if rank_override_dc_active == 'Y':
logger.info("Rank override DC level begins")
features_rank_dc = ranking_override_dc(
features_rank_dc, db_read, read_schema, logger,
override_type_list=['AS/MS'])
logger.info("Rank override DC level successful")
if rank_override_franchisee_active == 'Y':
logger.info("Rank override franchisee store level begins")
features_rank_franchisee = ranking_override_franchisee(
features_rank_franchisee, db_read, read_schema, logger,
override_type_list=['AS/MS', 'PR'])
logger.info("Rank override franchisee store level successful")
# postprocess features for dc level ranking
tech_input_dc_level = postprocess_ranking_dc(features_rank_dc,
volume_fraction)
# postprocess features for franchisee store level ranking
tech_input_franchisee_level = postprocess_ranking_franchisee(
features_rank_franchisee, volume_fraction)
# combine both dc-level and frachisee-level ranking
tech_input = pd.concat([tech_input_dc_level, tech_input_franchisee_level])
# combine volume fraction split for cases where total distributors < 3
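        # e.g. a '0.5-0.3-0.2' split with only two ranked distributors becomes
        # '0.5-0.5-0.0', and with a single distributor '1.0-0.0-0.0'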
volume_fraction_split = tech_input['volume_fraction'].str.split(
pat='-', expand=True).rename(
columns={0: 'volume_fraction_1',
1: 'volume_fraction_2',
2: 'volume_fraction_3'})
tech_input['volume_fraction_1'] = volume_fraction_split[
'volume_fraction_1'].astype(float)
tech_input['volume_fraction_2'] = volume_fraction_split[
'volume_fraction_2'].astype(float)
tech_input['volume_fraction_3'] = volume_fraction_split[
'volume_fraction_3'].astype(float)
tech_input['volume_fraction_2'] = np.where(
tech_input['final_dist_3'].isna(),
tech_input['volume_fraction_2'] +
tech_input['volume_fraction_3'],
tech_input['volume_fraction_2'])
tech_input['volume_fraction_3'] = np.where(
tech_input['final_dist_3'].isna(), 0,
tech_input['volume_fraction_3'])
tech_input['volume_fraction_1'] = np.where(
tech_input['final_dist_2'].isna(),
tech_input['volume_fraction_1'] +
tech_input['volume_fraction_2'],
tech_input['volume_fraction_1'])
tech_input['volume_fraction_2'] = np.where(
tech_input['final_dist_2'].isna(), 0,
tech_input['volume_fraction_2'])
tech_input['volume_fraction'] = tech_input['volume_fraction_1'].astype(
'str') + '-' + tech_input['volume_fraction_2'].astype(
'str') + '-' + tech_input['volume_fraction_3'].astype('str')
tech_input = tech_input[
['dc_id', 'store_id', 'franchisee_id', 'drug_id',
'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
############ adhoc changes by tech, table restructure ############
tech_input = tech_input.reset_index(
drop=True).reset_index().rename(columns={'index': 'id'})
tech_input[['volume_fraction_1', 'volume_fraction_2',
'volume_fraction_3']] = tech_input[
'volume_fraction'].str.split('-', 3, expand=True)
tech_input.loc[tech_input['request_type'] == 'AS/MS',
'request_type'] = 'manual-short/auto-short'
tech_input.loc[tech_input['request_type'] ==
'PR', 'request_type'] = 'patient-request'
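        # reshape the wide (final_dist_1/2/3, volume_fraction_1/2/3) columns
        # into the long rule-values format: one row per rule id, distributor
        # and volume fraction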
volume_fraction_melt = pd.melt(tech_input, id_vars=['id'],
value_vars=['volume_fraction_1',
'volume_fraction_2',
'volume_fraction_3']).sort_values(by='id')
distributor_melt = pd.melt(tech_input, id_vars=['id'],
value_vars=['final_dist_1',
'final_dist_2',
'final_dist_3']).sort_values(by='id').rename(columns={'value': 'distributor_id'})
distributor_ranking_rule_values = pd.merge(distributor_melt,
volume_fraction_melt,
left_index=True,
right_index=True,
suffixes=('', '_y'))
distributor_ranking_rule_values = distributor_ranking_rule_values[
['id', 'distributor_id', 'value']].rename(
columns={'id': 'distributor_ranking_rule_id'}).reset_index(
drop=True)
distributor_ranking_rule_values = distributor_ranking_rule_values.reset_index().rename(columns={'index': 'id'})
# drop null values in distributor_id(for cases where historical distributors are < 3)
distributor_ranking_rule_values = distributor_ranking_rule_values[
~distributor_ranking_rule_values['distributor_id'].isna()]
# convert distributor_id in int format
distributor_ranking_rule_values['distributor_id'] = \
distributor_ranking_rule_values['distributor_id'].astype(int)
distributor_ranking_rules = tech_input[['id', 'drug_id', 'dc_id',
'franchisee_id', 'store_id',
'drug_type', 'request_type']]
# for email info
dc_evaluated = distributor_ranking_rules["dc_id"].unique().tolist()
franchisee_stores_evaluated = distributor_ranking_rules["store_id"].unique().tolist()
# adding required fields
distributor_ranking_rules['rule_start_date'] = reset_date
distributor_ranking_rules['is_active'] = 1
distributor_ranking_rules['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
distributor_ranking_rules['created_by'] = 'etl-automation'
features_rank_dc.loc[:, 'reset_date'] = reset_date
features_rank_dc['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
features_rank_dc['created_by'] = 'etl-automation'
features_rank_franchisee.loc[:, 'reset_date'] = reset_date
features_rank_franchisee['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
features_rank_franchisee['created_by'] = 'etl-automation'
# formatting column names
distributor_ranking_rule_values.columns = [c.replace('_', '-') for c in distributor_ranking_rule_values.columns]
distributor_ranking_rules.columns = [c.replace('_', '-') for c in distributor_ranking_rules.columns]
features_rank_dc.columns = [c.replace('_', '-') for c in features_rank_dc.columns]
features_rank_franchisee.columns = [c.replace('_', '-') for c in features_rank_franchisee.columns]
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
logger.info("Writing to table: distributor-features-dc")
s3.write_df_to_db(df=features_rank_dc,
table_name='distributor-features-dc',
db=db_write, schema=write_schema)
logger.info("Writing to table: distributor-features-franchisee")
s3.write_df_to_db(df=features_rank_franchisee,
table_name='distributor-features-franchisee',
db=db_write, schema=write_schema)
logger.info("Writing table to RS-DB completed!")
mysql_write.open_connection()
logger.info("Updating table to MySQL")
try:
index_increment = int(
pd.read_sql(
'select max(id) from `distributor-ranking-rules`',
con=mysql_write.connection).values[0]) + 1
redundant_increment = int(
pd.read_sql(
'select max(id) from `distributor-ranking-rule-values`',
con=mysql_write.connection).values[0]) + 1
except:
index_increment = 1
redundant_increment = 1
logger.info(f"Incremented distributor-ranking-rules by {index_increment}")
logger.info(f"Incremented distributor-ranking-rule-values by {redundant_increment}")
distributor_ranking_rules['id'] = distributor_ranking_rules['id'] + index_increment
distributor_ranking_rule_values['distributor-ranking-rule-id'] = distributor_ranking_rule_values[
'distributor-ranking-rule-id'] + index_increment
distributor_ranking_rule_values['id'] = distributor_ranking_rule_values['id'] + redundant_increment
logger.info("Setting existing rules to inactive")
mysql_write.engine.execute("UPDATE `distributor-ranking-rules` SET `is-active` = 0")
logger.info("Writing to table: distributor-ranking-rules")
distributor_ranking_rules.to_sql(
name='distributor-ranking-rules',
con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=10000)
logger.info("Writing to table: distributor-ranking-rule-values")
distributor_ranking_rule_values.to_sql(
name='distributor-ranking-rule-values',
con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=10000)
logger.info("Updating table to MySQL completed!")
mysql_write.close()
else:
logger.info("Writing to RS-DB & MySQL skipped")
status = 'Success'
logger.info(f"Distributor Ranking code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"Distributor Ranking code execution status: {status}")
return status, reset_date, dc_evaluated, franchisee_stores_evaluated
```
## Pass Param
```
env = "dev"
email_to = "[email protected]"
debug_mode = "N"
os.environ['env'] = env
logger = get_logger()
s3 = S3()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
```
## Read Params from RS table
```
from zeno_etl_libs.helper.parameter.job_parameter import parameter
args = parameter.get_params(job_id=112)
# JOB EXCLUSIVE PARAMS
weights_as = args["weights_as"]
weights_pr = args["weights_pr"]
as_low_volume_cutoff = args["as_low_volume_cutoff"]
pr_low_volume_cutoff = args["pr_low_volume_cutoff"]
low_volume_cutoff_franchisee = args["low_volume_cutoff_franchisee"]
volume_fraction = args["volume_fraction"]
time_interval = args["time_interval"]
time_interval_franchisee = args["time_interval_franchisee"]
rank_override_dc_active = args["rank_override_dc_active"]
rank_override_franchisee_active = args["rank_override_franchisee_active"]
```
## Execute Main Function
```
""" calling the main function """
status, reset_date, dc_evaluated, franchisee_stores_evaluated = main(
debug_mode, weights_as, weights_pr,
as_low_volume_cutoff, pr_low_volume_cutoff, low_volume_cutoff_franchisee,
volume_fraction, time_interval, time_interval_franchisee,
rank_override_dc_active, rank_override_franchisee_active, rs_db_read,
rs_db_write, read_schema, write_schema, s3, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
```
## Send Email Notification
```
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"Distributor Ranking Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
DC's Evaluated: {dc_evaluated}
Franchisee Stores Evaluated: {franchisee_stores_evaluated}
Job Params: {args}
""",
to_emails=email_to)
logger.info("Script ended")
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/distributor-ranking-main/distributor_ranking_main.ipynb | distributor_ranking_main.ipynb |
```
#installing extra libraries on prod
!pip install zeno-etl-libs==1.0.40
!pip install google-cloud-storage==1.43.0
!pip install openpyxl==3.0.9
!pip install nltk==3.6.7
!pip install apiclient==1.0.4
"""
Analysing Playstore reviews on daily basis
Author : [email protected]
"""
import argparse
import sys
import re
import os
import pandas as pd
import dateutil.parser
import datetime
from dateutil.tz import gettz
import numpy as np
import time
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.tokenize import word_tokenize, RegexpTokenizer, sent_tokenize
from nltk.corpus import stopwords
from collections import defaultdict
nltk.download('wordnet')
nltk.download('omw-1.4')
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('vader_lexicon')
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
```
## Pass Params
```
env = "dev"
full_run = 0
email_to ="[email protected],[email protected]"
os.environ['env'] = env
logger = get_logger()
logger.info(f"full_run: {full_run}")
rs_db = DB()
rs_db_write = DB(read_only=False)
rs_db.open_connection()
rs_db_write.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'playstore-reviews-analysis'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
# max of data
playstore_q = """
select
max("review-created-at") max_exp
from
"prod2-generico"."playstore-reviews-analysis"
"""
max_exp_date = rs_db.get_df(playstore_q)
max_exp_date['max_exp'].fillna(np.nan, inplace=True)
print(max_exp_date.info())
max_exp_date = max_exp_date['max_exp'].to_string(index=False)
print(max_exp_date)
# params
if full_run or max_exp_date == 'NaN':
start = '2021-01-01'
else:
start = max_exp_date
start = dateutil.parser.parse(start)
print(start)
#fetching all reviews from playstore-review table
q = f"""
select
"review-id",
"review",
"review-created-at"
from
"prod2-generico"."playstore-reviews"
where
date("review-created-at")> '{start}'
and review !='' and review is not null
"""
df = rs_db.get_df(q)
df['review-created-at'] = pd.to_datetime(df['review-created-at'])
df.head()
```
## Define all functions
```
# sentiment scoring with VADER's SentimentIntensityAnalyzer
def sentiment_analyser(lst):
    sid = SentimentIntensityAnalyzer()
    # score each element once and reuse the result
    sentiment = [sid.polarity_scores(x) for x in lst]
    neg = [s['neg'] for s in sentiment]
    neu = [s['neu'] for s in sentiment]
    pos = [s['pos'] for s in sentiment]
    comp = [s['compound'] for s in sentiment]
    return neg[0], neu[0], pos[0], comp[0]
# removing stopwords
def remove_sw(sent, corpus):
    stop_words = set(stopwords.words(corpus))
    word_tokens = word_tokenize(sent)
    filtered_sentence = [w for w in word_tokens if w not in stop_words]
    filtered_sentence = ' '.join(filtered_sentence)
    return [filtered_sentence]
#Remove URL
def remove_url(txt):
"""Replace URLs found in a text string with nothing
(i.e. it will remove the URL from the string).
Parameters
----------
txt : string
A text string that you want to parse and remove urls.
Returns
-------
The same txt string with url's removed.
"""
return " ".join(re.sub("([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", txt).split())
def generate_N_grams(text,ngram):
words=[word for word in text if word not in set(stopwords.words('english'))]
#print("Sentence after removing stopwords:",words)
temp=zip(*[words[i:] for i in range(0,ngram)])
ans=[' '.join(ngram) for ngram in temp]
return ans
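# e.g. generate_N_grams(['the', 'app', 'is', 'good'], 2) -> ['app good']
# since stopwords ('the', 'is') are removed before forming the n-grams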
# match two list and find intersection
def matchlist(a,b):
matches=[]
for item_a in a:
for item_b in b:
if item_a == item_b:
matches.append(item_a)
return list(set(matches))
# to convert list to string
def listToString(s):
    # join the stringified list elements with commas
    str1 = ","
s = [str(x) for x in s]
return (str1.join(s))
#sort string
def sortstring(my_str):
words = my_str.split(" ")
# sort the list
words.sort()
# display the sorted words
newSentence = " ".join(words)
return newSentence
print("Removing url")
df['content_url'] =df['review'].apply(lambda x :remove_url(x.lower()),1)
print("Removing stopwords")
sw = 'english'
df['content_sw'] =df['content_url'].apply(lambda x :remove_sw(x,sw),1)
print("sentiment_analyser")
df['neg','neu','pos','comp'] =df['content_sw'].apply(lambda x :sentiment_analyser(x),1)
df[['neg','neu','pos','comp']]= df['neg','neu','pos','comp'].apply(pd.Series)
df.drop(columns=('neg', 'neu', 'pos', 'comp'),inplace=True)
df['positive-ratio'] = round((df['pos']/ (df['pos']+ df ['neg'])) , 2)
df['negative-ratio'] = round((df['neg']/ (df['pos']+ df ['neg'])) , 2)
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
return [lemmatizer.lemmatize(w) for w in w_tokenizer.tokenize(text)]
df['content_lem'] = df['content_url'].apply(lambda x :lemmatize_text(x.lower()),1)
df['content_2gram'] = df['content_lem'].apply(lambda x :generate_N_grams(x,2),1)
df['content_1gram'] = df['content_lem'].apply(lambda x :generate_N_grams(x,1),1)
#place holder
positiveValues2=defaultdict(int)
negativeValues2=defaultdict(int)
positiveValues1=defaultdict(int)
negativeValues1=defaultdict(int)
# count 2-gram frequencies in reviews classified as positive
for text in df[df['positive-ratio']>df['negative-ratio']].content_lem:
for word in generate_N_grams(text,2):
positiveValues2[word.lower()]+=1
for text in df[df['positive-ratio']>df['negative-ratio']].content_lem:
for word in generate_N_grams(text,1):
positiveValues1[word.lower()]+=1
# count 2-gram frequencies in reviews classified as negative
for text in df[df['positive-ratio']<=df['negative-ratio']].content_lem:
for word in generate_N_grams(text,2):
negativeValues2[word]+=1
# count 1-gram frequencies in reviews classified as negative
for text in df[df['positive-ratio']<=df['negative-ratio']].content_lem:
for word in generate_N_grams(text,1):
negativeValues1[word]+=1
# focus on the most frequently occurring n-grams for each sentiment:
# sort positiveValues/negativeValues in descending order of count
df_positive2=pd.DataFrame(sorted(positiveValues2.items(),key=lambda x:x[1],reverse=True))
df_negative2=pd.DataFrame(sorted(negativeValues2.items(),key=lambda x:x[1],reverse=True))
df_positive1=pd.DataFrame(sorted(positiveValues1.items(),key=lambda x:x[1],reverse=True))
df_negative1=pd.DataFrame(sorted(negativeValues1.items(),key=lambda x:x[1],reverse=True))
#convert grams to list
positive_grams2=df_positive2[0][:20].tolist()
positive_grams1=df_positive1[0][:20].tolist()
negative_grams2=df_negative2[0][:20].tolist()
negative_grams1=df_negative1[0][:20].tolist()
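# keep, for each review, only those n-grams that appear among the 20 most
# frequent n-grams of its sentiment bucket (positive vs negative)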
df['top_grams2']= np.where(df['positive-ratio']>df['negative-ratio'],df['content_2gram'].apply(lambda x :matchlist(x,positive_grams2),1) ,df['content_2gram'].apply(lambda x :matchlist(x,negative_grams2),1) )
df['top_grams1']= np.where(df['positive-ratio']>df['negative-ratio'],df['content_1gram'].apply(lambda x :matchlist(x,positive_grams1),1) ,df['content_1gram'].apply(lambda x :matchlist(x,negative_grams1),1) )
df['top-grams-one']= df['top_grams1'].apply(lambda x :listToString(x))
df['top-grams-two']= df['top_grams2'].apply(lambda x :listToString(x))
df['top-grams-two']= df['top-grams-two'].apply(lambda x :sortstring(x),1)
df.head()
final_ana = df[['review-id','review-created-at','positive-ratio','negative-ratio','top-grams-one','top-grams-two']].copy()
#etl columns
final_ana['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
final_ana['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
final_ana['created-by'] = 'etl-automation'
final_ana['updated-by'] = 'etl-automation'
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where "review-created-at" >'{start}' '''
print(truncate_query)
rs_db_write.execute(truncate_query)
s3.write_df_to_db(df=final_ana[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection()
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/playstore-review/playstore-reviews-analysis.ipynb | playstore-reviews-analysis.ipynb |
```
# Only on stage and prod
!pip install Levenshtein==0.17.0
!pip install zeno-etl-libs==1.0.51
!pip install openpyxl==3.0.10
"""
Purpose: Fuzzy match to get patient names and store these patients in table,
and finally email the data on regular basis.
author : [email protected]
"""
import sys
import os
import pandas as pd
import datetime
from dateutil.tz import gettz
import numpy as np
import Levenshtein as lev
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.email.email import Email
```
## Pass Params
```
env = "dev"
full_run = 0
email_to ="NA"
batch_size = 10
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
logger.info(f"full_run: {full_run}")
logger.info(f"batch_size: {batch_size}")
logger.info(f"email_to: {email_to}")
# read
rs_db = DB()
rs_db.open_connection()
# write
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
schema = 'prod2-generico'
patients_table = 'playstore-patients'
reviews_table = 'playstore-reviews'
patients_table_info = helper.get_table_info(db=rs_db_write, table_name=patients_table, schema=schema)
def get_last_processed_review_date():
# last review date in "playstore-patients" table
query = f""" select max("review-created-at") last_review_at from "{schema}"."{patients_table}" """
df = rs_db_write.get_df(query=query)
df['last_review_at'].fillna(np.nan, inplace=True)
last_review_at = df['last_review_at'].to_string(index=False)
logger.info(f"last_review_at in play store patients table: {last_review_at}")
return last_review_at
def get_orders(start_datetime, end_datetime):
#Fetch zeno orders for a given date range
zeno_q = f"""
select
zo.id as "zeno-order-id-before-review" ,
zo."patient-id" ,
zo."created-at" as "order-created-at",
p.phone,
p."name" as "matched-name"
from
"prod2-generico"."zeno-order" zo
left join "prod2-generico".patients p on
zo."patient-id" = p.id
where
zo."created-at" > '{start_datetime}'
and zo."created-at" <= '{end_datetime}'
and p."name" is not null
"""
df = rs_db.get_df(zeno_q)
return df
review_filter = ""
if full_run:
""" No filter, take all """
""" and truncate the playstore-patients tables """
logger.info(f"Full run is: {full_run}, so truncating the table.")
query = f""" truncate table "{schema}"."{patients_table}"; """
rs_db_write.execute(query=query)
else:
last_processed_review_at = get_last_processed_review_date()
if last_processed_review_at == 'NaN':
""" No filter, take all """
pass
else:
review_filter = f""" and "review-created-at" > '{last_processed_review_at}' """
review_filter
# Fetching required reviews from playstore-review table
query = f"""
select
"review-id",
"review",
"author-name",
"review-created-at",
"star-rating"
from
"{schema}"."{reviews_table}"
where
"review-id" != ''
{review_filter}
order by
"review-created-at" asc
"""
reviews_df = rs_db.get_df(query=query)
reviews_df['review-created-at'] = pd.to_datetime(reviews_df['review-created-at'])
if reviews_df.empty:
logger.info("No reviews to process, to stopping here.")
rs_db.close_connection()
rs_db_write.close_connection()
exit()
else:
logger.info(f"Total reviews to be processed: {len(reviews_df)}")
# Process review in batches
counter = 1
for b_df in helper.batch(reviews_df, batch_size):
print(f"Counter: {counter}, len: {len(b_df)}\n")
# Do all the processing
logger.info(f" b_df {counter}: {b_df.head(1).transpose()} \n\n")
start_datetime = b_df['review-created-at'].min() - datetime.timedelta(days=7)
end_datetime = b_df['review-created-at'].max()
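# Orders window for this batch: from 7 days before the earliest review up to the
# latest review, so a review can be matched to a purchase made shortly before it.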
orders_df = get_orders(start_datetime=start_datetime, end_datetime=end_datetime)
# Cross join reviews and orders
b_df['i'] = 1
orders_df['i'] = 1
df = pd.merge(b_df, orders_df, how='inner', on='i')
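# Dummy-key cross join: every review in the batch is paired with every order in
# the window; the best name matches are then selected below.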
df['author-name'] = df['author-name'].str.lower()
df['matched-name'] = df['matched-name'].str.lower()
# Apply name matching
df['lev-ratio'] = df.apply(lambda row: lev.ratio(str(row['author-name']), str(row['matched-name'])), 1)
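# Levenshtein.ratio returns a normalized similarity score in [0, 1];
# 1.0 means the lowercased author name and patient name are identical.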
df['rank-order'] = df.sort_values(['zeno-order-id-before-review'], ascending=[False]) \
.groupby(['review-id', 'matched-name']) \
.cumcount() + 1
top_df = df[(df['rank-order'] == 1)]
top_df['top-matches'] = top_df.sort_values(['lev-ratio'], ascending=[False]).groupby(['review-id']).cumcount() + 1
# fetch top 3 name matches
top_df = top_df[(top_df['top-matches'] <= 3)]
top_df = top_df.sort_values(['star-rating', 'review-id', 'top-matches'], ascending=[True, True, True])
# Adding extra ETL columns
top_df['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
top_df['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
top_df['created-by'] = 'etl-automation'
top_df['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=top_df[patients_table_info['column_name']], table_name=patients_table, db=rs_db_write,
schema=schema)
if "@" in email_to:
""" Which means we want to send an email """
file_name = 'Zeno_playstore.xlsx'
columns_in_mail = ['review-id', 'review', 'star-rating', 'review-created-at', 'author-name',
'matched-name', 'zeno-order-id-before-review', 'patient-id', 'order-created-at']
file_path = s3.write_df_to_excel(data={'Zeno Playstore': top_df[columns_in_mail]}, file_name=file_name)
email = Email()
email.send_email_file(
subject="Zeno Playstore",
mail_body='Zeno Playstore',
to_emails=email_to,
file_uris=[],
file_paths=[file_path]
)
logger.info(f"Email has been sent successfully to: {email_to}")
counter += 1
# """ For testing only """
# if counter >= 2:
# break
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection()
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/playstore-review/playstore-patients-batch-process.ipynb | playstore-patients-batch-process.ipynb |
```
!pip install zeno_etl_libs==1.0.72
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 21 21:45:59 2022
@author: [email protected]
@Purpose: To generate forecast and replenishment figures for Warehouse
"""
import os
import sys
import pandas as pd
from datetime import datetime, timedelta
from scipy.stats import norm
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.warehouse.data_prep.wh_data_prep import wh_data_prep
from zeno_etl_libs.utils.warehouse.forecast.forecast_main import wh_forecast
from zeno_etl_libs.utils.warehouse.safety_stock.wh_safety_stock import \
wh_safety_stock_calc
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.helper.parameter.job_parameter import parameter
#tag = parameters
env = "dev"
os.environ['env'] = env
# runtime variables
job_params = parameter.get_params(job_id=117)
ss_runtime_var = {'lead_time_mean': job_params['lead_time_mean'],
'lead_time_std': job_params['lead_time_std'],
'service_level': job_params['service_level'],
'ordering_freq': job_params['ordering_freq'],
'max_review_period': job_params['max_review_period'],
'z': round(norm.ppf(job_params['service_level']), 2),
'for_next_month': job_params['for_next_month'],
'cap_ss_days': job_params['cap_ss_days'],
'use_fcst_error': job_params['use_fcst_error'],
'fcst_hist_to_use': job_params['fcst_hist_to_use'],
'debug_mode': job_params['debug_mode'],
'simulate_for': job_params['simulate_for']}
email_to = job_params['email_to']
debug_mode = job_params['debug_mode']
simulate_for = job_params['simulate_for']
err_msg = ''
df_uri = ''
schema = job_params['schema']
reset = job_params['reset']
wh_id = job_params['wh_id']
nso_history_days = job_params['nso_history_days']
status = False
logger = get_logger()
logger.info("Scripts begins")
logger.info("Run time variables --> " + str(ss_runtime_var))
# getting run date for the script
if debug_mode == 'Y' and simulate_for != '':
reset_date = simulate_for
current_month_date = (pd.to_datetime(simulate_for).date() - timedelta(days=pd.to_datetime(simulate_for).day - 1))
else:
reset_date = str(datetime.now(tz=gettz('Asia/Kolkata')).date())
current_month_date = (datetime.now(tz=gettz('Asia/Kolkata')).date() -
timedelta(days=datetime.now(tz=gettz('Asia/Kolkata')).day - 1))
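# forecast_date below is the first day of the next month when for_next_month == 'Y'
# (December rolls over to January of the following year), otherwise the first day
# of the current month.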
if ss_runtime_var['for_next_month'] == 'Y':
forecast_date = str(
datetime(current_month_date.year +
int(current_month_date.month / 12),
((current_month_date.month % 12) + 1), 1).date())
else:
forecast_date = str(current_month_date)
logger.info(f"""
debug_mode --> {debug_mode}
reset_date --> {reset_date},
current_month_date --> {current_month_date},
forecast_date --> {forecast_date}
""")
try:
rs_db = DB()
rs_db.open_connection()
logger.info('reading input file to get expected_nso')
params_table_query = """
select
"month-begin-dt" as month_begin_dt,
value as expected_nso
from
"prod2-generico"."wh-forecast-repln-input"
where
"param-name" = 'expected_nso'
"""
params_table = rs_db.get_df(params_table_query)
logger.info('expected_nso parameter read')
params_table = params_table.apply(pd.to_numeric, errors='ignore')
params_table['month_begin_dt'] = params_table['month_begin_dt'].astype(str)
try:
expected_nso = int(params_table[
params_table[
'month_begin_dt'] == forecast_date][
'expected_nso'])
except Exception as error:
expected_nso = 0
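# default to 0 when no expected_nso entry exists for the forecast month
# (or the value cannot be cast to int)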
logger.info(f"expected_nso --> {expected_nso}")
store_query = '''
select
"id",
name,
"opened-at" as opened_at
from
"prod2-generico".stores
where
"name" <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in (92, 52)
'''
stores = rs_db.get_df(store_query)
store_id_list = list(stores['id'])
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
# CONSIDERING DRUG TYPES FOR DATA LOAD
type_list = rs_db.get_df(
'select distinct type from "prod2-generico".drugs')
type_list = tuple(type_list[
~type_list.type.isin(
['', 'banned', 'discontinued-products'])][
'type'])
# RUNNING DATA PREPARATION
drug_sales_monthly, wh_drug_list, drug_history, demand_daily_deviation = wh_data_prep(
store_id_list, current_month_date, reset_date, type_list, rs_db, logger,
ss_runtime_var, schema)
drug_sales_monthly['drug_id'] = drug_sales_monthly['drug_id'].astype(int, errors='ignore')
drug_sales_monthly['year'] = drug_sales_monthly['year'].astype(int, errors='ignore')
drug_sales_monthly['month'] = drug_sales_monthly['month'].astype(int, errors='ignore')
drug_sales_monthly['net_sales_quantity'] = drug_sales_monthly['net_sales_quantity'].astype(int, errors='ignore')
drug_history = drug_history.astype(int, errors='ignore')
drug_sales_monthly['reset_date'] = reset_date
# FORECASTING
train, train_error, predict, wh_train, wh_train_error, wh_predict = wh_forecast(
drug_sales_monthly, wh_drug_list, drug_history, logger)
train['wh_id'] = wh_id
train_error['wh_id'] = wh_id
predict['wh_id'] = wh_id
wh_train['wh_id'] = wh_id
wh_train_error['wh_id'] = wh_id
wh_predict['wh_id'] = wh_id
train['forecast_date'] = forecast_date
train_error['forecast_date'] = forecast_date
predict['forecast_date'] = forecast_date
wh_train['forecast_date'] = forecast_date
wh_train_error['forecast_date'] = forecast_date
wh_predict['forecast_date'] = forecast_date
# SAFETY STOCK CALCULATIONS
last_actual_month = drug_sales_monthly['month_begin_dt'].max()
last_month_sales = drug_sales_monthly[
drug_sales_monthly['month_begin_dt'] == str(last_actual_month)]
last_month_sales = last_month_sales[['drug_id', 'net_sales_quantity']]
last_month_sales.rename(
columns={'net_sales_quantity': 'last_month_sales'}, inplace=True)
wh_safety_stock_df = wh_safety_stock_calc(
ss_runtime_var, wh_drug_list, wh_predict, last_month_sales, demand_daily_deviation, current_month_date,
forecast_date, reset_date, logger, expected_nso, nso_history_days, rs_db)
wh_safety_stock_df['wh_id'] = wh_id
wh_safety_stock_df['reset_date'] = str(reset_date)
rs_db.close_connection()
# WRITING TO POSTGRES
s3 = S3()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
created_at = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
wh_safety_stock_df['ptr'] = ''
wh_safety_stock_df['fcst'] = wh_safety_stock_df['fcst'].fillna(0).astype(int)
wh_safety_stock_df['safety_stock'] = wh_safety_stock_df['safety_stock'].fillna(0).astype(int)
wh_safety_stock_df['month'] = wh_safety_stock_df['month'].astype(int)
wh_safety_stock_df['year'] = wh_safety_stock_df['year'].astype(int)
wh_safety_stock_df['ss_wo_cap'] = wh_safety_stock_df['ss_wo_cap'].fillna(0).astype(int)
wh_safety_stock_df['reorder_point'] = wh_safety_stock_df['reorder_point'].fillna(0).astype(int)
wh_safety_stock_df['order_upto_point'] = wh_safety_stock_df['order_upto_point'].fillna(0).astype(int)
wh_safety_stock_df['shelf_min'] = wh_safety_stock_df['shelf_min'].fillna(0).astype(int)
wh_safety_stock_df['shelf_max'] = wh_safety_stock_df['shelf_max'].fillna(0).astype(int)
wh_safety_stock_df['rop_without_nso'] = wh_safety_stock_df['rop_without_nso'].fillna(0).astype(int)
wh_safety_stock_df['oup_without_nso'] = wh_safety_stock_df['oup_without_nso'].fillna(0).astype(int)
wh_safety_stock_df['created_at'] = created_at
wh_safety_stock_df['created_by'] = 'etl-automation'
wh_safety_stock_df['updated_at'] = created_at
wh_safety_stock_df['updated_by'] = 'etl-automation'
columns = [c.replace('-', '_') for c in ['drug-id', 'drug-name', 'type', 'category', 'company', 'ptr', 'bucket',
'history-bucket', 'fcst', 'final-fcst', 'forecast-type', 'model',
'month', 'month-begin-dt', 'std', 'year', 'wh-id', 'forecast-date',
'lead-time-mean', 'lead-time-std', 'max-review-period',
'ordering-freq',
'service-level', 'z-value', 'demand-daily', 'demand-daily-deviation',
'safety-stock', 'launch-stock-per-store', 'expected-nso',
'rop-without-nso', 'reorder-point', 'oup-without-nso',
'order-upto-point', 'shelf-min', 'shelf-max', 'last-month-sales',
'safety-stock-days',
'reorder-point-days', 'order-upto-days', 'reset-date', 'created-at',
'created-by', 'updated-at', 'updated-by', 'cap_ss_days', 'ss_wo_cap']]
wh_safety_stock_df = wh_safety_stock_df[columns]
if debug_mode == 'N':
# drug_sales_monthly
drug_sales_monthly['created-at'] = created_at
drug_sales_monthly['created-by'] = 'etl-automation'
drug_sales_monthly['updated-at'] = created_at
drug_sales_monthly['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=drug_sales_monthly, table_name='wh-drug-sales-monthly', db=rs_db_write,
schema='prod2-generico')
# train
train['type'] = 'separate'
train['created-at'] = created_at
train['created-by'] = 'etl-automation'
train['updated-at'] = created_at
train['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=train, table_name='wh-train', db=rs_db_write, schema='prod2-generico')
# wh_train
wh_train['type'] = 'ensemble'
wh_train['created-at'] = created_at
wh_train['created-by'] = 'etl-automation'
wh_train['updated-at'] = created_at
wh_train['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=wh_train, table_name='wh-train', db=rs_db_write, schema='prod2-generico')
# train_error
train_error['type'] = 'separate'
train_error['created-at'] = created_at
train_error['created-by'] = 'etl-automation'
train_error['updated-at'] = created_at
train_error['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=train_error, table_name='wh-train-error', db=rs_db_write, schema='prod2-generico')
# wh_train_error
wh_train_error['type'] = 'ensemble'
wh_train_error['created-at'] = created_at
wh_train_error['created-by'] = 'etl-automation'
wh_train_error['updated-at'] = created_at
wh_train_error['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=wh_train_error[train_error.columns], table_name='wh-train-error', db=rs_db_write,
schema='prod2-generico')
# predict
predict['type'] = 'separate'
predict['created-at'] = created_at
predict['created-by'] = 'etl-automation'
predict['updated-at'] = created_at
predict['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=predict, table_name='wh-predict', db=rs_db_write, schema='prod2-generico')
# wh_predict
wh_predict['type'] = 'ensemble'
wh_predict['created-at'] = created_at
wh_predict['created-by'] = 'etl-automation'
wh_predict['updated-at'] = created_at
wh_predict['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=wh_predict, table_name='wh-predict', db=rs_db_write, schema='prod2-generico')
# wh_safety_stock_df
s3.write_df_to_db(df=wh_safety_stock_df, table_name='wh-safety-stock', db=rs_db_write,
schema='prod2-generico')
if reset == 'Y':
# UPLOADING SAFETY STOCK NUMBERS IN DRUG-ORDER-INFO
ss_data_upload = wh_safety_stock_df.query('order_upto_point > 0')[
['wh_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = [
'store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']
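# columns are renamed to the names expected by doid_update:
# safety_stock -> corr_min, reorder_point -> corr_ss, order_upto_point -> corr_max,
# with the warehouse id passed in the store_id column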
new_drug_entries, missed_entries = doid_update(
ss_data_upload, type_list, rs_db_write, schema, logger)
logger.info('DOI updated as per request')
logger.info('missed entries --> ' + str(missed_entries))
logger.info('new drug entries --> ' + str(new_drug_entries))
else:
logger.info('DOID update skipped as per request')
rs_db_write.close_connection()
df_uri = s3.save_df_to_s3(df=wh_safety_stock_df,
file_name='wh_safety_stock_{date}.csv'.format(date=str(forecast_date)))
status = True
except Exception as error:
err_msg = str(error)
logger.info(str(error))
raise error
email = Email()
if debug_mode == 'Y':
email_to = '[email protected],[email protected]'
if status:
result = 'Success'
email.send_email_file(subject=f"Warehouse forecast & replenishment ({env}): {result}",
mail_body=f"Run time: {datetime.now()} {err_msg}",
to_emails=email_to, file_uris=[df_uri])
else:
result = 'Failed'
email.send_email_file(subject=f"Warehouse forecast & replenishment ({env}): {result}",
mail_body=f"Run time: {datetime.now()} {err_msg}",
to_emails=email_to, file_uris=[])
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/warehouse/wh_forecast_reset.ipynb | wh_forecast_reset.ipynb |
```
!pip install zeno_etl_libs==1.0.36
"""
Created on Sun May 1 23:28:09 2021
@author: [email protected]
Purpose: To generate forecast for Goodaid drugs at Goodaid warehouse
"""
import os
import sys
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.helper.parameter.job_parameter import parameter
#tag = parameters
env = "dev"
os.environ['env'] = env
job_params = parameter.get_params(job_id=125)
email_to = job_params['email_to']
logger = get_logger()
logger.info("Scripts begins. Env = " + env)
status = False
err_msg = ''
df_uri = ''
run_date = str(datetime.now().strftime("%Y-%m-%d"))
drugs_not_in_doi = 0
drugs_missed = 0
drugs_updated = 0
try:
rs_db = DB()
rs_db.open_connection()
# read inputs file to get parameters
logger.info('reading input file to get parameters')
params_table_query = """
select
"param-name" as param,
value
from
"prod2-generico"."wh-goodaid-forecast-input"
where
"param-name" not in ('drug_lvl_fcst_inputs' , 's_and_op_factors')
"""
logger.info('input parameters read')
params_table = rs_db.get_df(params_table_query)
params_table = params_table.apply(pd.to_numeric, errors='ignore')
days = int(params_table.where(params_table['param'] == 'days',
axis=0).dropna()['value'])
expected_new_stores = int(params_table.where(
params_table['param'] == 'expected_new_stores',
axis=0).dropna()['value'])
wh_id = int(params_table.where(params_table['param'] == 'gaw_id',
axis=0).dropna()['value'])
revenue_min = int(params_table.where(
params_table['param'] == 'revenue_min', axis=0).dropna()['value'])
revenue_max = int(params_table.where(
params_table['param'] == 'revenue_max', axis=0).dropna()['value'])
# get active gaid drugs list
drugs_query = '''
select
wssm."drug-id" as drug_id,
d.composition,
d."drug-name" as drug_name,
d.company,
d."type",
d.category
from
"prod2-generico"."wh-sku-subs-master" wssm
left join "prod2-generico".drugs d on
d.id = wssm."drug-id"
where
wssm."add-wh" = 'Yes'
and d."type" not in ('discontinued-products')
and d.company = 'GOODAID'
'''
drugs = rs_db.get_df(drugs_query)
logger.info('active drugs list pulled from wssm')
# get 28 days sales for active gaid drugs
drug_sales_query = '''
select
"drug-id" as drug_id,
sum(quantity) as drug_sales_quantity
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - {days}
and date("created-at") < current_date
group by
"drug-id"
'''.format(days=days, drug_ids=tuple(drugs['drug_id']))
drug_sales = rs_db.get_df(drug_sales_query)
logger.info('drug sales data pulled from rs')
drug_sales['drug_sales_quantity'] = drug_sales[
'drug_sales_quantity'] * 28 / days
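# scale sales observed over `days` days to a 28-day (4-week) equivalent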
# get non-ethical composition level sale
composition_sales_query = '''
select
composition as composition,
sum(quantity) as composition_sales_quantity
from
"prod2-generico".sales
where
composition in {compositions}
and date("created-at") >= current_date - {days}
and date("created-at") < current_date
and "type" <> 'ethical'
group by
composition
'''.format(days=days, compositions=tuple(drugs['composition']))
composition_sales = rs_db.get_df(composition_sales_query)
logger.info('composition data pulled from rs')
composition_sales['composition_sales_quantity'] = composition_sales[
'composition_sales_quantity'] * 28 / days
# merging data
main_df = drugs.merge(drug_sales, on='drug_id', how='left')
main_df['drug_sales_quantity'].fillna(0, inplace=True)
main_df = main_df.merge(composition_sales, on='composition',
how='left')
main_df['composition_sales_quantity'].fillna(0, inplace=True)
# getting 50% of composition level sales
main_df['composition_sales_quantity_50%'] = main_df[
'composition_sales_quantity'] * 0.5
main_df['composition_sales_quantity_50%'] = main_df[
'composition_sales_quantity_50%'].round(0)
# calculate month-on-month sales growth
# getting last-to-last 28 day sales for calculating growth factor
last_to_last_sales_query = '''
select
"drug-id" as drug_id,
sum(quantity) as last_to_last_28_day_sales
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - 56
and date("created-at") < current_date - 28
group by
"drug-id"
'''.format(drug_ids=tuple(drugs['drug_id']))
last_to_last_sales = rs_db.get_df(last_to_last_sales_query)
logger.info('last-to-last 28 day sales data pulled from rs')
# getting last 28 day sales
last_sales_query = '''
select
"drug-id" as drug_id,
sum(quantity) as last_28_day_sales
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - 28
and date("created-at") < current_date
group by
"drug-id"
'''.format(drug_ids=tuple(drugs['drug_id']))
last_sales = rs_db.get_df(last_sales_query)
logger.info('last 28 day sales data pulled from rs')
# merging to main_df
main_df = main_df.merge(last_to_last_sales, on='drug_id', how='left')
main_df['last_to_last_28_day_sales'].fillna(0, inplace=True)
main_df = main_df.merge(last_sales, on='drug_id', how='left')
main_df['last_28_day_sales'].fillna(0, inplace=True)
main_df['growth_factor'] = main_df['last_28_day_sales'] / main_df[
'last_to_last_28_day_sales']
main_df['growth_factor'].fillna(1, inplace=True)
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] == np.inf, 1,
main_df['growth_factor'])
# growth factor capped at 150% - min at 100%
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] > 1.5, 1.5,
main_df['growth_factor'])
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] < 1, 1,
main_df['growth_factor'])
# growth factor forced to 1 when 50% comp sales > drug sales
main_df['growth_factor'] = np.where(main_df[
'composition_sales_quantity_50%'] >
main_df[
'drug_sales_quantity'], 1,
main_df['growth_factor'])
# get s&op factor
logger.info('reading s&op factors table')
input_table_query = """
select
"drug-id" as drug_id,
value as s_op_factor,
"start-date" as start_date,
"end-date" as end_date
from
"prod2-generico"."wh-goodaid-forecast-input"
where
"param-name" = 's_and_op_factors'
"""
s_op_table = rs_db.get_df(input_table_query)
logger.info('s&op factors table read')
s_op_table = s_op_table.apply(pd.to_numeric, errors='ignore')
s_op_table = s_op_table[
s_op_table['start_date'] <= datetime.now().date()]
s_op_table = s_op_table[
s_op_table['end_date'] >= datetime.now().date()]
s_op_table.drop('start_date', axis=1, inplace=True)
s_op_table.drop('end_date', axis=1, inplace=True)
main_df = main_df.merge(s_op_table, on='drug_id', how='left')
main_df['s_op_factor'].fillna(1, inplace=True)
# get avg gaid sales for 13-16 lakh revenue stores
# getting stores lists to compare with
stores_cmp_query = '''
select
"store-id" as store_id,
round(sum("revenue-value")) as revenue
from
"prod2-generico".sales
where
date("created-at") >= current_date - 28
and date("created-at") < current_date
group by
"store-id"
'''
stores_cmp = rs_db.get_df(stores_cmp_query)
stores_cmp = stores_cmp[stores_cmp['revenue'] > revenue_min]
stores_cmp = stores_cmp[stores_cmp['revenue'] < revenue_max]
stores_list_to_comp = tuple(stores_cmp['store_id'])
logger.info('list of stores with revenue between 1.3 and 1.6 mil -->'
+ str(stores_list_to_comp))
# adding expected_new_stores column
main_df['expected_new_stores'] = expected_new_stores
# getting avg sales
avg_store_sales_query = '''
select
composition ,
sum(quantity)/ {count} as avg_drug_sales_quantity
from
"prod2-generico".sales
where
composition in {compositions}
and date("created-at") >= current_date - 28
and date("created-at") < current_date
and "type" <> 'ethical'
and "store-id" in {stores_list_to_comp}
group by
composition
'''.format(compositions=tuple(drugs['composition']), \
stores_list_to_comp=stores_list_to_comp, \
count=len(stores_list_to_comp))
avg_store_sales = rs_db.get_df(avg_store_sales_query)
logger.info('avg composition sales retrieved for sample stores')
avg_store_sales['avg_drug_sales_quantity'] = avg_store_sales[
'avg_drug_sales_quantity'].round()
# merge to main_df
main_df = main_df.merge(avg_store_sales, on='composition', how='left')
main_df['avg_drug_sales_quantity'].fillna(0, inplace=True)
# get final forecast figures
main_df['forecast'] = main_df[[
'drug_sales_quantity',
'composition_sales_quantity_50%']].max(axis=1)
main_df['forecast'] = main_df['forecast'] * main_df['growth_factor'] * \
main_df['s_op_factor'] + main_df[
'expected_new_stores'] * \
main_df['avg_drug_sales_quantity']
main_df['forecast'] = main_df['forecast'].round()
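# forecast = max(own drug sales, 50% of composition sales) * growth factor
# * s&op factor + expected new stores * avg per-store composition sales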
# get input table and merge with main_df
logger.info('reading input table')
input_table_query = """
select
"drug-id" as drug_id,
lead_time_doh,
safety_stock_doh,
review_period
from
"prod2-generico"."wh-goodaid-forecast-input"
where
"param-name" = 'drug_lvl_fcst_inputs'
"""
input_table = rs_db.get_df(input_table_query)
logger.info('input table read')
input_table = input_table.apply(pd.to_numeric, errors='ignore')
input_table['reorder_point_doh'] = input_table['lead_time_doh'] + \
input_table['safety_stock_doh']
input_table['min_doh'] = input_table['safety_stock_doh']
input_table['order_upto_point_doh'] = input_table['lead_time_doh'] + \
input_table['safety_stock_doh'] + \
input_table['review_period']
main_df = main_df.merge(input_table, on='drug_id', how='left')
# populating missing rows with defaults
main_df['lead_time_doh'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'lead_time_doh'].item(), inplace=True)
main_df['safety_stock_doh'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'safety_stock_doh'].item(), inplace=True)
main_df['review_period'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'review_period'].item(), inplace=True)
main_df['reorder_point_doh'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'reorder_point_doh'].item(), inplace=True)
main_df['min_doh'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'min_doh'].item(), inplace=True)
main_df['order_upto_point_doh'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'order_upto_point_doh'].item(), inplace=True)
# calculate ss min max
main_df['safety_stock'] = (main_df['forecast'] / 28 *
main_df['safety_stock_doh']).round()
main_df['reorder_point'] = (main_df['forecast'] / 28 *
main_df['reorder_point_doh']).round()
main_df['order_upto_point'] = (main_df['forecast'] / 28 *
main_df['order_upto_point_doh']).round()
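# days-on-hand (DOH) targets are converted to units using the implied
# daily rate, forecast / 28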
# get table structure to write to
to_upload_query = '''
select
*
from
"prod2-generico"."wh-safety-stock"
limit 1
'''
to_upload = rs_db.get_df(to_upload_query)
to_upload.columns = [c.replace('-', '_') for c in to_upload.columns]
to_upload.drop(0, axis=0, inplace=True)
to_upload['drug_id'] = main_df['drug_id']
to_upload['drug_name'] = main_df['drug_name']
to_upload['type'] = main_df['type']
to_upload['category'] = main_df['category']
to_upload['company'] = main_df['company']
# to_upload['bucket'] = main_df['bucket']
to_upload['fcst'] = main_df['forecast'].astype(int, errors='ignore')
to_upload['wh_id'] = wh_id
to_upload['forecast_type'] = 'goodaid'
to_upload['lead_time_mean'] = main_df['lead_time_doh']
to_upload['max_review_period'] = main_df['review_period'].astype(int, errors='ignore')
to_upload['demand_daily'] = main_df['forecast'] / 28
to_upload['safety_stock'] = main_df['safety_stock'].astype(int, errors='ignore')
to_upload['expected_nso'] = expected_new_stores
to_upload['reorder_point'] = main_df['reorder_point'].astype(int, errors='ignore')
to_upload['order_upto_point'] = main_df['order_upto_point'].astype(int, errors='ignore')
to_upload['last_month_sales'] = main_df['drug_sales_quantity'].astype(int, errors='ignore')
to_upload['safety_stock_days'] = main_df['safety_stock_doh']
to_upload['reorder_point_days'] = main_df['reorder_point_doh']
to_upload['order_upto_days'] = main_df['order_upto_point_doh']
to_upload['reset_date'] = run_date
to_upload['month'] = str(datetime.now(tz=gettz('Asia/Kolkata')).strftime("%m"))
to_upload['year'] = str(datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y"))
to_upload['month_begin_dt'] = str(
datetime.now(tz=gettz('Asia/Kolkata')).date() - timedelta(days=datetime.now(tz=gettz('Asia/Kolkata')).day - 1))
to_upload['created_at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
to_upload['created_by'] = 'etl-automation'
to_upload['updated_at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
to_upload['updated_by'] = 'etl-automation'
to_upload = to_upload.fillna('')
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
s3.write_df_to_db(df=to_upload, table_name='wh-safety-stock',
db=rs_db_write, schema='prod2-generico')
logger.info("wh-safety-stock table updated")
# WRITING ATTACHMENTS FOR SUCCESS
df_uri = s3.save_df_to_s3(df=main_df,
file_name='GAW_goodaid_forecast_{date}.csv'.format(date=str(run_date)))
# writing to doid
logger.info('writing to doid for ' +
str(int(to_upload[['drug_id']].nunique())) + ' drugs')
ss_data_upload = to_upload.query('order_upto_point > 0')[
['wh_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = [
'store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']
type_list = tuple(drugs['type'].unique())
ss_data_upload = ss_data_upload.astype(float)
new_drug_entries, missed_entries = doid_update(
ss_data_upload, type_list, rs_db, 'prod2-generico', logger, gaid_omit=False)
rs_db.connection.close()
drugs_not_in_doi = len(new_drug_entries)
drugs_missed = len(missed_entries)
drugs_updated = len(ss_data_upload) - len(missed_entries) - len(new_drug_entries)
rs_db.close_connection()
rs_db_write.close_connection()
status = True
except Exception as e:
err_msg = str(e)
logger.info('wh_goodaid_forecast_343 job failed')
logger.exception(e)
# Sending email
email = Email()
if status:
result = 'Success'
email.send_email_file(subject=f"GOODAID Warehouse forecast ({env}): {result}",
mail_body=f"""
drugs updated successfully --> {drugs_updated}
drugs not updated --> {drugs_missed}
drugs not in doid --> {drugs_not_in_doi}
""",
to_emails=email_to, file_uris=[df_uri])
else:
result = 'Failed'
email.send_email_file(subject=f"GOODAID Warehouse forecast ({env}): {result}",
mail_body=f"Run time: {datetime.now(tz=gettz('Asia/Kolkata'))} {err_msg}",
to_emails=email_to, file_uris=[])
logger.info("Script ended")
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/warehouse/wh_goodaid_forecast_343.ipynb | wh_goodaid_forecast_343.ipynb |
```
!pip install zeno_etl_libs==1.0.36
"""
Created on Sun May 26 23:28:09 2021
@author: [email protected]
Purpose: To generate forecast for Goodaid drugs at Bhiwandi warehouse
"""
import os
import sys
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from dateutil.tz import gettz
from scipy.stats import norm
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.helper.parameter.job_parameter import parameter
#tag = parameters
env = "dev"
os.environ['env'] = env
job_params = parameter.get_params(job_id=130)
email_to = job_params['email_to']
days = job_params['days']
lead_time_mean = job_params['lead_time_mean']
lead_time_std = job_params['lead_time_std']
max_review_period = job_params['max_review_period']
wh_id = 199
cap_ss_days = job_params['cap_ss_days']
service_level = job_params['service_level']
ordering_freq = job_params['ordering_freq']
logger = get_logger()
logger.info("Scripts begins. Env = " + env)
status = False
err_msg = ''
df_uri = ''
run_date = str(datetime.now().strftime("%Y-%m-%d"))
drugs_not_in_doi = 0
drugs_missed = 0
drugs_updated = 0
def get_launch_stock_per_store(rs_db, days, drugs):
new_stores_list_query = """
select
id as store_id,
date("opened-at") as opened_at
from
"prod2-generico".stores s
where
"opened-at" >= CURRENT_DATE - {days}
and id not in (281, 297)
""".format(days=days)
new_stores_list = rs_db.get_df(new_stores_list_query)
store_ids_list = tuple(new_stores_list['store_id'].astype(str))
# get shortbook launch orders
sb_orders_query = '''
select
distinct sb."store-id" as store_id,
sb."drug-id" as drug_id,
date(sb."created-at") as created_at,
sb.quantity as ordered_quantity,
date(s2."opened-at") as opened_at
from
"prod2-generico"."short-book-1" sb
left join "prod2-generico".stores s2 on
s2.id = sb."store-id"
where
"store-id" in {store_ids}
and date(sb."created-at") < date(s2."opened-at")
'''.format(store_ids=store_ids_list, days=days)
sb_orders = rs_db.get_df(sb_orders_query)
df = sb_orders.copy()
df = df[df['drug_id'].isin(drugs['drug_id'])]
df = df[['store_id', 'drug_id', 'ordered_quantity']]
df.drop_duplicates(inplace=True)
new_stores_count = sb_orders['store_id'].nunique()
df = df[['drug_id', 'ordered_quantity']]
launch_stock = df.groupby('drug_id').sum().reset_index()
launch_stock_per_store = launch_stock.copy()
launch_stock_per_store['ordered_quantity'] = \
launch_stock['ordered_quantity'] / new_stores_count
launch_stock_per_store.rename(
columns={'ordered_quantity': 'launch_stock_per_store'}, inplace=True)
return launch_stock_per_store
try:
rs_db = DB()
rs_db.open_connection()
# read inputs file to get parameters
logger.info('reading input file to get parameters')
params_table_query = """
select
"param-name" as param,
value
from
"prod2-generico"."wh-goodaid-forecast-input"
where
"param-name" not in ('drug_lvl_fcst_inputs' , 's_and_op_factors')
"""
logger.info('input parameters read')
params_table = rs_db.get_df(params_table_query)
params_table = params_table.apply(pd.to_numeric, errors='ignore')
revenue_min = int(params_table.where(
params_table['param'] == 'revenue_min', axis=0).dropna()['value'])
revenue_max = int(params_table.where(
params_table['param'] == 'revenue_max', axis=0).dropna()['value'])
new_stores_list_query = """
select
id as store_id,
date("opened-at") as opened_at
from
"prod2-generico".stores s
where
"opened-at" >= CURRENT_DATE - 90
and id not in (281, 297)
""".format(days=days)
new_stores_list = rs_db.get_df(new_stores_list_query)
expected_new_stores = len(new_stores_list)
logger.info("expected new stores --> " + str(expected_new_stores))
# get active gaid drugs list
drugs_query = '''
select
wssm."drug-id" as drug_id,
d.composition,
d."drug-name" as drug_name,
d.company,
d."type",
d.category
from
"prod2-generico"."wh-sku-subs-master" wssm
left join "prod2-generico".drugs d on
d.id = wssm."drug-id"
where
wssm."add-wh" = 'Yes'
and d."type" not in ('discontinued-products')
and d.company = 'GOODAID'
'''
drugs = rs_db.get_df(drugs_query)
logger.info('active drugs list pulled from wssm')
# get 28 days sales for active gaid drugs
drug_sales_query = '''
select
"drug-id" as drug_id,
date("created-at") as created_at,
sum(quantity) as drug_sales_quantity
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - {days}
and date("created-at") < current_date
group by
"drug-id",
date("created-at")
'''.format(days=days, drug_ids=tuple(drugs['drug_id']))
sales_data_for_std = rs_db.get_df(drug_sales_query)
drugs_std = sales_data_for_std.groupby('drug_id').std().reset_index()
drugs_std = drugs_std.rename(columns={'drug_sales_quantity': 'demand_daily_deviation'})
drug_sales = sales_data_for_std.groupby('drug_id').sum().reset_index()
logger.info('drug sales data pulled from rs')
drug_sales['drug_sales_quantity'] = drug_sales[
'drug_sales_quantity'] * 28 / days
# get non-ethical composition level sale
composition_sales_query = '''
select
composition as composition,
sum(quantity) as composition_sales_quantity
from
"prod2-generico".sales
where
composition in {compositions}
and date("created-at") >= current_date - {days}
and date("created-at") < current_date
and "type" <> 'ethical'
group by
composition
'''.format(days=days, compositions=tuple(drugs['composition']))
composition_sales = rs_db.get_df(composition_sales_query)
logger.info('composition data pulled from rs')
composition_sales['composition_sales_quantity'] = composition_sales[
'composition_sales_quantity'] * 28 / days
# merging data
main_df = drugs.merge(drug_sales, on='drug_id', how='left')
main_df['drug_sales_quantity'].fillna(0, inplace=True)
main_df = main_df.merge(composition_sales, on='composition',
how='left')
main_df['composition_sales_quantity'].fillna(0, inplace=True)
# getting 50% of composition level sales
main_df['composition_sales_quantity_50%'] = main_df[
'composition_sales_quantity'] * 0.5
main_df['composition_sales_quantity_50%'] = main_df[
'composition_sales_quantity_50%'].round(0)
# calculate month-on-month sales growth
# getting last-to-last 28 day sales for calculating growth factor
last_to_last_sales_query = '''
select
"drug-id" as drug_id,
sum(quantity) as last_to_last_28_day_sales
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - 56
and date("created-at") < current_date - 28
group by
"drug-id"
'''.format(drug_ids=tuple(drugs['drug_id']))
last_to_last_sales = rs_db.get_df(last_to_last_sales_query)
logger.info('last-to-last 28 day sales data pulled from rs')
# getting last 28 day sales
last_sales_query = '''
select
"drug-id" as drug_id,
sum(quantity) as last_28_day_sales
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - 28
and date("created-at") < current_date
group by
"drug-id"
'''.format(drug_ids=tuple(drugs['drug_id']))
last_sales = rs_db.get_df(last_sales_query)
logger.info('last 28 day sales data pulled from rs')
# merging to main_df
main_df = main_df.merge(last_to_last_sales, on='drug_id', how='left')
main_df['last_to_last_28_day_sales'].fillna(0, inplace=True)
main_df = main_df.merge(last_sales, on='drug_id', how='left')
main_df['last_28_day_sales'].fillna(0, inplace=True)
main_df['growth_factor'] = main_df['last_28_day_sales'] / main_df[
'last_to_last_28_day_sales']
main_df['growth_factor'].fillna(1, inplace=True)
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] == np.inf, 1,
main_df['growth_factor'])
# growth factor capped at 150% - min at 100%
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] > 1.5, 1.5,
main_df['growth_factor'])
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] < 1, 1,
main_df['growth_factor'])
# growth factor forced to 1 when 50% comp sales > drug sales
main_df['growth_factor'] = np.where(main_df[
'composition_sales_quantity_50%'] >
main_df[
'drug_sales_quantity'], 1,
main_df['growth_factor'])
main_df['s_op_factor'] = 1
# get avg gaid sales for 13-16 lakh revenue stores
# getting stores lists to compare with
stores_cmp_query = '''
select
"store-id" as store_id,
round(sum("revenue-value")) as revenue
from
"prod2-generico".sales
where
date("created-at") >= current_date - 28
and date("created-at") < current_date
group by
"store-id"
'''
stores_cmp = rs_db.get_df(stores_cmp_query)
stores_cmp = stores_cmp[stores_cmp['revenue'] > revenue_min]
stores_cmp = stores_cmp[stores_cmp['revenue'] < revenue_max]
stores_list_to_comp = tuple(stores_cmp['store_id'])
logger.info('list of stores with revenue between 1.3 and 1.6 mil -->'
+ str(stores_list_to_comp))
# adding expected_new_stores column
main_df['expected_new_stores'] = expected_new_stores
# getting avg sales
avg_store_sales_query = '''
select
composition ,
sum(quantity)/ {count} as avg_drug_sales_quantity
from
"prod2-generico".sales
where
composition in {compositions}
and date("created-at") >= current_date - 28
and date("created-at") < current_date
and "type" <> 'ethical'
and "store-id" in {stores_list_to_comp}
group by
composition
'''.format(compositions=tuple(drugs['composition']), \
stores_list_to_comp=stores_list_to_comp, \
count=len(stores_list_to_comp))
avg_store_sales = rs_db.get_df(avg_store_sales_query)
logger.info('avg composition sales retrieved for sample stores')
avg_store_sales['avg_drug_sales_quantity'] = avg_store_sales[
'avg_drug_sales_quantity'].round()
# merge to main_df
main_df = main_df.merge(avg_store_sales, on='composition', how='left')
main_df['avg_drug_sales_quantity'].fillna(0, inplace=True)
# get final forecast figures
main_df['forecast'] = main_df[[
'drug_sales_quantity',
'composition_sales_quantity_50%']].max(axis=1)
main_df['forecast'] = main_df['forecast'] * main_df['growth_factor'] * \
main_df['s_op_factor'] + main_df[
'expected_new_stores'] * \
main_df['avg_drug_sales_quantity']
main_df['forecast'] = main_df['forecast'].round()
main_df['demand_daily'] = main_df['forecast'] / 28
main_df = main_df.merge(drugs_std, on='drug_id', how='left')
main_df['demand_daily_deviation'].fillna(0, inplace=True)
main_df['lead_time_mean'] = lead_time_mean
main_df['lead_time_std'] = lead_time_std
main_df['review_period'] = max_review_period
main_df['ordering_freq'] = ordering_freq
main_df['service_level'] = service_level
# calculate ss min max
main_df['ss_wo_cap'] = (norm.ppf(main_df['service_level']).round(2) * np.sqrt(
(
main_df['lead_time_mean'] *
main_df['demand_daily_deviation'] *
main_df['demand_daily_deviation']
) +
(
main_df['lead_time_std'] *
main_df['lead_time_std'] *
main_df['demand_daily'] *
main_df['demand_daily']
)
)
).round(0)
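# ss_wo_cap above is the standard safety stock formula:
# z * sqrt(lead_time_mean * demand_std^2 + lead_time_std^2 * demand_daily^2),
# before applying the cap_ss_days cap below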
main_df['cap_ss_days'] = np.where(main_df['ss_wo_cap'] / main_df['forecast'] * 28 > cap_ss_days,
cap_ss_days, '')
main_df['safety_stock'] = np.where(main_df['ss_wo_cap'] / main_df['forecast'] * 28 > cap_ss_days,
main_df['drug_sales_quantity'] / 28 * cap_ss_days,
main_df['ss_wo_cap']).round(0)
main_df['rop_without_nso'] = (
main_df['safety_stock'] +
main_df['demand_daily'] *
(
main_df['lead_time_mean'] + main_df['review_period']
)
).round()
launch_stock_per_store = get_launch_stock_per_store(rs_db, 90, drugs)
main_df = main_df.merge(launch_stock_per_store, on='drug_id', how='left')
main_df['launch_stock_per_store'].fillna(0, inplace=True)
main_df['reorder_point'] = main_df['rop_without_nso'] + \
np.round((main_df['lead_time_mean'] + main_df['review_period']) *
main_df['expected_new_stores'] / 90) * \
main_df['launch_stock_per_store']
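# reorder point adds launch stock for store openings expected within the
# (lead time + review period) window, assuming expected_new_stores openings
# spread evenly over 90 days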
main_df['order_upto_point'] = (
main_df['reorder_point'] +
main_df['ordering_freq'] * main_df['demand_daily']
).round()
main_df['safety_stock_doh'] = main_df['safety_stock'] / main_df['forecast'] * 28
main_df['reorder_point_doh'] = main_df['reorder_point'] / main_df['forecast'] * 28
main_df['order_upto_point_doh'] = main_df['order_upto_point'] / main_df['forecast'] * 28
# get table structure to write to
to_upload_query = '''
select
*
from
"prod2-generico"."wh-safety-stock"
limit 1
'''
to_upload = rs_db.get_df(to_upload_query)
to_upload.columns = [c.replace('-', '_') for c in to_upload.columns]
to_upload.drop(0, axis=0, inplace=True)
to_upload['drug_id'] = main_df['drug_id']
to_upload['drug_name'] = main_df['drug_name']
to_upload['type'] = main_df['type']
to_upload['category'] = main_df['category']
to_upload['company'] = main_df['company']
# to_upload['bucket'] = main_df['bucket']
to_upload['fcst'] = main_df['forecast'].astype(int, errors='ignore')
to_upload['wh_id'] = wh_id
to_upload['forecast_type'] = 'goodaid_199'
to_upload['lead_time_mean'] = main_df['lead_time_mean']
to_upload['max_review_period'] = main_df['review_period'].astype(int, errors='ignore')
to_upload['demand_daily'] = main_df['demand_daily']
to_upload['std'] = main_df['demand_daily_deviation']
to_upload['safety_stock'] = main_df['safety_stock'].astype(int, errors='ignore')
to_upload['expected_nso'] = expected_new_stores
to_upload['rop_without_nso'] = main_df['rop_without_nso'].astype(int, errors='ignore')
to_upload['reorder_point'] = main_df['reorder_point'].astype(int, errors='ignore')
to_upload['order_upto_point'] = main_df['order_upto_point'].astype(int, errors='ignore')
to_upload['last_month_sales'] = main_df['drug_sales_quantity'].astype(int, errors='ignore')
to_upload['safety_stock_days'] = main_df['safety_stock_doh']
to_upload['reorder_point_days'] = main_df['reorder_point_doh']
to_upload['order_upto_days'] = main_df['order_upto_point_doh']
to_upload['reset_date'] = run_date
to_upload['month'] = str(datetime.now(tz=gettz('Asia/Kolkata')).strftime("%m"))
to_upload['year'] = str(datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y"))
to_upload['month_begin_dt'] = str(
datetime.now(tz=gettz('Asia/Kolkata')).date() - timedelta(days=datetime.now(tz=gettz('Asia/Kolkata')).day - 1))
to_upload['created_at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
to_upload['created_by'] = 'etl-automation'
to_upload['updated_at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
to_upload['updated_by'] = 'etl-automation'
to_upload['cap_ss_days'] = main_df['cap_ss_days']
to_upload['ss_wo_cap'] = main_df['ss_wo_cap'].astype(int, errors='ignore')
to_upload['lead_time_std'] = main_df['lead_time_std']
to_upload['ordering_freq'] = main_df['ordering_freq']
to_upload = to_upload.fillna('')
#write connection
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
s3.write_df_to_db(df=to_upload, table_name='wh-safety-stock', db=rs_db_write, schema='prod2-generico')
logger.info("wh-safety-stock table updated")
# WRITING ATTACHMENTS FOR SUCCESS
df_uri = s3.save_df_to_s3(df=main_df,
file_name='BHW_goodaid_forecast_{date}.csv'.format(date=str(run_date)))
# writing to doid
logger.info('writing to doid for ' +
str(int(to_upload[['drug_id']].nunique())) + ' drugs')
ss_data_upload = to_upload.query('order_upto_point > 0')[
['wh_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = [
'store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']
type_list = tuple(drugs['type'].unique())
ss_data_upload = ss_data_upload.astype(float)
new_drug_entries, missed_entries = doid_update(ss_data_upload, type_list, rs_db, 'prod2-generico', logger, gaid_omit=False)
rs_db.connection.close()
drugs_not_in_doi = len(new_drug_entries)
drugs_missed = len(missed_entries)
drugs_updated = len(ss_data_upload) - len(missed_entries) - len(new_drug_entries)
rs_db.close_connection()
rs_db_write.close_connection()
status = True
except Exception as e:
err_msg = str(e)
logger.info('wh_goodaid_forecast_199 job failed')
logger.exception(e)
# Sending email
email = Email()
if status:
result = 'Success'
email.send_email_file(subject=f"Bhiwandi Warehouse GOODAID forecast ({env}): {result}",
mail_body=f"""
drugs updated successfully --> {drugs_updated}
drugs not updated --> {drugs_missed}
drugs not in doid --> {drugs_not_in_doi}
""",
to_emails=email_to, file_uris=[df_uri])
else:
result = 'Failed'
email.send_email_file(subject=f"Bhiwandi Warehouse GOODAID forecast ({env}): {result}",
mail_body=f"Run time: {datetime.now(tz=gettz('Asia/Kolkata'))} {err_msg}",
to_emails=email_to, file_uris=[])
logger.info("Script ended")
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/warehouse/wh_goodaid_forecast_199.ipynb | wh_goodaid_forecast_199.ipynb |
```
!pip install zeno_etl_libs==1.0.107
!pip install openpyxl
"""IPC combination level forecast for PMF stores"""
import os
import sys
import argparse
import pandas as pd
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper import helper
from zeno_etl_libs.utils.ipc_pmf.ipc_combination_fcst.forecast_main import ipc_comb_forecast
from zeno_etl_libs.utils.ipc_pmf.ipc_drug_fcst.forecast_main import ipc_drug_forecast
from zeno_etl_libs.utils.ipc_pmf.ipc_combination_fcst.fcst_mapping import fcst_comb_drug_map
from zeno_etl_libs.utils.ipc_pmf.safety_stock import safety_stock_calc
from zeno_etl_libs.utils.ipc_pmf.post_processing import post_processing
from zeno_etl_libs.utils.ipc_pmf.heuristics.recency_corr import fcst_correction
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
```
# Main Function
```
def main(debug_mode, reset_stores, reset_date, type_list_comb_lvl,
type_list_drug_lvl, v4_active_flag, drug_type_list_v4,
read_schema, rs_db_read, write_schema, rs_db_write, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
# Define empty variables if required in case of fail
safety_stock_df = pd.DataFrame()
df_one_one = pd.DataFrame()
df_one_many = pd.DataFrame()
df_one_none = pd.DataFrame()
df_none_one = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
try:
for store_id in reset_stores:
logger.info(f"Running for store id: {store_id} and reset date: {reset_date}")
type_list_comb_lvl_str = str(type_list_comb_lvl).replace('[', '(').replace(']', ')')
type_list_drug_lvl_str = str(type_list_drug_lvl).replace('[', '(').replace(']', ')')
# RUNNING IPC-COMBINATION FORECAST PIPELINE
logger.info("Combination Forecast Pipeline starts")
fcst_df_comb_lvl, seg_df_comb_lvl, \
comb_sales_latest_12w, comb_sales_4w_wtd = ipc_comb_forecast(
store_id, reset_date, type_list_comb_lvl_str, read_schema, rs_db_read,
logger)
# RUNNING IPC-DRUG FORECAST PIPELINE
logger.info("Drug Forecast Pipeline starts")
fcst_df_drug_lvl, seg_df_drug_lvl, drug_sales_latest_12w,\
drug_sales_latest_4w, drug_sales_4w_wtd = ipc_drug_forecast(
store_id, reset_date, type_list_drug_lvl_str, read_schema,
rs_db_read, logger)
# RECENCY CORRECTION IF FCST=0, FCST=AVG_DEMAND_28D (FROM LATEST 12W)
logger.info("Recency correction starts")
fcst_df_comb_lvl, fcst_df_drug_lvl = fcst_correction(
fcst_df_comb_lvl, comb_sales_latest_12w, fcst_df_drug_lvl,
drug_sales_latest_12w, drug_sales_latest_4w, comb_sales_4w_wtd,
drug_sales_4w_wtd, logger)
# MAPPING FORECASTS TO ASSORTMENT DRUGS
logger.info("Allotting combination forecasts to drugs")
df_fcst_final, df_one_one, df_one_many, \
df_one_none, df_none_one = fcst_comb_drug_map(
store_id, reset_date, fcst_df_comb_lvl, fcst_df_drug_lvl,
type_list_comb_lvl, read_schema, rs_db_read, logger)
# SAFETY STOCK CALCULATIONS
logger.info("Safety Stock Calculations starts")
safety_stock_df = safety_stock_calc(
df_fcst_final, store_id, reset_date,
v4_active_flag, drug_type_list_v4, drug_sales_latest_12w,
read_schema, rs_db_read, logger)
# POST PROCESSING SS DF
logger.info("Post Processing SS-DF starts")
safety_stock_df, seg_df_comb_lvl, seg_df_drug_lvl = post_processing(
store_id, safety_stock_df, seg_df_comb_lvl, seg_df_drug_lvl,
read_schema, rs_db_read, logger)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc-pmf-safety-stock
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date,
'%Y-%m-%d').date()
safety_stock_df['store_id'] = safety_stock_df['store_id'].astype(int)
safety_stock_df['drug_id'] = safety_stock_df['drug_id'].astype(int)
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in
safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-pmf-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: ipc-pmf-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='ipc-pmf-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table ipc-pmf-comb-segmentation
seg_df_comb_lvl['reset_date'] = dt.datetime.strptime(reset_date,
'%Y-%m-%d').date()
seg_df_comb_lvl['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df_comb_lvl['created-by'] = 'etl-automation'
seg_df_comb_lvl['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df_comb_lvl['updated-by'] = 'etl-automation'
seg_df_comb_lvl.columns = [c.replace('_', '-') for c in seg_df_comb_lvl.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-pmf-comb-segmentation',
schema=write_schema)
columns = list(table_info['column_name'])
seg_df_comb_lvl = seg_df_comb_lvl[columns] # required column order
logger.info("Writing to table: ipc-pmf-comb-segmentation")
s3.write_df_to_db(df=seg_df_comb_lvl,
table_name='ipc-pmf-comb-segmentation',
db=rs_db_write, schema=write_schema)
# writing table ipc-pmf-drug-segmentation
seg_df_drug_lvl['reset_date'] = dt.datetime.strptime(reset_date,
'%Y-%m-%d').date()
seg_df_drug_lvl['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df_drug_lvl['created-by'] = 'etl-automation'
seg_df_drug_lvl['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df_drug_lvl['updated-by'] = 'etl-automation'
seg_df_drug_lvl.columns = [c.replace('_', '-') for c in
seg_df_drug_lvl.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-pmf-drug-segmentation',
schema=write_schema)
columns = list(table_info['column_name'])
seg_df_drug_lvl = seg_df_drug_lvl[columns] # required column order
logger.info("Writing to table: ipc-pmf-drug-segmentation")
s3.write_df_to_db(df=seg_df_drug_lvl,
table_name='ipc-pmf-drug-segmentation',
db=rs_db_write, schema=write_schema)
logger.info("All writes to RS-DB completed!")
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in
safety_stock_df.columns]
ss_data_upload = safety_stock_df.loc[
(safety_stock_df["order_upto_point"] > 0)]
ss_data_upload = ss_data_upload[['store_id', 'drug_id',
'safety_stock',
'reorder_point',
'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list_drug_lvl_str, rs_db_write,
write_schema, logger, gaid_omit=False)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
status = 'Success'
except Exception as error:
logger.exception(error)
return status, safety_stock_df, df_one_one, df_one_many, df_one_none, \
df_none_one, new_drug_entries, missed_entries
```
# Pass Params
```
env = "dev"
email_to = "[email protected]"
debug_mode = "N"
run_batch = "run_batch"
tot_batch = "tot_batch"
batch_stores = "batch_stores"
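# NOTE: run_batch / tot_batch / batch_stores above appear to be placeholders
# expected to be overridden at runtime; batch_stores should end up as a list of
# store ids for the intersection with reset_stores further below to work.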
os.environ['env'] = env
logger = get_logger()
s3 = S3()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
```
# Read Params from RS table
```
from zeno_etl_libs.helper.parameter.job_parameter import parameter
args = parameter.get_params(job_id=171)
# JOB EXCLUSIVE PARAMS
reset_date = args['reset_date']
reset_stores = args['reset_stores']
v4_active_flag = args['v4_active_flag']
drug_type_list_v4 = args['drug_type_list_v4']
type_list_comb_lvl = args['type_list_comb_lvl']
type_list_drug_lvl = args['type_list_drug_lvl']
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
# for batch run
reset_stores = list(set(reset_stores).intersection(batch_stores))
```
# Execute Main Function
```
""" calling the main function """
status, safety_stock_df, df_one_one, df_one_many, df_one_none, \
df_none_one, new_drug_entries, missed_entries = main(
debug_mode, reset_stores, reset_date, type_list_comb_lvl,
type_list_drug_lvl, v4_active_flag,
drug_type_list_v4, read_schema, rs_db_read, write_schema,
rs_db_write, logger)
# close RS connections
rs_db_read.close_connection()
rs_db_write.close_connection()
```
# Send Email Notification
```
ss_df_uri = s3.save_df_to_s3(
safety_stock_df, file_name=f"safety_stock_df_{reset_date}.csv")
new_drug_entries_uri = s3.save_df_to_s3(new_drug_entries,
file_name=f"new_drug_entries_{reset_date}.csv")
missed_entries_uri = s3.save_df_to_s3(missed_entries,
file_name=f"missed_entries_{reset_date}.csv")
all_cases_xl_path = s3.write_df_to_excel(data={
'C1_one_one': df_one_one, 'C2_one_many': df_one_many,
'C3_one_none': df_one_none, 'C4_none_one': df_none_one},
file_name=f"all_mappings_{reset_date}.xlsx")
email = Email()
email.send_email_file(
subject=f"IPC Combination Fcst (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Reset Stores: {reset_stores}
Job Params: {args}
""",
to_emails=email_to, file_uris=[ss_df_uri, new_drug_entries_uri,
missed_entries_uri],
file_paths=[all_cases_xl_path])
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/ipc-pmf/ipc_pmf.ipynb | ipc_pmf.ipynb |
```
!pip install zeno_etl_libs==1.0.40
"""main wrapper for IPC safety stock reset"""
import os
import sys
import argparse
import pandas as pd
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.django.api import Django
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.ipc.forecast_reset import ipc_forecast_reset
from zeno_etl_libs.utils.warehouse.wh_intervention.store_portfolio_consolidation import stores_ss_consolidation
from zeno_etl_libs.utils.ipc.goodaid_substitution import update_ga_ss
from zeno_etl_libs.utils.ipc.npi_exclusion import omit_npi_drugs
from zeno_etl_libs.utils.ipc.post_processing import post_processing
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.utils.ipc.store_portfolio_additions import generic_portfolio
```
## Main Function
```
def main(debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag, v6_active_flag,
v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
keep_all_generic_comp, rs_db_read, rs_db_write, read_schema,
write_schema, s3, django, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
if v3_active_flag == 'Y':
corrections_flag = True
else:
corrections_flag = False
# Define empty DFs to return in case of failure
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
logger.info("Forecast pipeline starts...")
try:
for store_id in reset_stores:
logger.info("IPC SS calculation started for store id: " + str(store_id))
if not type_list:
type_list = str(
list(reset_store_ops.loc[reset_store_ops['store_id'] ==
store_id, 'type'].unique()))
type_list = type_list.replace('[', '(').replace(']', ')')
# RUNNING FORECAST PIPELINE AND SAFETY STOCK CALC
drug_class, weekly_fcst, safety_stock_df, df_corrections, \
df_corrections_111, drugs_max_to_lock_ipcv6, \
drug_rejects_ipcv6 = ipc_forecast_reset(
store_id, type_list, reset_date, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
rs_db_read, read_schema,
drug_type_list_v4=drug_type_list_v4,
v5_active_flag=v5_active_flag,
v6_active_flag=v6_active_flag,
v6_type_list=v6_type_list,
v6_ptr_cut_off=v6_ptr_cut_off,
chronic_max_flag=chronic_max_flag,
logger=logger)
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
safety_stock_df, consolidation_log = stores_ss_consolidation(
safety_stock_df, rs_db_read, read_schema,
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
safety_stock_df, good_aid_ss_log = update_ga_ss(
safety_stock_df, store_id, rs_db_read, read_schema,
ga_inv_weight, rest_inv_weight,
top_inv_weight, substition_type=['generic'],
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point', logger=logger)
# KEEP ALL GENERIC COMPOSITIONS IN STORE
if keep_all_generic_comp == 'Y':
safety_stock_df = generic_portfolio(safety_stock_df,
rs_db_read, read_schema,
logger)
# OMIT NPI DRUGS
if omit_npi == 'Y':
safety_stock_df = omit_npi_drugs(safety_stock_df, store_id,
reset_date, rs_db_read,
read_schema, logger)
# POST PROCESSING AND ORDER VALUE CALCULATION
drug_class, weekly_fcst, safety_stock_df, \
order_value = post_processing(store_id, drug_class, weekly_fcst,
safety_stock_df, rs_db_read,
read_schema, logger)
order_value_all = order_value_all.append(order_value, ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc-forecast
weekly_fcst.rename(
columns={'date': 'week_begin_dt', 'fcst': 'point_forecast',
'std': 'forecast_deviation'}, inplace=True)
weekly_fcst['store_id'] = weekly_fcst['store_id'].astype(int)
weekly_fcst['drug_id'] = weekly_fcst['drug_id'].astype(int)
weekly_fcst['forecast_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
weekly_fcst['week_begin_dt'] = weekly_fcst['week_begin_dt']
weekly_fcst['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['created-by'] = 'etl-automation'
weekly_fcst['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['updated-by'] = 'etl-automation'
weekly_fcst.columns = [c.replace('_', '-') for c in weekly_fcst.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-forecast',
schema=write_schema)
columns = list(table_info['column_name'])
weekly_fcst = weekly_fcst[columns] # required column order
logger.info("Writing to table: ipc-forecast")
s3.write_df_to_db(df=weekly_fcst,
table_name='ipc-forecast',
db=rs_db_write, schema=write_schema)
# writing table ipc-safety-stock
safety_stock_df['store_id'] = safety_stock_df['store_id'].astype(int)
safety_stock_df['drug_id'] = safety_stock_df['drug_id'].astype(int)
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: ipc-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='ipc-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table ipc-abc-xyz-class
drug_class['store_id'] = drug_class['store_id'].astype(int)
drug_class['drug_id'] = drug_class['drug_id'].astype(int)
drug_class['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
drug_class['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['created-by'] = 'etl-automation'
drug_class['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['updated-by'] = 'etl-automation'
drug_class.columns = [c.replace('_', '-') for c in drug_class.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-abc-xyz-class',
schema=write_schema)
columns = list(table_info['column_name'])
drug_class = drug_class[columns] # required column order
logger.info("Writing to table: ipc-abc-xyz-class")
s3.write_df_to_db(df=drug_class,
table_name='ipc-abc-xyz-class',
db=rs_db_write, schema=write_schema)
# to write ipc v6 tables ...
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in safety_stock_df.columns]
ss_data_upload = safety_stock_df.query('order_upto_point > 0')[
['store_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema, logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
logger.info("All writes to RS-DB completed!")
# INTERNAL TABLE SCHEDULE UPDATE - OPS ORACLE
logger.info(f"Rescheduling SID:{store_id} in OPS ORACLE")
if isinstance(reset_store_ops, pd.DataFrame):
content_type = 74
object_id = reset_store_ops.loc[
reset_store_ops['store_id'] == store_id, 'object_id'].unique()
for obj in object_id:
request_body = {"object_id": int(obj), "content_type": content_type}
api_response, _ = django.django_model_execution_log_create_api(
request_body)
reset_store_ops.loc[
reset_store_ops['object_id'] == obj,
'api_call_response'] = api_response
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"IPC code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"IPC code execution status: {status}")
return status, order_value_all, new_drug_entries, missed_entries
```
## Pass Params
```
env = "dev"
email_to = "[email protected]"
debug_mode = "N"
os.environ['env'] = env
logger = get_logger()
s3 = S3()
django = Django()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
```
## Read params from RS table
```
from zeno_etl_libs.helper.parameter.job_parameter import parameter
args = parameter.get_params(job_id=109)
# JOB EXCLUSIVE PARAMS
exclude_stores = args["exclude_stores"]
goodaid_ss_flag = args["goodaid_ss_flag"]
ga_inv_weight = args["ga_inv_weight"]
rest_inv_weight = args["rest_inv_weight"]
top_inv_weight = args["top_inv_weight"]
chronic_max_flag = args["chronic_max_flag"]
wh_gen_consolidation = args["wh_gen_consolidation"]
v5_active_flag = args["v5_active_flag"]
v6_active_flag = args["v6_active_flag"]
v6_type_list = args["v6_type_list"]
v6_ptr_cut_off = args["v6_ptr_cut_off"]
reset_date = args["reset_date"]
reset_stores = args["reset_stores"]
v3_active_flag = args["v3_active_flag"]
corrections_selling_probability_cutoff = args["corrections_selling_probability_cutoff"]
corrections_cumulative_probability_cutoff = args["corrections_cumulative_probability_cutoff"]
drug_type_list_v4 = args["drug_type_list_v4"]
omit_npi = args["omit_npi"]
keep_all_generic_comp = args["keep_all_generic_comp"]
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
if reset_stores == [0]: # Fetch scheduled IPC stores from OPS ORACLE
store_query = """
select "id", name, "opened-at" as opened_at
from "{read_schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {0}
""".format(str(exclude_stores).replace('[', '(').replace(']', ')'),
read_schema=read_schema)
stores = rs_db_read.get_df(store_query)
# considering reset of old stores only (age > 1 year)
store_id = stores.loc[dt.datetime.now() -
stores['opened_at'] >
dt.timedelta(days=365), 'id'].values
# QUERY TO GET SCHEDULED STORES AND TYPE FROM OPS ORACLE
pg_internal = PostGre(is_internal=True)
pg_internal.open_connection()
reset_store_query = """
SELECT
"ssr"."id" as object_id,
"s"."bpos_store_id" as store_id,
"dc"."slug" as type,
"ssr"."drug_grade"
FROM
"safety_stock_reset_drug_category_mapping" ssr
INNER JOIN "ops_store_manifest" osm
ON ( "ssr"."ops_store_manifest_id" = "osm"."id" )
INNER JOIN "retail_store" s
ON ( "osm"."store_id" = "s"."id" )
INNER JOIN "drug_category" dc
ON ( "ssr"."drug_category_id" = "dc"."id")
WHERE
(
( "ssr"."should_run_daily" = TRUE OR
"ssr"."trigger_dates" && ARRAY[ date('{reset_date}')] )
AND "ssr"."is_auto_generate" = TRUE
AND "osm"."is_active" = TRUE
AND "osm"."is_generate_safety_stock_reset" = TRUE
AND "dc"."is_safety_stock_reset_enabled" = TRUE
AND "dc"."is_active" = TRUE
AND s.bpos_store_id in {store_list}
)
""".format(
store_list=str(list(store_id)).replace('[', '(').replace(']', ')'),
reset_date=reset_date)
reset_store_ops = pd.read_sql_query(reset_store_query,
pg_internal.connection)
pg_internal.close_connection()
reset_store_ops['api_call_response'] = False
reset_stores = reset_store_ops['store_id'].unique()
type_list = None
else:
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
reset_store_ops = None
```
## Execute Main Function
```
""" calling the main function """
status, order_value_all, new_drug_entries, \
missed_entries = main(
debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag,
v6_active_flag, v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
keep_all_generic_comp, rs_db_read, rs_db_write, read_schema,
write_schema, s3, django, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
```
## Send Email Notification
```
# save email attachements to s3
order_value_all_uri = s3.save_df_to_s3(order_value_all,
file_name=f"order_value_all_{reset_date}.csv")
new_drug_entries_uri = s3.save_df_to_s3(new_drug_entries,
file_name=f"new_drug_entries_{reset_date}.csv")
missed_entries_uri = s3.save_df_to_s3(missed_entries,
file_name=f"missed_entries_{reset_date}.csv")
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"IPC SS Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Reset Stores: {reset_stores}
Job Params: {args}
""",
to_emails=email_to, file_uris=[order_value_all_uri,
new_drug_entries_uri,
missed_entries_uri])
logger.info("Script ended")
```
File: sagemaker-jobs/src/scripts/ipc-ss-main/ipc_ss_main.ipynb (package: zeno-etl-libs)
```
!pip install zeno_etl_libs==1.0.115
"""
main wrapper for Distributor Ranking 2.0 algorithm
author: [email protected]
"""
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.distributor_ranking2.distributor_ranking_calc import ranking_calc_dc, ranking_calc_franchisee
from zeno_etl_libs.utils.distributor_ranking2.tech_processing import process_tech_df
```
## Main Function
```
def main(debug_mode, reset_date, time_interval_dc, time_interval_franchisee,
volume_fraction, franchisee_ranking_active, franchisee_stores,
as_ms_weights_dc_drug_lvl, as_ms_weights_dc_type_lvl,
pr_weights_dc_drug_lvl, pr_weights_dc_type_lvl,
weights_franchisee_drug_lvl, weights_franchisee_type_lvl, s3,
rs_db_read, rs_db_write, read_schema, write_schema):
mysql_write = MySQL(read_only=False)
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
# define empty variables to return in case of fail
final_ranks_franchisee = pd.DataFrame()
ranked_features_franchisee = pd.DataFrame()
dc_evaluated = []
franchisee_stores_evaluated = []
# ensure all weights add up to 1
sum_all_wts = sum(list(as_ms_weights_dc_drug_lvl.values())) + \
sum(list(as_ms_weights_dc_type_lvl.values())) + \
sum(list(pr_weights_dc_drug_lvl.values())) + \
sum(list(pr_weights_dc_type_lvl.values())) + \
sum(list(weights_franchisee_drug_lvl.values())) + \
sum(list(weights_franchisee_type_lvl.values()))
if sum_all_wts == 6:
logger.info("All input weights add upto 1 | Continue Execution")
else:
logger.info("Input weights does not add upto 1 | Stop Execution")
return status, reset_date, dc_evaluated, franchisee_stores_evaluated
try:
# calculate ranks
logger.info("Calculating Zippin DC-level Ranking")
ranked_features_dc, final_ranks_dc = ranking_calc_dc(
reset_date, time_interval_dc, as_ms_weights_dc_drug_lvl,
as_ms_weights_dc_type_lvl, pr_weights_dc_drug_lvl,
pr_weights_dc_type_lvl, logger, db=rs_db_read, schema=read_schema)
if franchisee_ranking_active == 'Y':
logger.info("Calculating Franchisee-level Ranking")
ranked_features_franchisee, \
final_ranks_franchisee = ranking_calc_franchisee(
reset_date, time_interval_franchisee, franchisee_stores,
weights_franchisee_drug_lvl, weights_franchisee_type_lvl,
logger, db=rs_db_read, schema=read_schema)
else:
logger.info("Skipping Franchisee-level Ranking")
# process ranked dfs to tech required format
distributor_ranking_rules, \
distributor_ranking_rule_values = process_tech_df(
final_ranks_dc, final_ranks_franchisee, volume_fraction)
# combine rank df and feature df (dc & franchisee)
final_ranks = pd.concat([final_ranks_dc, final_ranks_franchisee], axis=0)
ranked_features = pd.concat([ranked_features_dc, ranked_features_franchisee], axis=0)
ranked_features.rename(
{"partial_dc_id": "dc_id", "partial_distributor_id": "distributor_id",
"partial_distributor_credit_period": "distributor_credit_period",
"partial_distributor_name": "distributor_name"}, axis=1, inplace=True)
final_ranks.rename(
{"partial_dc_id": "dc_id"}, axis=1, inplace=True)
# for email info
dc_evaluated = distributor_ranking_rules["dc_id"].unique().tolist()
franchisee_stores_evaluated = distributor_ranking_rules[
"store_id"].unique().tolist()
# adding required fields in tech df
distributor_ranking_rules['rule_start_date'] = reset_date
distributor_ranking_rules['is_active'] = 1
distributor_ranking_rules['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
distributor_ranking_rules['created_by'] = 'etl-automation'
# adding required fields in ds-internal df
final_ranks.loc[:, 'reset_date'] = reset_date
final_ranks['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
final_ranks['created_by'] = 'etl-automation'
final_ranks['updated_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
final_ranks['updated_by'] = 'etl-automation'
ranked_features.loc[:, 'reset_date'] = reset_date
ranked_features['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
ranked_features['created_by'] = 'etl-automation'
ranked_features['updated_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
ranked_features['updated_by'] = 'etl-automation'
# formatting column names
distributor_ranking_rule_values.columns = [c.replace('_', '-') for c in
distributor_ranking_rule_values.columns]
distributor_ranking_rules.columns = [c.replace('_', '-') for c in
distributor_ranking_rules.columns]
final_ranks.columns = [c.replace('_', '-') for c in final_ranks.columns]
ranked_features.columns = [c.replace('_', '-') for c in ranked_features.columns]
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
logger.info("Writing to table: distributor-ranking2-features")
table_info = helper.get_table_info(db=rs_db_write,
table_name='distributor-ranking2-features',
schema=write_schema)
columns = list(table_info['column_name'])
ranked_features = ranked_features[columns] # required column order
s3.write_df_to_db(df=ranked_features,
table_name='distributor-ranking2-features',
db=rs_db_write, schema=write_schema)
logger.info("Writing to table: distributor-ranking2-final-ranks")
table_info = helper.get_table_info(db=rs_db_write,
table_name='distributor-ranking2-final-ranks',
schema=write_schema)
columns = list(table_info['column_name'])
final_ranks = final_ranks[columns] # required column order
s3.write_df_to_db(df=final_ranks,
table_name='distributor-ranking2-final-ranks',
db=rs_db_write, schema=write_schema)
logger.info("Writing table to RS-DB completed!")
mysql_write.open_connection()
logger.info("Updating table to MySQL")
try:
index_increment = int(
pd.read_sql(
'select max(id) from `distributor-ranking-rules`',
con=mysql_write.connection).values[0]) + 1
redundant_increment = int(
pd.read_sql(
'select max(id) from `distributor-ranking-rule-values`',
con=mysql_write.connection).values[0]) + 1
except:
index_increment = 1
redundant_increment = 1
logger.info(f"Incremented distributor-ranking-rules by {index_increment}")
logger.info(f"Incremented distributor-ranking-rule-values by {redundant_increment}")
distributor_ranking_rules['id'] = distributor_ranking_rules['id'] + index_increment
distributor_ranking_rule_values['distributor-ranking-rule-id'] = distributor_ranking_rule_values[
'distributor-ranking-rule-id'] + index_increment
distributor_ranking_rule_values['id'] = distributor_ranking_rule_values['id'] + redundant_increment
logger.info("Setting existing rules to inactive")
mysql_write.engine.execute("UPDATE `distributor-ranking-rules` SET `is-active` = 0")
# mysql_write.engine.execute("SET FOREIGN_KEY_CHECKS=0") # use only in staging
logger.info("Writing to table: distributor-ranking-rules")
distributor_ranking_rules.to_sql(
name='distributor-ranking-rules',
con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=10000)
logger.info("Writing to table: distributor-ranking-rule-values")
distributor_ranking_rule_values.to_sql(
name='distributor-ranking-rule-values',
con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=10000)
# mysql_write.engine.execute("SET FOREIGN_KEY_CHECKS=1") # use only in staging
logger.info("Updating table to MySQL completed!")
mysql_write.close()
else:
logger.info("Writing to RS-DB & MySQL skipped")
status = 'Success'
logger.info(f"Distributor Ranking code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"Distributor Ranking code execution status: {status}")
return status, reset_date, dc_evaluated, franchisee_stores_evaluated
```
## Pass Params
```
env = "dev"
email_to = "[email protected]"
debug_mode = "N"
os.environ['env'] = env
logger = get_logger()
s3 = S3()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
```
## Read Params from RS table
```
from zeno_etl_libs.helper.parameter.job_parameter import parameter
args = parameter.get_params(job_id=94)
# JOB EXCLUSIVE PARAMS
reset_date = args["reset_date"]
time_interval_dc = args["time_interval_dc"]
time_interval_franchisee = args["time_interval_franchisee"]
volume_fraction = args["volume_fraction"]
franchisee_ranking_active = args["franchisee_ranking_active"]
franchisee_stores = args["franchisee_stores"]
as_ms_weights_dc_drug_lvl = args["as_ms_weights_dc_drug_lvl"]
as_ms_weights_dc_type_lvl = args["as_ms_weights_dc_type_lvl"]
pr_weights_dc_drug_lvl = args["pr_weights_dc_drug_lvl"]
pr_weights_dc_type_lvl = args["pr_weights_dc_type_lvl"]
weights_franchisee_drug_lvl = args["weights_franchisee_drug_lvl"]
weights_franchisee_type_lvl = args["weights_franchisee_type_lvl"]
if reset_date == 'YYYY-MM-DD':
reset_date = dt.date.today()
else:
reset_date = dt.datetime.strptime(reset_date, "%Y-%m-%d").date()
```
## Execute Main Function
```
""" calling the main function """
status, reset_date, dc_evaluated, \
franchisee_stores_evaluated = main(
debug_mode, reset_date, time_interval_dc, time_interval_franchisee,
volume_fraction, franchisee_ranking_active, franchisee_stores,
as_ms_weights_dc_drug_lvl, as_ms_weights_dc_type_lvl,
pr_weights_dc_drug_lvl, pr_weights_dc_type_lvl,
weights_franchisee_drug_lvl, weights_franchisee_type_lvl, s3,
rs_db_read, rs_db_write, read_schema, write_schema)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
```
## Send Email Notification
```
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"Distributor Ranking 2.0 Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
DC's Evaluated: {dc_evaluated}
Franchisee Stores Evaluated: {franchisee_stores_evaluated}
Job Params: {args}
""",
to_emails=email_to)
logger.info("Script ended")
```
File: sagemaker-jobs/src/scripts/distributor-ranking2-main/distributor_ranking2_main.ipynb (package: zeno-etl-libs)
```
import argparse
import os
import datetime
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
env = "stage"
os.environ['env'] = env
logger = get_logger(level="INFO")
db = DB(read_only=False)
db.db_secrets
db.open_connection()
query = f"""
insert into "prod2-generico"."temp-str" (col1) values ('Hello at {datetime.datetime.now()}')
"""
db.execute(query=query)
db.close_connection()
```
File: sagemaker-jobs/src/scripts/experiments/redshift-write-demo.ipynb (package: zeno-etl-libs)
```
"""main wrapper for Non-IPC safety stock reset"""
import os
import sys
import argparse
import pandas as pd
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.utils.non_ipc.data_prep.non_ipc_data_prep import non_ipc_data_prep
from zeno_etl_libs.utils.non_ipc.forecast.forecast_main import non_ipc_forecast
from zeno_etl_libs.utils.non_ipc.safety_stock.safety_stock import non_ipc_safety_stock_calc
from zeno_etl_libs.utils.warehouse.wh_intervention.store_portfolio_consolidation import stores_ss_consolidation
from zeno_etl_libs.utils.ipc.goodaid_substitution import update_ga_ss
from zeno_etl_libs.utils.ipc.npi_exclusion import omit_npi_drugs
from zeno_etl_libs.utils.ipc.post_processing import post_processing
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
def main(debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag,
v6_active_flag, v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
agg_week_cnt, kind, rs_db_read, rs_db_write, read_schema, write_schema,
logger):
s3 = S3()
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
if v3_active_flag == 'Y':
corrections_flag = True
else:
corrections_flag = False
# Define empty DFs to return in case of failure
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
logger.info("Forecast pipeline starts...")
try:
for store_id in reset_stores:
logger.info("Non-IPC SS calculation started for store id: " +
str(store_id))
# RUNNING DATA PREPARATION
drug_data_agg_weekly, drug_data_weekly, drug_class, \
bucket_sales = non_ipc_data_prep(
store_id_list=store_id, reset_date=reset_date,
type_list=type_list, db=rs_db_read, schema=read_schema,
agg_week_cnt=agg_week_cnt,
logger=logger)
# CREATING TRAIN FLAG TO HANDLE STORES WITH HISTORY < 16 WEEKS
week_count = drug_data_weekly['date'].nunique()
if week_count >= 16:
train_flag = True
else:
train_flag = False
# RUNNING FORECAST PIPELINE AND SAFETY STOCK CALC
out_of_sample = 1
horizon = 1
train, error, predict, ensemble_train, ensemble_error, \
ensemble_predict = non_ipc_forecast(
drug_data_agg_weekly, drug_data_weekly, drug_class,
out_of_sample, horizon, train_flag, logger, kind)
final_predict = ensemble_predict.query('final_fcst == "Y"')
safety_stock_df, df_corrections, \
df_corrections_111, drugs_max_to_lock_ipcv6, \
drug_rejects_ipcv6 = non_ipc_safety_stock_calc(
store_id, drug_data_weekly, reset_date, final_predict,
drug_class, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
chronic_max_flag, train_flag, drug_type_list_v4,
v5_active_flag, v6_active_flag, v6_type_list,
v6_ptr_cut_off, rs_db_read, read_schema, logger)
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
safety_stock_df, consolidation_log = stores_ss_consolidation(
safety_stock_df, rs_db_read, read_schema,
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
safety_stock_df, good_aid_ss_log = update_ga_ss(
safety_stock_df, store_id, rs_db_read, read_schema,
ga_inv_weight, rest_inv_weight,
top_inv_weight, substition_type=['generic'],
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point', logger=logger)
# OMIT NPI DRUGS
if omit_npi == 'Y':
safety_stock_df = omit_npi_drugs(safety_stock_df, store_id,
reset_date, rs_db_read,
read_schema, logger)
# POST PROCESSING AND ORDER VALUE CALCULATION
safety_stock_df['percentile'] = 0.5
final_predict.rename(columns={'month_begin_dt': 'date'},
inplace=True)
drug_class, weekly_fcst, safety_stock_df, \
order_value = post_processing(
store_id, drug_class, final_predict,
safety_stock_df, rs_db_read,
read_schema, logger)
order_value_all = order_value_all.append(order_value,
ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc-forecast
predict['forecast_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
predict['store_id'] = store_id
predict['month_begin_dt'] = predict['month_begin_dt'].dt.date
predict['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
predict['created-by'] = 'etl-automation'
predict['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
predict['updated-by'] = 'etl-automation'
predict.columns = [c.replace('_', '-') for c in predict.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='non-ipc-predict',
schema=write_schema)
columns = list(table_info['column_name'])
predict = predict[columns] # required column order
logger.info("Writing to table: non-ipc-predict")
s3.write_df_to_db(df=predict,
table_name='non-ipc-predict',
db=rs_db_write, schema=write_schema)
# writing table non-ipc-safety-stock
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in
safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='non-ipc-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: non-ipc-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='non-ipc-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table non-ipc-abc-xyz-class
drug_class['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
drug_class['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['created-by'] = 'etl-automation'
drug_class['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['updated-by'] = 'etl-automation'
drug_class.columns = [c.replace('_', '-') for c in
drug_class.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='non-ipc-abc-xyz-class',
schema=write_schema)
columns = list(table_info['column_name'])
drug_class = drug_class[columns] # required column order
logger.info("Writing to table: non-ipc-abc-xyz-class")
s3.write_df_to_db(df=drug_class,
table_name='non-ipc-abc-xyz-class',
db=rs_db_write, schema=write_schema)
# to write ipc v6 tables ...
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in
safety_stock_df.columns]
ss_data_upload = safety_stock_df.query('order_upto_point > 0')[
['store_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema,
logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
logger.info("All writes to RS-DB completed!")
# INTERNAL TABLE SCHEDULE UPDATE - OPS ORACLE
# logger.info(f"Rescheduling SID:{store_id} in OPS ORACLE")
# if reset_store_ops != None:
# content_type = 74
# object_id = reset_store_ops.loc[
# reset_store_ops[
# 'store_id'] == store_id, 'object_id'].unique()
# for obj in object_id:
# request_body = {
# "object_id": int(obj), "content_type": content_type}
# api_response, _ = django_model_execution_log_create_api(
# request_body)
# reset_store_ops.loc[
# reset_store_ops['object_id'] == obj,
# 'api_call_response'] = api_response
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"Non-IPC code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"Non-IPC code execution status: {status}")
return order_value_all, new_drug_entries, missed_entries
# Parameter passing
env = "dev"
os.environ['env'] = env
email_to = "[email protected]"
debug_mode = "N"
# JOB EXCLUSIVE PARAMS
exclude_stores = [52, 60, 92, 243, 281]
goodaid_ss_flag = "Y"
ga_inv_weight = 0.5
rest_inv_weight = 0.0
top_inv_weight = 1
chronic_max_flag = "N"
wh_gen_consolidation = "Y"
v5_active_flag = "N"
v6_active_flag = "N"
v6_type_list = ['ethical', 'generic', 'others']
v6_ptr_cut_off = 400
reset_date = "YYYY-MM-DD"
reset_stores = [263]
v3_active_flag = "N"
corrections_selling_probability_cutoff = "{'ma_less_than_2': 0.40, 'ma_more_than_2' : 0.40}"
corrections_cumulative_probability_cutoff = "{'ma_less_than_2':0.50,'ma_more_than_2':0.63}"
drug_type_list_v4 = "{'generic':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}','ethical':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}','others':'{0:[0,0,0], 1:[0,1,2], 2:[0,1,2],3:[1,2,3]}'}"
agg_week_cnt = 4
kind = 'mae'
omit_npi = 'N'
# EVALUATE REQUIRED JSON PARAMS
corrections_selling_probability_cutoff = literal_eval(
corrections_selling_probability_cutoff)
corrections_cumulative_probability_cutoff = literal_eval(
corrections_cumulative_probability_cutoff)
drug_type_list_v4 = literal_eval(drug_type_list_v4)
logger = get_logger()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
if reset_stores == [0]: # Fetch scheduled Non-IPC stores from OPS ORACLE
store_query = """
select "id", name, "opened-at" as opened_at
from "{read_schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {0}
""".format(
str(exclude_stores).replace('[', '(').replace(']', ')'),
read_schema=read_schema)
stores = rs_db_read.get_df(store_query)
# considering reset of stores aged (3 months < age < 1 year)
store_id = stores.loc[
(dt.datetime.now() - stores['opened_at'] > dt.timedelta(days=90)) &
(dt.datetime.now() - stores['opened_at'] <= dt.timedelta(days=365)),
'id'].values
# QUERY TO GET SCHEDULED STORES AND TYPE FROM OPS ORACLE
pg_internal = PostGre(is_internal=True)
pg_internal.open_connection()
reset_store_query = """
SELECT
"ssr"."id" as object_id,
"s"."bpos_store_id" as store_id,
"dc"."slug" as type,
"ssr"."drug_grade"
FROM
"safety_stock_reset_drug_category_mapping" ssr
INNER JOIN "ops_store_manifest" osm
ON ( "ssr"."ops_store_manifest_id" = "osm"."id" )
INNER JOIN "retail_store" s
ON ( "osm"."store_id" = "s"."id" )
INNER JOIN "drug_category" dc
ON ( "ssr"."drug_category_id" = "dc"."id")
WHERE
(
( "ssr"."should_run_daily" = TRUE OR
"ssr"."trigger_dates" && ARRAY[ date('{reset_date}')] )
AND "ssr"."is_auto_generate" = TRUE
AND "osm"."is_active" = TRUE
AND "osm"."is_generate_safety_stock_reset" = TRUE
AND "dc"."is_safety_stock_reset_enabled" = TRUE
AND "dc"."is_active" = TRUE
AND s.bpos_store_id in {store_list}
)
""".format(
store_list=str(list(store_id)).replace('[', '(').replace(']',')'),
reset_date=reset_date)
reset_store_ops = pd.read_sql_query(reset_store_query,
pg_internal.connection)
pg_internal.close_connection()
reset_store_ops['api_call_response'] = False
reset_stores = reset_store_ops['store_id'].unique()
type_list = str(
list(reset_store_ops.loc[reset_store_ops['store_id'] ==
store_id, 'type'].unique()))
type_list = type_list.replace('[', '(').replace(']', ')')
else:
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
reset_store_ops = None
""" calling the main function """
order_value_all, new_drug_entries, \
missed_entries = main(
debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag,
v6_active_flag, v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
agg_week_cnt, kind, rs_db_read, rs_db_write, read_schema,
write_schema, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
# to write ..............
```
File: sagemaker-jobs/src/scripts/experiments/non_ipc_ss_main.ipynb (package: zeno-etl-libs)
```
"""main wrapper for IPC safety stock reset"""
import os
import sys
import argparse
import pandas as pd
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
# Add path so that we can import zeno_etl_libs from the local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.utils.ipc.forecast_reset import ipc_forecast_reset
from zeno_etl_libs.utils.warehouse.wh_intervention.store_portfolio_consolidation import stores_ss_consolidation
from zeno_etl_libs.utils.ipc.goodaid_substitution import update_ga_ss
from zeno_etl_libs.utils.ipc.npi_exclusion import omit_npi_drugs
from zeno_etl_libs.utils.ipc.post_processing import post_processing
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
```
# main
```
def main(debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag, v6_active_flag,
v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
rs_db_read, rs_db_write, read_schema, write_schema, logger):
s3 = S3()
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
if v3_active_flag == 'Y':
corrections_flag = True
else:
corrections_flag = False
# Define empty DFs to return in case of failure
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
logger.info("Forecast pipeline starts...")
try:
for store_id in reset_stores:
logger.info("IPC SS calculation started for store id: " + str(store_id))
# RUNNING FORECAST PIPELINE AND SAFETY STOCK CALC
drug_class, weekly_fcst, safety_stock_df, df_corrections, \
df_corrections_111, drugs_max_to_lock_ipcv6, \
drug_rejects_ipcv6 = ipc_forecast_reset(
store_id, type_list, reset_date, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
rs_db_read, read_schema,
drug_type_list_v4=drug_type_list_v4,
v5_active_flag=v5_active_flag,
v6_active_flag=v6_active_flag,
v6_type_list=v6_type_list,
v6_ptr_cut_off=v6_ptr_cut_off,
chronic_max_flag=chronic_max_flag,
logger=logger)
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
safety_stock_df, consolidation_log = stores_ss_consolidation(
safety_stock_df, rs_db_read, read_schema,
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
safety_stock_df, good_aid_ss_log = update_ga_ss(
safety_stock_df, store_id, rs_db_read, read_schema,
ga_inv_weight, rest_inv_weight,
top_inv_weight, substition_type=['generic'],
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point', logger=logger)
# OMIT NPI DRUGS
if omit_npi == 'Y':
safety_stock_df = omit_npi_drugs(safety_stock_df, store_id,
reset_date, rs_db_read,
read_schema, logger)
# POST PROCESSING AND ORDER VALUE CALCULATION
drug_class, weekly_fcst, safety_stock_df, \
order_value = post_processing(store_id, drug_class, weekly_fcst,
safety_stock_df, rs_db_read,
read_schema, logger)
order_value_all = order_value_all.append(order_value, ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc-forecast
weekly_fcst.rename(
columns={'date': 'week_begin_dt', 'fcst': 'point_forecast',
'std': 'forecast_deviation'}, inplace=True)
weekly_fcst['forecast_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
weekly_fcst['week_begin_dt'] = weekly_fcst['week_begin_dt']
weekly_fcst['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['created-by'] = 'etl-automation'
weekly_fcst['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['updated-by'] = 'etl-automation'
weekly_fcst.columns = [c.replace('_', '-') for c in weekly_fcst.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-forecast',
schema=write_schema)
columns = list(table_info['column_name'])
weekly_fcst = weekly_fcst[columns] # required column order
logger.info("Writing to table: ipc-forecast")
s3.write_df_to_db(df=weekly_fcst,
table_name='ipc-forecast',
db=rs_db_write, schema=write_schema)
# writing table ipc-safety-stock
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: ipc-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='ipc-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table ipc-abc-xyz-class
drug_class['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
drug_class['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['created-by'] = 'etl-automation'
drug_class['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['updated-by'] = 'etl-automation'
drug_class.columns = [c.replace('_', '-') for c in drug_class.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-abc-xyz-class',
schema=write_schema)
columns = list(table_info['column_name'])
drug_class = drug_class[columns] # required column order
logger.info("Writing to table: ipc-abc-xyz-class")
s3.write_df_to_db(df=drug_class,
table_name='ipc-abc-xyz-class',
db=rs_db_write, schema=write_schema)
# to write ipc v6 tables ...
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in safety_stock_df.columns]
ss_data_upload = safety_stock_df.query('order_upto_point > 0')[
['store_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema, logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
logger.info("All writes to RS-DB completed!")
# INTERNAL TABLE SCHEDULE UPDATE - OPS ORACLE
# logger.info(f"Rescheduling SID:{store_id} in OPS ORACLE")
# if reset_store_ops != None:
# content_type = 74
# object_id = reset_store_ops.loc[
# reset_store_ops[
# 'store_id'] == store_id, 'object_id'].unique()
# for obj in object_id:
# request_body = {
# "object_id": int(obj), "content_type": content_type}
# api_response, _ = django_model_execution_log_create_api(
# request_body)
# reset_store_ops.loc[
# reset_store_ops['object_id'] == obj,
# 'api_call_response'] = api_response
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"IPC code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"IPC code execution status: {status}")
return order_value_all, new_drug_entries, missed_entries
# Parameter passing
env = "stage"
os.environ['env'] = env
email_to = "[email protected]"
debug_mode = "N"
# JOB EXCLUSIVE PARAMS
exclude_stores = [52, 60, 92, 243, 281]
goodaid_ss_flag = "Y"
ga_inv_weight = 0.5
rest_inv_weight = 0.0
top_inv_weight = 1
chronic_max_flag = "N"
wh_gen_consolidation = "Y"
v5_active_flag = "N"
v6_active_flag = "N"
v6_type_list = ['ethical', 'generic', 'others']
v6_ptr_cut_off = 400
reset_date = "YYYY-MM-DD"
reset_stores = [2]
v3_active_flag = "N"
corrections_selling_probability_cutoff = "{'ma_less_than_2': 0.40, 'ma_more_than_2' : 0.40}"
corrections_cumulative_probability_cutoff = "{'ma_less_than_2':0.50,'ma_more_than_2':0.63}"
drug_type_list_v4 = "{'generic':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}','ethical':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}','others':'{0:[0,0,0], 1:[0,1,2], 2:[0,1,2],3:[1,2,3]}'}"
omit_npi = 'N'
# EVALUATE REQUIRED JSON PARAMS
corrections_selling_probability_cutoff = literal_eval(
corrections_selling_probability_cutoff)
corrections_cumulative_probability_cutoff = literal_eval(
corrections_cumulative_probability_cutoff)
drug_type_list_v4 = literal_eval(drug_type_list_v4)
logger = get_logger()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
if 0:
pass
else:
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
reset_store_ops = None
""" calling the main function """
order_value_all, new_drug_entries, \
missed_entries = main(
debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag,
v6_active_flag, v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
rs_db_read, rs_db_write, read_schema, write_schema, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
# to write ..............
```
File: sagemaker-jobs/src/scripts/experiments/ipc_ss_main.ipynb (package: zeno-etl-libs)
```
import asyncio
import errno
import io
import logging
import json
import os
import re
import time
from subprocess import Popen, PIPE, STDOUT, DEVNULL
from shlex import split
import zipfile as zip
import botocore
import boto3
def ensure_session(session=None):
"""If session is None, create a default session and return it. Otherwise return the session passed in"""
if session is None:
session = boto3.session.Session()
return session
def get_execution_role(session):
"""Return the role ARN whose credentials are used to call the API.
Throws an exception if the current AWS identity is not a role.
Returns:
(str): The role ARN
"""
assumed_role = session.client("sts").get_caller_identity()["Arn"]
if ":user/" in assumed_role:
user_name = assumed_role[assumed_role.rfind("/") + 1 :]
raise ValueError(
f"You are running as the IAM user '{user_name}'. You must supply an IAM role to run SageMaker jobs."
)
if "AmazonSageMaker-ExecutionRole" in assumed_role:
role = re.sub(
r"^(.+)sts::(\d+):assumed-role/(.+?)/.*$",
r"\1iam::\2:role/service-role/\3",
assumed_role,
)
return role
role = re.sub(
r"^(.+)sts::(\d+):assumed-role/(.+?)/.*$", r"\1iam::\2:role/\3", assumed_role
)
# Call IAM to get the role's path
role_name = role[role.rfind("/") + 1 :]
arn = session.client("iam").get_role(RoleName=role_name)["Role"]["Arn"]
if ":role/" in arn:
return arn
message = "The current AWS identity is not a role: {}, therefore it cannot be used as a SageMaker execution role"
raise ValueError(message.format(arn))
def execute_notebook(
*,
image,
input_path,
output_prefix,
notebook,
parameters,
role=None,
instance_type,
session,
in_vpc
):
session = ensure_session(session)
if not role:
role = get_execution_role(session)
elif "/" not in role:
account = session.client("sts").get_caller_identity()["Account"]
role = "arn:aws:iam::{}:role/{}".format(account, role)
if "/" not in image:
account = session.client("sts").get_caller_identity()["Account"]
region = session.region_name
image = "{}.dkr.ecr.{}.amazonaws.com/{}:latest".format(account, region, image)
if notebook == None:
notebook = input_path
base = os.path.basename(notebook)
nb_name, nb_ext = os.path.splitext(base)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
job_name = (
("papermill-" + re.sub(r"[^-a-zA-Z0-9]", "-", nb_name))[: 62 - len(timestamp)]
+ "-"
+ timestamp
)
input_directory = "/opt/ml/processing/input/"
local_input = input_directory + os.path.basename(input_path)
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
local_output = "/opt/ml/processing/output/"
api_args = {
"ProcessingInputs": [
{
"InputName": "notebook",
"S3Input": {
"S3Uri": input_path,
"LocalPath": input_directory,
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
},
},
],
"ProcessingOutputConfig": {
"Outputs": [
{
"OutputName": "result",
"S3Output": {
"S3Uri": output_prefix,
"LocalPath": local_output,
"S3UploadMode": "EndOfJob",
},
},
],
},
"ProcessingJobName": job_name,
"ProcessingResources": {
"ClusterConfig": {
"InstanceCount": 1,
"InstanceType": instance_type,
"VolumeSizeInGB": 40,
}
},
"StoppingCondition": {"MaxRuntimeInSeconds": 7200},
"AppSpecification": {
"ImageUri": image,
"ContainerArguments": [
"run_notebook",
],
},
"RoleArn": role,
"Environment": {},
}
if in_vpc:
api_args["NetworkConfig"] = {
'EnableInterContainerTrafficEncryption': False,
'EnableNetworkIsolation': False,
'VpcConfig': {
'SecurityGroupIds': [
"sg-0101c938006dab959"
],
'Subnets': [
'subnet-0446eb5f39df5ceca'
]
}
}
api_args["Environment"]["PAPERMILL_INPUT"] = local_input
api_args["Environment"]["PAPERMILL_OUTPUT"] = local_output + result
if os.environ.get("AWS_DEFAULT_REGION") != None:
api_args["Environment"]["AWS_DEFAULT_REGION"] = os.environ["AWS_DEFAULT_REGION"]
api_args["Environment"]["PAPERMILL_PARAMS"] = json.dumps(parameters)
api_args["Environment"]["PAPERMILL_NOTEBOOK_NAME"] = notebook
api_args["Environment"]["AWS_ACCESS_KEY_ID"] = "AKIA5NJ64OJ5UEZ4LS5P"
api_args["Environment"]["AWS_SECRET_ACCESS_KEY"] = "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"
api_args["Environment"]["REGION_NAME"] = "ap-south-1"
client = boto3.client("sagemaker")
result = client.create_processing_job(**api_args)
job_arn = result["ProcessingJobArn"]
job = re.sub("^.*/", "", job_arn)
return job
def default_bucket():
return "sagemaker-ap-south-1-921939243643"
def upload_notebook(notebook, session=None):
"""Uploads a notebook to S3 in the default SageMaker Python SDK bucket for
this user. The resulting S3 object will be named "s3://<bucket>/papermill-input/notebook-YYYY-MM-DD-hh-mm-ss.ipynb".
Args:
notebook (str):
The filename of the notebook you want to upload. (Required)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The resulting object name in S3 in URI format.
"""
with open(notebook, "rb") as f:
return upload_fileobj(f, session)
def upload_fileobj(notebook_fileobj, session=None):
"""Uploads a file object to S3 in the default SageMaker Python SDK bucket for
this user. The resulting S3 object will be named "s3://<bucket>/papermill-input/notebook-YYYY-MM-DD-hh-mm-ss.ipynb".
Args:
notebook_fileobj (fileobj):
A file object (as returned from open) that is reading from the notebook you want to upload. (Required)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The resulting object name in S3 in URI format.
"""
session = ensure_session(session)
snotebook = "notebook-{}.ipynb".format(
time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
)
s3 = session.client("s3")
key = "papermill_input/" + snotebook
bucket = default_bucket()
s3path = "s3://{}/{}".format(bucket, key)
s3.upload_fileobj(notebook_fileobj, bucket, key)
return s3path
def get_output_prefix():
"""Returns an S3 prefix in the Python SDK default bucket."""
return "s3://{}/papermill_output".format(default_bucket())
def wait_for_complete(job_name, progress=True, sleep_time=10, session=None):
"""Wait for a notebook execution job to complete.
Args:
job_name (str):
The name of the SageMaker Processing Job executing the notebook. (Required)
progress (boolean):
If True, print a period after every poll attempt. (Default: True)
sleep_time (int):
The number of seconds between polls. (Default: 10)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
A tuple with the job status and the failure message if any.
"""
session = ensure_session(session)
client = session.client("sagemaker")
done = False
while not done:
if progress:
print(".", end="")
desc = client.describe_processing_job(ProcessingJobName=job_name)
status = desc["ProcessingJobStatus"]
if status != "InProgress":
done = True
else:
time.sleep(sleep_time)
if progress:
print()
return status, desc.get("FailureReason")
def download_notebook(job_name, output=".", session=None):
"""Download the output notebook from a previously completed job.
Args:
job_name (str): The name of the SageMaker Processing Job that executed the notebook. (Required)
output (str): The directory to copy the output file to. (Default: the current working directory)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The filename of the downloaded notebook.
"""
session = ensure_session(session)
client = session.client("sagemaker")
desc = client.describe_processing_job(ProcessingJobName=job_name)
prefix = desc["ProcessingOutputConfig"]["Outputs"][0]["S3Output"]["S3Uri"]
notebook = os.path.basename(desc["Environment"]["PAPERMILL_OUTPUT"])
s3path = "{}/{}".format(prefix, notebook)
if not os.path.exists(output):
try:
os.makedirs(output)
except OSError as e:
if e.errno != errno.EEXIST:
raise
p1 = Popen(split("aws s3 cp --no-progress {} {}/".format(s3path, output)))
p1.wait()
return "{}/{}".format(output.rstrip("/"), notebook)
def run_notebook(
image,
notebook,
parameters={},
role=None,
instance_type="ml.m5.large",
output_prefix=None,
output=".",
session=None,
in_vpc=False
):
"""Run a notebook in SageMaker Processing producing a new output notebook.
Args:
image (str): The ECR image that defines the environment to run the job (required).
notebook (str): The local notebook to upload and run (required).
parameters (dict): The dictionary of parameters to pass to the notebook (default: {}).
role (str): The name of a role to use to run the notebook (default: calls get_execution_role()).
instance_type (str): The SageMaker instance to use for executing the job (default: ml.m5.large).
output_prefix (str): The prefix path in S3 for where to store the output notebook
(default: determined based on SageMaker Python SDK)
output (str): The directory to copy the output file to (default: the current working directory).
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
Returns:
A tuple with the processing job name, the job status, the failure reason (or None) and the the path to
the result notebook. The output notebook name is formed by adding a timestamp to the original notebook name.
"""
session = ensure_session(session)
if output_prefix is None:
output_prefix = get_output_prefix()
s3path = upload_notebook(notebook, session)
job_name = execute_notebook(
image=image,
input_path=s3path,
output_prefix=output_prefix,
notebook=notebook,
parameters=parameters,
role=role,
instance_type=instance_type,
session=session,
in_vpc=in_vpc
)
print("Job {} started".format(job_name))
status, failure_reason = wait_for_complete(job_name)
if status == "Completed":
local = download_notebook(job_name, output=output)
else:
local = None
return (job_name, status, local, failure_reason)
# run_notebook(
# image="notebook-runner",
# notebook="send-glue-job-logs.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# parameters={"job_name": "prod-8-sales"},
# in_vpc=False
# )
# run_notebook(
# image="notebook-runner",
# notebook="redshift-write-demo.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# parameters={"env": "stage"},
# in_vpc=True
# )
# run_notebook(
# image="notebook-runner",
# notebook="s3_read_write.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# in_vpc=False
# )
# run_notebook(
# image="notebook-runner",
# notebook="ml-demo.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# in_vpc=True
# )
run_notebook(
image="notebook-runner",
notebook="ipc_ss_main.ipynb",
role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
parameters={"env": "stage"},
in_vpc=True
)
# run_notebook(
# image="notebook-runner",
# notebook="Untitled.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# parameters={"env": "stage"},
# in_vpc=True
# )
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/experiments/run_notebook.ipynb | run_notebook.ipynb |
```
# External Libs
import json
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import random
import os
import datetime
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
# Set env
env = "stage"
os.environ['env'] = env
logger = get_logger(level="INFO")
logger.info(f"env: {env}")
```
### Import modified dataset and category splits
```
s3 = S3(bucket_name="sagemaker-ap-south-1-921939243643")
csv_full_path = s3.download_file_from_s3(file_name="data/modified_data.csv")
df1 = pd.read_csv(csv_full_path)
df2 = df1.loc[3:27, :]
df2 = df2.drop(['Unnamed: 0'], axis=1)
df2['Dates'] = pd.to_datetime(df2['Dates']) # converting to datetime format
df2 = df2.set_index('Dates')
df2.tail()
```
### Train - Test Split Function
<b>for_valid=True</b> implies the split is for validation. In this case, all data before March 2021 is taken; the last three months of that subset (Dec 2020 - Feb 2021) form the Test set, and all earlier datapoints form the Train set.
<b>for_valid=False</b> implies the split is for the final model. Hence all datapoints before March 2021 go into the Train set, and only March 2021 is used as the Test set.
```
def test_train_split(drug_id, for_valid=True):
df3 = df2[[drug_id]]
if for_valid:
train = df3[0:-5] # Training Split
test = df3[-5:-2] # Testing Split
else:
train = df3[0:-2] # Training Split
test = df3[-2:-1] # For 2021 March
return train, test
```
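A quick usage sketch (the drug id below is purely illustrative and assumed to exist as a column in <b>df2</b>):
```
train_v, test_v = test_train_split('216583')                   # validation: Dec 2020 - Feb 2021 as test
train_f, test_f = test_train_split('216583', for_valid=False)  # final model: March 2021 only as test
print(train_v.shape, test_v.shape, train_f.shape, test_f.shape)
```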
### Model Libraries -- Imports
```
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from sklearn.metrics import mean_squared_error, mean_absolute_error
import warnings
warnings.filterwarnings("ignore")
from statsmodels.tsa.api import SimpleExpSmoothing, Holt  # required by exp_mod and holts_mod below
```
### Define Exponential Smoothing Model
A simple exponential smoothing model. The <i><b>smoothing constant</b></i> hyperparameter is not provided, so the algorithm finds the best value itself. This model does not capture any trend; it simply forecasts a constant value for all future predictions. The forecast gives higher weight to the most recent observations, with the rate of decay decided by the <i><b>smoothing constant</b></i>.
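For intuition, the flat SES forecast can be written as a recursion on a single level term. The sketch below is illustrative only (it uses a fixed <i><b>alpha</b></i> instead of the optimum found by statsmodels):
```
def ses_sketch(series, alpha=0.5):
    # start the level at the first observation; each new point pulls it towards itself by alpha
    level = series[0]
    for y in series[1:]:
        level = alpha * y + (1 - alpha) * level
    return level  # SES then forecasts this same value for every future step
```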
```
def exp_mod(drug_id, for_valid=True):
train, test = test_train_split(drug_id, for_valid)
fit = SimpleExpSmoothing(train).fit() # Optimum alpha automatically computed
if for_valid:
fcast = fit.forecast(len(test))
mae = mean_absolute_error(test, fcast)
return mae
else:
fcast = fit.forecast(1)
return int(math.ceil(fcast.values[0]))
# result_1 = exp_mod('216583') # for testing
# result_2 = exp_mod('216583', for_valid=False) # for testing
```
### Define Holt's Linear Trend Model
This is a better model, due to its ability to capture trend. In order to fit the model, a <b><i>linear trend</i></b> is assumed. The hyperparameter values <i><b>smoothing_level</b></i> and <i><b>smoothing_trend</b></i> are assumed to be <b>0.8</b> and <b>0.2</b> respectively.
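Written out, the update equations with the assumed smoothing values plugged in look like this (illustration only; the actual fit below is done by statsmodels):
```
# level_t = 0.8 * y_t + 0.2 * (level_{t-1} + trend_{t-1})       # smoothing_level = 0.8
# trend_t = 0.2 * (level_t - level_{t-1}) + 0.8 * trend_{t-1}   # smoothing_trend = 0.2
# h-step-ahead forecast = level_t + h * trend_t                 # linear trend extrapolation
```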
```
def holts_mod(drug_id, for_valid=True):
train, test = test_train_split(drug_id, for_valid)
fit = Holt(train).fit(smoothing_level=0.8, smoothing_trend=0.2, optimized=False) # assume hyp-params and linear trend
if for_valid:
fcast = fit.forecast(len(test))
mae = mean_absolute_error(test, fcast)
return mae
else:
fcast = fit.forecast(1)
return int(math.ceil(fcast.values[0]))
# result_3 = holts_mod('216583') # for testing
# result_4 = holts_mod('216583', for_valid=False) # for testing
```
### Define LSTM Model
A basic LSTM model, much more powerful than the previous models. It uses a special class of recurrent neural networks to learn from the sequence. The hyperparameters such as <i><b>number of layers, number of neurons, activation function, optimizers</b></i> and <i><b>number of epochs</b></i> are assumed.
```
# preparing independent and dependent features
def prepare_lstm_data(timeseries_data, n_features):
X, y =[],[]
for i in range(len(timeseries_data)):
# find the end of this pattern
end_ix = i + n_features
# check if we are beyond the sequence
if end_ix > len(timeseries_data)-1:
break
# gather input and output parts of the pattern
seq_x, seq_y = timeseries_data[i:end_ix], timeseries_data[end_ix]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
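# Illustrative windowing example (hypothetical values): with n_features=3 the series
# [10, 20, 30, 40, 50] becomes X = [[10, 20, 30], [20, 30, 40]] and y = [40, 50],
# i.e. each sample uses the previous 3 observations to predict the next one, e.g.
# X_demo, y_demo = prepare_lstm_data(np.array([10, 20, 30, 40, 50]), 3)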
def lstm_mod(drug_id, for_valid=True, n_steps=3, epochs=500):
n_features = 1
train, test = test_train_split(drug_id, for_valid)
X, y = prepare_lstm_data(train.values, n_steps) # function call
X = X.reshape((X.shape[0], X.shape[1], n_features))
# define model
model = Sequential()
model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features)))
model.add(LSTM(50, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X, y, epochs=epochs, verbose=0)
if for_valid:
# demonstrate prediction for next n days
x_input = train[-(len(test)):][drug_id].values
temp_input=list(x_input)
lst_output=[]
i=0
while(i<len(test)):
if(len(temp_input)>n_steps):
x_input=np.array(temp_input[1:])
x_input = x_input.reshape((1, n_steps, n_features))
yhat = model.predict(x_input, verbose=0)
temp_input.append(yhat[0][0])
temp_input=temp_input[1:]
lst_output.append(yhat[0][0])
i=i+1
else:
x_input = x_input.reshape((1, n_steps, n_features))
yhat = model.predict(x_input, verbose=0)
temp_input.append(yhat[0][0])
lst_output.append(yhat[0][0])
i=i+1
#converting to dictionary --> df
        pred_dict = {'Dates': ['2020-12-01', '2021-01-01', '2021-02-01'], # forecasting the 3 validation months (Dec 2020 - Feb 2021)
'Sales': lst_output}
pred_df = pd.DataFrame(pred_dict)
pred_df['Dates'] = pd.to_datetime(pred_df['Dates']) # converting to datetime format
pred_df = pred_df.set_index('Dates')
mae = mean_absolute_error(test, pred_df)
return mae
else:
x_input = train[-3:][drug_id].values
x_input = x_input.reshape((1, 3, 1))
yhat = model.predict(x_input, verbose=0)
return int(math.ceil(yhat[0][0]))
# result_5 = lstm_mod('216583') # for testing
# result_6 = lstm_mod(drug_id='216583', for_valid=False) # for testing
import tensorflow as tf
import tensorflow_datasets as tfds
print("TensorFlow version:", tf.__version__)
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
tf.config.list_physical_devices('GPU')
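# Note: the block below appears to be a runtime/GPU sanity check -- it trains a small CNN
# on MNIST via tensorflow_datasets and is independent of the drug forecasting models above.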
(ds_train, ds_test), ds_info = tfds.load(
'mnist',
split=['train', 'test'],
shuffle_files=True,
as_supervised=True,
with_info=True,
)
def normalize_img(image, label):
"""Normalizes images: `uint8` -> `float32`."""
return tf.cast(image, tf.float32) / 255., label
batch_size = 128
ds_train = ds_train.map(
normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(batch_size)
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(
normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.batch(batch_size)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, kernel_size=(3, 3),
activation='relu'),
tf.keras.layers.Conv2D(64, kernel_size=(3, 3),
activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
# tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
# tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(0.001),
metrics=['accuracy'],
)
model.fit(
ds_train,
epochs=6,
validation_data=ds_test,
)
```
## Write result to DB
```
db = DB(read_only=False)
db.open_connection()
all_result = f"result_5: {datetime.datetime.now()}"
query = f"""
insert into "prod2-generico"."temp-str" (col1) values ('Hello at {datetime.datetime.now()}: {all_result}')
"""
db.execute(query=query)
db.close_connection()
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/experiments/ml-demo.ipynb | ml-demo.ipynb |
```
import asyncio
import errno
import io
import logging
import json
import os
import re
import time
from subprocess import Popen, PIPE, STDOUT, DEVNULL
from shlex import split
import zipfile as zip
import botocore
import boto3
def ensure_session(session=None):
"""If session is None, create a default session and return it. Otherwise return the session passed in"""
if session is None:
session = boto3.session.Session()
return session
def get_execution_role(session):
"""Return the role ARN whose credentials are used to call the API.
Throws an exception if the current AWS identity is not a role.
Returns:
(str): The role ARN
"""
assumed_role = session.client("sts").get_caller_identity()["Arn"]
if ":user/" in assumed_role:
user_name = assumed_role[assumed_role.rfind("/") + 1 :]
raise ValueError(
f"You are running as the IAM user '{user_name}'. You must supply an IAM role to run SageMaker jobs."
)
if "AmazonSageMaker-ExecutionRole" in assumed_role:
role = re.sub(
r"^(.+)sts::(\d+):assumed-role/(.+?)/.*$",
r"\1iam::\2:role/service-role/\3",
assumed_role,
)
return role
role = re.sub(
r"^(.+)sts::(\d+):assumed-role/(.+?)/.*$", r"\1iam::\2:role/\3", assumed_role
)
# Call IAM to get the role's path
role_name = role[role.rfind("/") + 1 :]
arn = session.client("iam").get_role(RoleName=role_name)["Role"]["Arn"]
if ":role/" in arn:
return arn
message = "The current AWS identity is not a role: {}, therefore it cannot be used as a SageMaker execution role"
raise ValueError(message.format(arn))
def execute_notebook(
*,
image,
input_path,
output_prefix,
notebook,
parameters,
role=None,
instance_type,
session,
in_vpc
):
session = ensure_session(session)
if not role:
role = get_execution_role(session)
elif "/" not in role:
account = session.client("sts").get_caller_identity()["Account"]
role = "arn:aws:iam::{}:role/{}".format(account, role)
if "/" not in image:
account = session.client("sts").get_caller_identity()["Account"]
region = session.region_name
image = "{}.dkr.ecr.{}.amazonaws.com/{}:latest".format(account, region, image)
    if notebook is None:
notebook = input_path
base = os.path.basename(notebook)
nb_name, nb_ext = os.path.splitext(base)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
job_name = (
("papermill-" + re.sub(r"[^-a-zA-Z0-9]", "-", nb_name))[: 62 - len(timestamp)]
+ "-"
+ timestamp
)
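    # SageMaker processing job names are limited to 63 characters, so the notebook-derived
    # prefix is truncated to leave room for the "-<timestamp>" suffix appended above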
input_directory = "/opt/ml/processing/input/"
local_input = input_directory + os.path.basename(input_path)
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
local_output = "/opt/ml/processing/output/"
api_args = {
"ProcessingInputs": [
{
"InputName": "notebook",
"S3Input": {
"S3Uri": input_path,
"LocalPath": input_directory,
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
},
},
],
"ProcessingOutputConfig": {
"Outputs": [
{
"OutputName": "result",
"S3Output": {
"S3Uri": output_prefix,
"LocalPath": local_output,
"S3UploadMode": "EndOfJob",
},
},
],
},
"ProcessingJobName": job_name,
"ProcessingResources": {
"ClusterConfig": {
"InstanceCount": 1,
"InstanceType": instance_type,
"VolumeSizeInGB": 40,
}
},
"StoppingCondition": {"MaxRuntimeInSeconds": 7200},
"AppSpecification": {
"ImageUri": image,
"ContainerArguments": [
"run_notebook",
],
},
"RoleArn": role,
"Environment": {},
}
if in_vpc:
api_args["NetworkConfig"] = {
'EnableInterContainerTrafficEncryption': False,
'EnableNetworkIsolation': False,
'VpcConfig': {
'SecurityGroupIds': [
"sg-0101c938006dab959"
],
'Subnets': [
'subnet-0446eb5f39df5ceca'
]
}
}
api_args["Environment"]["PAPERMILL_INPUT"] = local_input
api_args["Environment"]["PAPERMILL_OUTPUT"] = local_output + result
    if os.environ.get("AWS_DEFAULT_REGION") is not None:
api_args["Environment"]["AWS_DEFAULT_REGION"] = os.environ["AWS_DEFAULT_REGION"]
api_args["Environment"]["PAPERMILL_PARAMS"] = json.dumps(parameters)
api_args["Environment"]["PAPERMILL_NOTEBOOK_NAME"] = notebook
api_args["Environment"]["AWS_ACCESS_KEY_ID"] = "AKIA5NJ64OJ5UEZ4LS5P"
api_args["Environment"]["AWS_SECRET_ACCESS_KEY"] = "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"
api_args["Environment"]["REGION_NAME"] = "ap-south-1"
client = boto3.client("sagemaker")
result = client.create_processing_job(**api_args)
job_arn = result["ProcessingJobArn"]
job = re.sub("^.*/", "", job_arn)
return job
def default_bucket():
return "sagemaker-ap-south-1-921939243643"
def upload_notebook(notebook, session=None):
"""Uploads a notebook to S3 in the default SageMaker Python SDK bucket for
this user. The resulting S3 object will be named "s3://<bucket>/papermill-input/notebook-YYYY-MM-DD-hh-mm-ss.ipynb".
Args:
notebook (str):
The filename of the notebook you want to upload. (Required)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The resulting object name in S3 in URI format.
"""
with open(notebook, "rb") as f:
return upload_fileobj(f, session)
def upload_fileobj(notebook_fileobj, session=None):
"""Uploads a file object to S3 in the default SageMaker Python SDK bucket for
this user. The resulting S3 object will be named "s3://<bucket>/papermill-input/notebook-YYYY-MM-DD-hh-mm-ss.ipynb".
Args:
notebook_fileobj (fileobj):
A file object (as returned from open) that is reading from the notebook you want to upload. (Required)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The resulting object name in S3 in URI format.
"""
session = ensure_session(session)
snotebook = "notebook-{}.ipynb".format(
time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
)
s3 = session.client("s3")
key = "papermill_input/" + snotebook
bucket = default_bucket()
s3path = "s3://{}/{}".format(bucket, key)
s3.upload_fileobj(notebook_fileobj, bucket, key)
return s3path
def get_output_prefix():
"""Returns an S3 prefix in the Python SDK default bucket."""
return "s3://{}/papermill_output".format(default_bucket())
def wait_for_complete(job_name, progress=True, sleep_time=10, session=None):
"""Wait for a notebook execution job to complete.
Args:
job_name (str):
The name of the SageMaker Processing Job executing the notebook. (Required)
progress (boolean):
If True, print a period after every poll attempt. (Default: True)
sleep_time (int):
The number of seconds between polls. (Default: 10)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
A tuple with the job status and the failure message if any.
"""
session = ensure_session(session)
client = session.client("sagemaker")
done = False
while not done:
if progress:
print(".", end="")
desc = client.describe_processing_job(ProcessingJobName=job_name)
status = desc["ProcessingJobStatus"]
if status != "InProgress":
done = True
else:
time.sleep(sleep_time)
if progress:
print()
return status, desc.get("FailureReason")
def download_notebook(job_name, output=".", session=None):
"""Download the output notebook from a previously completed job.
Args:
job_name (str): The name of the SageMaker Processing Job that executed the notebook. (Required)
output (str): The directory to copy the output file to. (Default: the current working directory)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The filename of the downloaded notebook.
"""
session = ensure_session(session)
client = session.client("sagemaker")
desc = client.describe_processing_job(ProcessingJobName=job_name)
prefix = desc["ProcessingOutputConfig"]["Outputs"][0]["S3Output"]["S3Uri"]
notebook = os.path.basename(desc["Environment"]["PAPERMILL_OUTPUT"])
s3path = "{}/{}".format(prefix, notebook)
if not os.path.exists(output):
try:
os.makedirs(output)
except OSError as e:
if e.errno != errno.EEXIST:
raise
p1 = Popen(split("aws s3 cp --no-progress {} {}/".format(s3path, output)))
p1.wait()
return "{}/{}".format(output.rstrip("/"), notebook)
def run_notebook(
image,
notebook,
parameters={},
role=None,
instance_type="ml.m5.large",
output_prefix=None,
output=".",
session=None,
in_vpc=False
):
"""Run a notebook in SageMaker Processing producing a new output notebook.
Args:
image (str): The ECR image that defines the environment to run the job (required).
notebook (str): The local notebook to upload and run (required).
parameters (dict): The dictionary of parameters to pass to the notebook (default: {}).
role (str): The name of a role to use to run the notebook (default: calls get_execution_role()).
instance_type (str): The SageMaker instance to use for executing the job (default: ml.m5.large).
output_prefix (str): The prefix path in S3 for where to store the output notebook
(default: determined based on SageMaker Python SDK)
output (str): The directory to copy the output file to (default: the current working directory).
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
Returns:
        A tuple with the processing job name, the job status, the path to the result notebook (or None if the job
        did not complete), and the failure reason (or None). The output notebook name is formed by adding a timestamp to the original notebook name.
"""
session = ensure_session(session)
if output_prefix is None:
output_prefix = get_output_prefix()
s3path = upload_notebook(notebook, session)
job_name = execute_notebook(
image=image,
input_path=s3path,
output_prefix=output_prefix,
notebook=notebook,
parameters=parameters,
role=role,
instance_type=instance_type,
session=session,
in_vpc=in_vpc
)
print("Job {} started".format(job_name))
status, failure_reason = wait_for_complete(job_name)
if status == "Completed":
local = download_notebook(job_name, output=output)
else:
local = None
return (job_name, status, local, failure_reason)
# run_notebook(
# image="notebook-runner",
# notebook="send-glue-job-logs.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# parameters={"job_name": "prod-8-sales"},
# in_vpc=False
# )
# run_notebook(
# image="notebook-runner",
# notebook="redshift-write-demo.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# parameters={"env": "stage"},
# in_vpc=True
# )
# run_notebook(
# image="notebook-runner",
# notebook="s3_read_write.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# in_vpc=False
# )
# run_notebook(
# image="notebook-runner",
# notebook="ml-demo.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# in_vpc=True
# )
run_notebook(
image="notebook-runner",
notebook="ipc_ss_main.ipynb",
role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
parameters={"env": "stage"},
in_vpc=True
)
# run_notebook(
# image="notebook-runner",
# notebook="Untitled.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# parameters={"env": "stage"},
# in_vpc=True
# )
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/experiments/.ipynb_checkpoints/run_notebook-checkpoint.ipynb | run_notebook-checkpoint.ipynb |
```
import argparse
import os
import datetime
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
env = "stage"
os.environ['env'] = env
logger = get_logger(level="INFO")
db = DB(read_only=False)
db.db_secrets
db.open_connection()
query = f"""
insert into "prod2-generico"."temp-str" (col1) values ('Hello at {datetime.datetime.now()}')
"""
db.execute(query=query)
db.close_connection()
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/experiments/.ipynb_checkpoints/redshift-write-demo-checkpoint.ipynb | redshift-write-demo-checkpoint.ipynb |
```
"""main wrapper for IPC safety stock reset"""
import os
import sys
import argparse
import pandas as pd
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.utils.ipc.forecast_reset import ipc_forecast_reset
from zeno_etl_libs.utils.warehouse.wh_intervention.store_portfolio_consolidation import stores_ss_consolidation
from zeno_etl_libs.utils.ipc.goodaid_substitution import update_ga_ss
from zeno_etl_libs.utils.ipc.npi_exclusion import omit_npi_drugs
from zeno_etl_libs.utils.ipc.post_processing import post_processing
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
```
# main
```
def main(debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag, v6_active_flag,
v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
rs_db_read, rs_db_write, read_schema, write_schema, logger):
s3 = S3()
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
if v3_active_flag == 'Y':
corrections_flag = True
else:
corrections_flag = False
# Define empty DF if required in case of fail
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
logger.info("Forecast pipeline starts...")
try:
for store_id in reset_stores:
logger.info("IPC SS calculation started for store id: " + str(store_id))
# RUNNING FORECAST PIPELINE AND SAFETY STOCK CALC
drug_class, weekly_fcst, safety_stock_df, df_corrections, \
df_corrections_111, drugs_max_to_lock_ipcv6, \
drug_rejects_ipcv6 = ipc_forecast_reset(
store_id, type_list, reset_date, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
rs_db_read, read_schema,
drug_type_list_v4=drug_type_list_v4,
v5_active_flag=v5_active_flag,
v6_active_flag=v6_active_flag,
v6_type_list=v6_type_list,
v6_ptr_cut_off=v6_ptr_cut_off,
chronic_max_flag=chronic_max_flag,
logger=logger)
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
safety_stock_df, consolidation_log = stores_ss_consolidation(
safety_stock_df, rs_db_read, read_schema,
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
safety_stock_df, good_aid_ss_log = update_ga_ss(
safety_stock_df, store_id, rs_db_read, read_schema,
ga_inv_weight, rest_inv_weight,
top_inv_weight, substition_type=['generic'],
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point', logger=logger)
# OMIT NPI DRUGS
if omit_npi == 'Y':
safety_stock_df = omit_npi_drugs(safety_stock_df, store_id,
reset_date, rs_db_read,
read_schema, logger)
# POST PROCESSING AND ORDER VALUE CALCULATION
drug_class, weekly_fcst, safety_stock_df, \
order_value = post_processing(store_id, drug_class, weekly_fcst,
safety_stock_df, rs_db_read,
read_schema, logger)
order_value_all = order_value_all.append(order_value, ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc-forecast
weekly_fcst.rename(
columns={'date': 'week_begin_dt', 'fcst': 'point_forecast',
'std': 'forecast_deviation'}, inplace=True)
weekly_fcst['forecast_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
weekly_fcst['week_begin_dt'] = weekly_fcst['week_begin_dt']
weekly_fcst['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['created-by'] = 'etl-automation'
weekly_fcst['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['updated-by'] = 'etl-automation'
weekly_fcst.columns = [c.replace('_', '-') for c in weekly_fcst.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-forecast',
schema=write_schema)
columns = list(table_info['column_name'])
weekly_fcst = weekly_fcst[columns] # required column order
logger.info("Writing to table: ipc-forecast")
s3.write_df_to_db(df=weekly_fcst,
table_name='ipc-forecast',
db=rs_db_write, schema=write_schema)
# writing table ipc-safety-stock
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: ipc-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='ipc-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table ipc-abc-xyz-class
drug_class['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
drug_class['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['created-by'] = 'etl-automation'
drug_class['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['updated-by'] = 'etl-automation'
drug_class.columns = [c.replace('_', '-') for c in drug_class.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-abc-xyz-class',
schema=write_schema)
columns = list(table_info['column_name'])
drug_class = drug_class[columns] # required column order
logger.info("Writing to table: ipc-abc-xyz-class")
s3.write_df_to_db(df=drug_class,
table_name='ipc-abc-xyz-class',
db=rs_db_write, schema=write_schema)
# to write ipc v6 tables ...
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in safety_stock_df.columns]
ss_data_upload = safety_stock_df.query('order_upto_point > 0')[
['store_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema, logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
logger.info("All writes to RS-DB completed!")
# INTERNAL TABLE SCHEDULE UPDATE - OPS ORACLE
# logger.info(f"Rescheduling SID:{store_id} in OPS ORACLE")
# if reset_store_ops != None:
# content_type = 74
# object_id = reset_store_ops.loc[
# reset_store_ops[
# 'store_id'] == store_id, 'object_id'].unique()
# for obj in object_id:
# request_body = {
# "object_id": int(obj), "content_type": content_type}
# api_response, _ = django_model_execution_log_create_api(
# request_body)
# reset_store_ops.loc[
# reset_store_ops['object_id'] == obj,
# 'api_call_response'] = api_response
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"IPC code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"IPC code execution status: {status}")
return order_value_all, new_drug_entries, missed_entries
# Parameter passing
env = "stage"
os.environ['env'] = env
email_to = "[email protected]"
debug_mode = "N"
# JOB EXCLUSIVE PARAMS
exclude_stores = [52, 60, 92, 243, 281]
goodaid_ss_flag = "Y"
ga_inv_weight = 0.5
rest_inv_weight = 0.0
top_inv_weight = 1
chronic_max_flag = "N"
wh_gen_consolidation = "Y"
v5_active_flag = "N"
v6_active_flag = "N"
v6_type_list = ['ethical', 'generic', 'others']
v6_ptr_cut_off = 400
reset_date = "YYYY-MM-DD"
reset_stores = [2]
v3_active_flag = "N"
corrections_selling_probability_cutoff = "{'ma_less_than_2': 0.40, 'ma_more_than_2' : 0.40}"
corrections_cumulative_probability_cutoff = "{'ma_less_than_2':0.50,'ma_more_than_2':0.63}"
drug_type_list_v4 = "{'generic':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}','ethical':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}','others':'{0:[0,0,0], 1:[0,1,2], 2:[0,1,2],3:[1,2,3]}'}"
omit_npi = 'N'
# EVALUATE REQUIRED JSON PARAMS
corrections_selling_probability_cutoff = literal_eval(
corrections_selling_probability_cutoff)
corrections_cumulative_probability_cutoff = literal_eval(
corrections_cumulative_probability_cutoff)
drug_type_list_v4 = literal_eval(drug_type_list_v4)
logger = get_logger()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
if 0:
pass
else:
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
reset_store_ops = None
""" calling the main function """
order_value_all, new_drug_entries, \
missed_entries = main(
debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag,
v6_active_flag, v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
rs_db_read, rs_db_write, read_schema, write_schema, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# SENT EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
# to write ..............
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/experiments/.ipynb_checkpoints/ipc_ss_main-checkpoint.ipynb | ipc_ss_main-checkpoint.ipynb |
```
!pip install zeno_etl_libs==1.0.49
"""main wrapper for new-stores safety stock reset"""
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime as dt
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.new_stores.new_stores_ipc import new_stores_ss_calc
from zeno_etl_libs.utils.new_stores.helper_functions import get_drug_info, order_value_report
from zeno_etl_libs.utils.warehouse.wh_intervention.store_portfolio_consolidation import stores_ss_consolidation
from zeno_etl_libs.utils.ipc.goodaid_substitution import update_ga_ss
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
```
## Main Function
```
def main(debug_mode, reset_stores, goodaid_ss_flag,
ga_inv_weight, rest_inv_weight, top_inv_weight, wh_gen_consolidation,
type_list, rs_db_read, rs_db_write, read_schema, write_schema, s3, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
reset_date = dt.date.today().strftime("%Y-%m-%d")
# define empty DF if required in case of fail
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
try:
for store_id in reset_stores:
logger.info("New store SS calculation started for store id: " +
str(store_id))
# NEW STORES SS CALCULATION
ss_stores = new_stores_ss_calc(store_id, reset_date, rs_db_read,
read_schema, logger)
# EXTRA INFO FETCH
data_inv, data_ptr, data_drug_info, data_drug_grade,\
data_stores = get_drug_info(store_id, rs_db_read, read_schema)
# MERGE DATA
ss_stores_merge = ss_stores.merge(
data_inv[['drug_id', 'current_inventory']],
how='left', on='drug_id')
ss_stores_merge = ss_stores_merge.merge(data_ptr, how='left',
on='drug_id')
ss_stores_merge = ss_stores_merge.merge(data_drug_info, how='left',
on='drug_id')
ss_stores_merge = ss_stores_merge.merge(data_drug_grade, how='left',
on='drug_id')
ss_stores_merge = ss_stores_merge.merge(data_stores, how='left',
on='store_id')
logger.info("Null values in dataframes, count is {}".format(
ss_stores_merge.isnull().sum()))
# fill Null values
ss_stores_merge['current_inventory'].fillna(0, inplace=True)
ss_stores_merge['ptr'].fillna(67, inplace=True)
ss_stores_merge['type'].fillna('', inplace=True)
ss_stores_merge['category'].fillna('', inplace=True)
ss_stores_merge['drug_grade'].fillna('NA', inplace=True)
# final data-frame name for update
new_stores_ss = ss_stores_merge.copy()
logger.info("SS list base algo+triggers length is {}".format(
len(new_stores_ss)))
logger.info(
"Types in list are - {}".format(new_stores_ss['type'].unique()))
# remove banned and discontinued drugs
new_stores_ss = new_stores_ss[~new_stores_ss['type'].isin(
['banned', 'discontinued-products'])]
logger.info(
"Types in list are - {}".format(new_stores_ss['type'].unique()))
logger.info(
"SS list after removing banned and discontinued - length is {}".format(
len(new_stores_ss)))
# order value report
order_value = order_value_report(new_stores_ss)
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
new_stores_ss, consolidation_log = stores_ss_consolidation(
new_stores_ss, rs_db_read, read_schema,
min_column='min', ss_column='safety_stock',
max_column='max')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
new_stores_ss, good_aid_ss_log = update_ga_ss(
new_stores_ss, store_id, rs_db_read, read_schema,
ga_inv_weight, rest_inv_weight,
top_inv_weight, substition_type=['generic'],
min_column='min', ss_column='safety_stock',
max_column='max', logger=logger)
# few more columns
new_stores_ss['inventory_quantity'] = new_stores_ss['current_inventory']
new_stores_ss['fptr'] = new_stores_ss['ptr']
new_stores_ss['store_id'] = store_id
new_stores_ss['daily_sales_1'] = -1
new_stores_ss['daily_sales_2'] = -1
new_stores_ss['daily_sales_3'] = -1
new_stores_ss['ads'] = -1
new_stores_ss['ads_min'] = -1
new_stores_ss['ads_ss'] = -1
new_stores_ss['ads_max'] = -1
new_stores_ss['algo_max_days'] = 30
# adjustment for ethical
# same logic as in new_store_ipc_funcs.ss_calc
new_stores_ss['algo_max_days'] = np.round(
np.where(new_stores_ss['type'].isin(
['ethical', 'high-value-ethical']),
new_stores_ss['algo_max_days'] * (1 / 2),
new_stores_ss['algo_max_days'] * (2 / 3)))
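            # e.g. with algo_max_days = 30: ethical / high-value-ethical drugs get 15 days
            # of cover, all other types get 20 days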
# for min
new_stores_ss['algo_min_days'] = np.where(new_stores_ss['max'] > 0,
(new_stores_ss['min'] /
new_stores_ss['max']
) * new_stores_ss[
'algo_max_days'], 0)
# for ss
new_stores_ss['algo_ss_days'] = np.where(new_stores_ss['max'] > 0,
(new_stores_ss[
'safety_stock'] /
new_stores_ss['max']
) * new_stores_ss[
'algo_max_days'], 0)
new_stores_ss['corr_min'] = new_stores_ss['min']
new_stores_ss['corr_ss'] = new_stores_ss['safety_stock']
new_stores_ss['corr_max'] = new_stores_ss['max']
new_stores_ss['to_order_quantity'] = np.where(
new_stores_ss['inventory_quantity']
<= new_stores_ss['corr_ss'],
new_stores_ss['corr_max'] -
new_stores_ss['inventory_quantity'],
0)
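            # reorder rule: if on-hand inventory is at or below corr_ss, order enough to
            # refill up to corr_max; otherwise order nothing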
new_stores_ss['to_order_value'] = new_stores_ss['fptr'] * \
new_stores_ss['to_order_quantity']
# required columns
new_store_ss = new_stores_ss[[
'store_id', 'store_name', 'drug_id', 'drug_name', 'type',
'category', 'drug_grade', 'inventory_quantity',
'min', 'safety_stock', 'max',
'daily_sales_1', 'daily_sales_2', 'daily_sales_3',
'ads', 'ads_min', 'ads_ss', 'ads_max',
'algo_min_days', 'algo_ss_days', 'algo_max_days',
'corr_min', 'corr_ss', 'corr_max',
'to_order_quantity', 'fptr', 'to_order_value', 'algo_type']]
# overall order value
order_value_all = order_value_all.append(order_value,
ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table non-ipc-safety-stock
# new_store_ss['store_id'] = new_store_ss['store_id'].astype(int)
new_store_ss['reset-date'] = dt.datetime.strptime(reset_date,
'%Y-%m-%d').date()
new_store_ss['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
new_store_ss['created-by'] = 'etl-automation'
new_store_ss['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
new_store_ss['updated-by'] = 'etl-automation'
new_store_ss.columns = [c.replace('_', '-') for c in
new_store_ss.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='new-store-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
new_store_ss = new_store_ss[columns] # required column order
logger.info("Writing to table: new-store-safety-stock")
s3.write_df_to_db(df=new_store_ss,
table_name='new-store-safety-stock',
db=rs_db_write, schema=write_schema)
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
new_store_ss.columns = [c.replace('-', '_') for c in
new_store_ss.columns]
ss_data_upload = new_store_ss.query('corr_max > 0')[
['store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']]
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema,
logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"New-Stores-SS code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"New-Stores-SS code execution status: {status}")
return status, reset_date, new_drug_entries, missed_entries, order_value_all
```
## Pass Params
```
env = "dev"
email_to = "[email protected]"
debug_mode = "N"
os.environ['env'] = env
logger = get_logger()
s3 = S3()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
```
## Read params from RS table
```
from zeno_etl_libs.helper.parameter.job_parameter import parameter
args = parameter.get_params(job_id=116)
# JOB EXCLUSIVE PARAMS
exclude_stores = args["exclude_stores"]
goodaid_ss_flag = args["goodaid_ss_flag"]
ga_inv_weight = args["ga_inv_weight"]
rest_inv_weight = args["rest_inv_weight"]
top_inv_weight = args["top_inv_weight"]
wh_gen_consolidation = args["wh_gen_consolidation"]
reset_stores = args["reset_stores"]
```
## Get Stores and Type List
```
store_query = """
select "id", name, "opened-at" as opened_at
from "{read_schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {0}
""".format(str(exclude_stores).replace('[', '(').replace(']', ')'),
read_schema=read_schema)
stores = rs_db_read.get_df(store_query)
# new stores list
new_stores = stores.loc[
(dt.datetime.now() - stores['opened_at'] <= dt.timedelta(days=90)) &
(dt.datetime.now() - stores['opened_at'] >= dt.timedelta(days=30)), 'id'].values
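# i.e. a "new store" here is one opened at least 30 and at most 90 days before today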
if reset_stores == [0]: # Fetch all new stores
reset_stores = new_stores
logger.info(f"Algo to run for all new stores: {reset_stores}")
else:
reset_stores = list(set(reset_stores).intersection(new_stores))
logger.info(f"Algo to run for specified new stores: {reset_stores}")
if not reset_stores:
logger.info(f"ALERT: None of specified stores is a new store")
reset_stores = new_stores
logger.info(f"REVERT: Algo to run for all new stores: {reset_stores}")
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
```
## Execute Main Function
```
""" calling the main function """
status, reset_date, new_drug_entries, missed_entries, \
order_value_all = main(
debug_mode, reset_stores, goodaid_ss_flag, ga_inv_weight,
rest_inv_weight, top_inv_weight, wh_gen_consolidation,
type_list, rs_db_read, rs_db_write, read_schema, write_schema, s3,
logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
```
## Send Email Notification
```
# save email attachements to s3
order_value_all_uri = s3.save_df_to_s3(order_value_all,
file_name=f"order_value_all_{reset_date}.csv")
new_drug_entries_uri = s3.save_df_to_s3(new_drug_entries,
file_name=f"new_drug_entries_{reset_date}.csv")
missed_entries_uri = s3.save_df_to_s3(missed_entries,
file_name=f"missed_entries_{reset_date}.csv")
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"New Stores SS Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Reset Stores: {reset_stores}
Job Params: {args}
""",
to_emails=email_to, file_uris=[order_value_all_uri,
new_drug_entries_uri,
missed_entries_uri])
logger.info("Script ended")
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/new-stores-ss-main/new_stores_ss_main.ipynb | new_stores_ss_main.ipynb |
```
!pip install zeno_etl_libs==1.0.123
"""main wrapper for IPC2.0 safety stock reset"""
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
sys.path.append('../../../..')
# sys.path.insert(0,'/Users/tusharuike/ETL')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.django.api import Django
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper import helper
from zeno_etl_libs.utils.ipc2.forecast_main import ipc_forecast
from zeno_etl_libs.utils.ipc2.safety_stock import safety_stock_calc
from zeno_etl_libs.utils.ipc2.portfolio_consolidation import wh_consolidation, \
goodaid_consolidation, D_class_consolidation
from zeno_etl_libs.utils.ipc.store_portfolio_additions import generic_portfolio
from zeno_etl_libs.utils.ipc.npi_exclusion import omit_npi_drugs
from zeno_etl_libs.utils.ipc2.post_processing import post_processing
from zeno_etl_libs.utils.ipc2.helpers.correction_flag import compare_df, \
add_correction_flag
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.utils.ipc2.helpers.outlier_check import check_oup_outlier
```
# Main Function
```
def main(debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
v3_active_flag, v4_active_flag, v5_active_flag, v6_active_flag,
d_class_consolidation, wh_gen_consolidation, goodaid_ss_flag,
keep_all_generic_comp, omit_npi, ga_inv_weight, rest_inv_weight,
top_inv_weight, v6_type_list, v6_ptr_cut_off, open_po_turbhe_active,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
outlier_check, rs_db_read, rs_db_write, read_schema, write_schema,
s3, django, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
# Define empty variables if required in case of fail
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
df_outliers_all = pd.DataFrame()
manual_doid_upd_all = pd.DataFrame()
try:
for store_id in reset_stores:
logger.info(f"Running for store id: {store_id} and reset date: {reset_date}")
if not type_list:
type_list = str(
list(reset_store_ops.loc[reset_store_ops['store_id'] ==
store_id, 'type'].unique()))
type_list = type_list.replace('[', '(').replace(']', ')')
# RUNNING IPC2.0 FORECAST PIPELINE
logger.info("Forecast Pipeline starts...")
agg_fcst, cal_sales, weekly_fcst, seg_df, drug_class = ipc_forecast(
store_id, reset_date, type_list, read_schema, rs_db_read,
logger)
# SAFETY STOCK CALCULATIONS
logger.info("Safety Stock Calculations starts...")
safety_stock_df = safety_stock_calc(
agg_fcst, cal_sales, store_id, reset_date, v3_active_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
v4_active_flag, drug_type_list_v4, v5_active_flag,
open_po_turbhe_active, read_schema, rs_db_read, logger)
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
logger.info("WH Generic Consolidation starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = wh_consolidation(
safety_stock_df, rs_db_read, read_schema, logger)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'WH')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
logger.info("GA SS Modification starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = goodaid_consolidation(
safety_stock_df, rs_db_read, read_schema, logger)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'GA')
# D-CLASS SKU CONSOLIDATION
if d_class_consolidation == 'Y':
logger.info("D Class Consolidation starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = D_class_consolidation(
safety_stock_df, store_id, rs_db_read, read_schema, logger)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'DCC')
# KEEP ALL GENERIC COMPOSITIONS IN STORE
if keep_all_generic_comp == 'Y':
logger.info("All Generic Composition starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = generic_portfolio(safety_stock_df, rs_db_read,
read_schema, logger)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'AG')
# OMIT NPI DRUGS
if omit_npi == 'Y':
logger.info("Omit NPI starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = omit_npi_drugs(safety_stock_df, store_id,
reset_date, rs_db_read,
read_schema, logger)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'NPI')
# POST PROCESSING AND ORDER VALUE CALCULATIONS
logger.info("Post Processing starts")
safety_stock_df, order_value, weekly_fcst, \
seg_df = post_processing(safety_stock_df, weekly_fcst, seg_df,
store_id, read_schema, rs_db_read,
logger)
order_value_all = order_value_all.append(order_value, ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc2-weekly-forecast
weekly_fcst['store_id'] = weekly_fcst['store_id'].astype(int)
weekly_fcst['drug_id'] = weekly_fcst['drug_id'].astype(int)
weekly_fcst['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
weekly_fcst['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['created-by'] = 'etl-automation'
weekly_fcst['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['updated-by'] = 'etl-automation'
weekly_fcst.columns = [c.replace('_', '-') for c in
weekly_fcst.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc2-weekly-forecast',
schema=write_schema)
columns = list(table_info['column_name'])
weekly_fcst = weekly_fcst[columns] # required column order
logger.info("Writing to table: ipc2-weekly-forecast")
s3.write_df_to_db(df=weekly_fcst,
table_name='ipc2-weekly-forecast',
db=rs_db_write, schema=write_schema)
# writing table ipc2-safety-stock
safety_stock_df['store_id'] = safety_stock_df['store_id'].astype(int)
safety_stock_df['drug_id'] = safety_stock_df['drug_id'].astype(int)
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in
safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc2-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: ipc2-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='ipc2-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table ipc2-segmentation
seg_df['store_id'] = seg_df['store_id'].astype(int)
seg_df['drug_id'] = seg_df['drug_id'].astype(int)
seg_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
seg_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df['created-by'] = 'etl-automation'
seg_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df['updated-by'] = 'etl-automation'
seg_df.columns = [c.replace('_', '-') for c in seg_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc2-segmentation',
schema=write_schema)
columns = list(table_info['column_name'])
seg_df = seg_df[columns] # required column order
logger.info("Writing to table: ipc2-segmentation")
s3.write_df_to_db(df=seg_df,
table_name='ipc2-segmentation',
db=rs_db_write, schema=write_schema)
logger.info("All writes to RS-DB completed!")
# OUP OUTLIER CHECK
if outlier_check == 'Y':
logger.info("Outlier detection starts")
outlier_drugs, df_outliers, \
manual_doid_upd_df = check_oup_outlier(
safety_stock_df, store_id, reset_date, rs_db_read,
read_schema)
df_outliers_all = df_outliers_all.append(df_outliers)
manual_doid_upd_all = manual_doid_upd_all.append(manual_doid_upd_df)
else:
outlier_drugs = []
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in safety_stock_df.columns]
ss_data_upload = safety_stock_df.loc[
(safety_stock_df["order_upto_point"] > 0) &
(~safety_stock_df["drug_id"].isin(outlier_drugs))]
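                # drugs flagged as OUP outliers are kept out of the automatic DOID update;
                # they are emailed separately (outlier/manual upload files) for manual review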
ss_data_upload = ss_data_upload[['store_id', 'drug_id',
'safety_stock', 'reorder_point', 'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema,
logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
# INTERNAL TABLE SCHEDULE UPDATE - OPS ORACLE
logger.info(f"Rescheduling SID:{store_id} in OPS ORACLE")
if isinstance(reset_store_ops, pd.DataFrame):
content_type = 74
object_id = reset_store_ops.loc[
reset_store_ops[
'store_id'] == store_id, 'object_id'].unique()
for obj in object_id:
request_body = {"object_id": int(obj),
"content_type": content_type}
api_response, _ = django.django_model_execution_log_create_api(
request_body)
reset_store_ops.loc[
reset_store_ops['object_id'] == obj,
'api_call_response'] = api_response
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"IPC code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"IPC code execution status: {status}")
return status, order_value_all, new_drug_entries, missed_entries,\
df_outliers_all, manual_doid_upd_all
```
# Pass Params
```
env = "dev"
email_to = "[email protected]"
debug_mode = "N"
run_batch = "run_batch"
tot_batch = "tot_batch"
batch_stores = "batch_stores"
os.environ['env'] = env
logger = get_logger()
s3 = S3()
django = Django()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
```
# Read params from RS table
```
from zeno_etl_libs.helper.parameter.job_parameter import parameter
args = parameter.get_params(job_id=140)
# JOB EXCLUSIVE PARAMS
exclude_stores = args["exclude_stores"]
goodaid_ss_flag = args["goodaid_ss_flag"]
ga_inv_weight = args["ga_inv_weight"]
rest_inv_weight = args["rest_inv_weight"]
top_inv_weight = args["top_inv_weight"]
d_class_consolidation = args["d_class_consolidation"]
wh_gen_consolidation = args["wh_gen_consolidation"]
v5_active_flag = args["v5_active_flag"]
v6_active_flag = args["v6_active_flag"]
v6_type_list = args["v6_type_list"]
v6_ptr_cut_off = args["v6_ptr_cut_off"]
reset_date = args["reset_date"]
reset_stores = args["reset_stores"]
v3_active_flag = args["v3_active_flag"]
v4_active_flag = args["v4_active_flag"]
corrections_selling_probability_cutoff = args["corrections_selling_probability_cutoff"]
corrections_cumulative_probability_cutoff = args["corrections_cumulative_probability_cutoff"]
drug_type_list_v4 = args["drug_type_list_v4"]
omit_npi = args["omit_npi"]
keep_all_generic_comp = args["keep_all_generic_comp"]
outlier_check = args["outlier_check"]
open_po_turbhe_active = args["open_po_turbhe_active"]
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
if reset_stores == [0]: # Fetch scheduled IPC stores from OPS ORACLE
store_query = """
select "id", name, "opened-at" as opened_at
from "{read_schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {0}
""".format(str(exclude_stores).replace('[', '(').replace(']', ')'),
read_schema=read_schema)
stores = rs_db_read.get_df(store_query)
# only stores aged > 3 months are eligible
store_id = stores.loc[dt.datetime.now() -
stores['opened_at'] >
dt.timedelta(days=90), 'id'].values
# QUERY TO GET SCHEDULED STORES AND TYPE FROM OPS ORACLE
pg_internal = PostGre(is_internal=True)
pg_internal.open_connection()
reset_store_query = """
SELECT
"ssr"."id" as object_id,
"s"."bpos_store_id" as store_id,
"dc"."slug" as type,
"ssr"."drug_grade"
FROM
"safety_stock_reset_drug_category_mapping" ssr
INNER JOIN "ops_store_manifest" osm
ON ( "ssr"."ops_store_manifest_id" = "osm"."id" )
INNER JOIN "retail_store" s
ON ( "osm"."store_id" = "s"."id" )
INNER JOIN "drug_category" dc
ON ( "ssr"."drug_category_id" = "dc"."id")
WHERE
(
( "ssr"."should_run_daily" = TRUE OR
"ssr"."trigger_dates" && ARRAY[ date('{reset_date}')] )
AND "ssr"."is_auto_generate" = TRUE
AND "osm"."is_active" = TRUE
AND "osm"."is_generate_safety_stock_reset" = TRUE
AND "dc"."is_safety_stock_reset_enabled" = TRUE
AND "dc"."is_active" = TRUE
AND s.bpos_store_id in {store_list}
)
""".format(
store_list=str(list(store_id)).replace('[', '(').replace(']', ')'),
reset_date=reset_date)
reset_store_ops = pd.read_sql_query(reset_store_query,
pg_internal.connection)
pg_internal.close_connection()
reset_store_ops['api_call_response'] = False
scheduled_stores = reset_store_ops['store_id'].unique()
type_list = None
# for batch run
reset_stores = list(set(scheduled_stores).intersection(batch_stores))
else:
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
reset_store_ops = None
# for batch run
reset_stores = list(set(reset_stores).intersection(batch_stores))
scheduled_stores = [0]
```
# Execute Main Function
```
""" calling the main function """
status, order_value_all, new_drug_entries, missed_entries, \
df_outliers_all, manual_doid_upd_all = main(
debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
v3_active_flag, v4_active_flag, v5_active_flag, v6_active_flag,
d_class_consolidation, wh_gen_consolidation, goodaid_ss_flag,
keep_all_generic_comp, omit_npi, ga_inv_weight, rest_inv_weight,
top_inv_weight, v6_type_list, v6_ptr_cut_off, open_po_turbhe_active,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
outlier_check, rs_db_read, rs_db_write, read_schema, write_schema,
s3, django, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
```
# Send Email Notification
```
# save email attachments to s3
order_value_all_uri = s3.save_df_to_s3(order_value_all,
file_name=f"order_value_all_{reset_date}.csv")
new_drug_entries_uri = s3.save_df_to_s3(new_drug_entries,
file_name=f"new_drug_entries_{reset_date}.csv")
missed_entries_uri = s3.save_df_to_s3(missed_entries,
file_name=f"missed_entries_{reset_date}.csv")
df_outliers_all_uri = s3.save_df_to_s3(df_outliers_all,
file_name=f"df_outliers_all_{reset_date}.csv")
manual_doid_upd_all_uri = s3.save_df_to_s3(manual_doid_upd_all,
file_name=f"manual_doid_upd_all_{reset_date}.csv")
# SEND EMAIL ATTACHMENTS (IPC-RUN STATUS)
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"IPC2.0 SS Reset: (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Batch: {str(run_batch)+'/'+str(tot_batch)}
Batch Stores: {batch_stores}
Reset Stores: {reset_stores}
Scheduled Stores: {scheduled_stores}
Job Params: {args}
""",
to_emails=email_to, file_uris=[order_value_all_uri,
new_drug_entries_uri,
missed_entries_uri])
# SEND EMAIL ATTACHMENTS (OUTLIER WARNING)
outlier_count = df_outliers_all.shape[0]
if outlier_count > 0:
outlier_order_qty = df_outliers_all["to_order_quantity"].sum()
outlier_order_val = round(df_outliers_all["to_order_value"].sum(), 2)
outlier_stores = list(df_outliers_all["store_id"].unique())
email.send_email_file(
subject=f"IPC2.0 OUTLIER WARNING (SM-{env}) {reset_date}: "
f"Cases {outlier_count}",
mail_body=f"""
Batch: {str(run_batch)+'/'+str(tot_batch)}
Stores: {outlier_stores}
Cases: {outlier_count}
Order Quantity: {outlier_order_qty}
Order Value: {outlier_order_val}
        Note: For the detected cases, SS, ROP & OUP are set to 0.
Please verify and upload attached file using DOID-GLUE JOB.
""",
to_emails=email_to, file_uris=[df_outliers_all_uri,
manual_doid_upd_all_uri])
logger.info("Script ended")
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/ipc2-ss-main/ipc2_ss_main.ipynb | ipc2_ss_main.ipynb |
```
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from dateutil.relativedelta import relativedelta
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
import pandas as pd
import numpy as np
import dateutil
import datetime
from datetime import timedelta
from dateutil.tz import gettz
import gc
from zeno_etl_libs.django.api import Sql
import typing
from functools import reduce
from sklearn.preprocessing import StandardScaler
env = 'dev'
datem = ''
sqlwrite = 'yes'
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'drug-grades'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
if datem == '':
    date1 = datetime.date.today().strftime('%Y-%m-%d')
    # logger.info('Selecting the default date as the job run date: ' + str(date1))
else:
    date1 = datem
    # logger.info('Entry taken from params: ' + str(date1))
q_aa = f"""
select
"bill-id",
"patient-id" ,
"store-id" ,
"store-name" as "name",
"drug-id" ,
"drug-name" ,
"type" ,
"created-date" as "created-at" ,
NVL(sum(case when "bill-flag" = 'gross' then quantity end),
0) as "sold-quantity",
NVL(sum(case when "bill-flag" = 'return' then quantity end),
0) as "returned-quantity",
sum("net-quantity") as "quantity",
sum(rate) as "rate"
from
"prod2-generico"."sales"
where
datediff('day','{date1}',
"created-date") between -180 and -1
group by
"bill-id",
"patient-id" ,
"store-id" ,
"store-name",
"drug-id" ,
"drug-name" ,
"type" ,
"created-date"
having
sum("net-quantity")>0
"""
logger.info(q_aa)
df_aa = rs_db.get_df(q_aa)
df_aa.columns = [c.replace('-', '_') for c in df_aa.columns]
# logger.info('Shape of data',str(df_aa.shape))
df_aa['quantity'].fillna(0,inplace=True)
df_aa['rate'].fillna(0,inplace=True)
df_aa['value'] = df_aa['rate'] * df_aa['quantity']
# =============================================================================
# Store opened at
# =============================================================================
q_bb = """
SELECT
"id",
datediff('day' ,
"opened-at",
'{}') as "age"
FROM
"prod2-generico"."stores"
WHERE
datediff('day' ,
"opened-at",
'{}' ) < 180
""".format(date1,date1)
df_bb = rs_db.get_df(q_bb)
df_bb.columns = [c.replace('-', '_') for c in df_bb.columns]
logger.info('Shape of stores data:{}'.format(str(df_bb.shape)))
def store_age(df_bb):
if df_bb['age'] >= 90:
return '3-6 month'
else:
return '1-3 month'
df_bb['age1'] = df_bb.apply(lambda x: store_age(x), axis=1)
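# Equivalent vectorised form (sketch; same behaviour as store_age above):
#   df_bb['age1'] = np.where(df_bb['age'] >= 90, '3-6 month', '1-3 month')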
# =============================================================================
# quantity sold
# =============================================================================
df_qty = df_aa.groupby(['drug_id', 'store_id'])[['quantity']].sum().reset_index()
df_qty1 = df_aa.groupby(['drug_id'])[['quantity']].sum().reset_index()
# =============================================================================
# revenue
# =============================================================================
df_revenue = df_aa.groupby(['drug_id', 'store_id'])[['value']].sum().reset_index()
df_revenue1 = df_aa.groupby(['drug_id'])[['value']].sum().reset_index()
# =============================================================================
# no. of bills
# =============================================================================
df_bills = df_aa.groupby(['drug_id', 'store_id'])[['bill_id']].nunique().reset_index()
df_bills1 = df_aa.groupby(['drug_id'])[['bill_id']].nunique().reset_index()
# =============================================================================
# no. of consumers
# =============================================================================
df_consumers = df_aa.groupby(['drug_id', 'store_id'])[['patient_id']].nunique().reset_index()
df_consumers1 = df_aa.groupby(['drug_id'])[['patient_id']].nunique().reset_index()
df_aa['created_at'] = pd.to_datetime(df_aa['created_at'])
# =============================================================================
# no. of days sold
# =============================================================================
df_aa['days'] = df_aa['created_at'].dt.date
df_days = df_aa.groupby(['drug_id', 'store_id'])[['days']].nunique().reset_index()
df_days1 = df_aa.groupby(['drug_id'])[['days']].nunique().reset_index()
# =============================================================================
# recency (last sold)
# =============================================================================
days = timedelta(1)
period_end_d = pd.to_datetime(date1) - days
df_recency = df_aa.groupby(['drug_id', 'store_id'])[['created_at']].max().reset_index()
df_recency1 = df_aa.groupby(['drug_id'])[['created_at']].max().reset_index()
df_recency['recency'] = (pd.to_datetime(period_end_d) - df_recency['created_at']).dt.days
df_recency1['recency'] = (pd.to_datetime(period_end_d) - df_recency1['created_at']).dt.days
# =============================================================================
# merge all features
# =============================================================================
meg = [df_qty, df_revenue, df_bills, df_consumers, df_days, df_recency]
df_features = reduce(lambda left, right: pd.merge(left, right, on=[
'drug_id', 'store_id'], how='outer'), meg)
del (df_features['created_at'])
meg1 = [df_qty1, df_revenue1, df_bills1, df_consumers1, df_days1, df_recency1]
df_features1 = reduce(lambda left, right: pd.merge(left, right, on=[
'drug_id'], how='outer'), meg1)
del (df_features1['created_at'])
df_features = df_features1.append(df_features)
df_features['store_id'] = df_features['store_id'].fillna(999)
df_features = df_features.reset_index().drop('index', axis=1)
# =============================================================================
# creating standard scaler store wise
# =============================================================================
temp_normalise = df_features[['store_id', 'quantity', 'value', 'bill_id', 'patient_id', 'days', 'recency']]
class SklearnWrapper:
def __init__(self, transform: typing.Callable):
self.transform = transform
def __call__(self, df):
transformed = self.transform.fit_transform(df.values)
return pd.DataFrame(transformed, columns=df.columns, index=df.index)
# This one will apply any sklearn transform you pass into it to a group.
df_rescaled = (
temp_normalise.groupby('store_id')
.apply(SklearnWrapper(StandardScaler()))
.drop('store_id', axis=1)
)
temp2_normalise = df_rescaled
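# SklearnWrapper fits a separate scaler per group, so each store's features are
# standardised within that store. A toy sketch of the same idea on made-up data
# (illustrative only, not executed as part of the job):
#   toy = pd.DataFrame({'store_id': [1, 1, 2, 2], 'quantity': [1.0, 3.0, 10.0, 30.0]})
#   toy_scaled = (toy.groupby('store_id')
#                    .apply(SklearnWrapper(StandardScaler()))
#                    .drop('store_id', axis=1))
#   # 'quantity' now has mean ~0 and std ~1 within store 1 and store 2 separately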
# =============================================================================
# importing pca_components and applying to the scaled data set
# =============================================================================
pca_file_name = 'drug_grades/pca_components.csv'
pca_file_path = s3.download_file_from_s3(file_name=pca_file_name)
pca_components = pd.read_csv(pca_file_path, delimiter=',')
# =============================================================================
# creating Euclidean distance calculator and assigning each drug to its nearest cluster
# =============================================================================
cluster_file_name = 'drug_grades/cluster_centers_1.csv'
cluster_file_path = s3.download_file_from_s3(file_name=cluster_file_name)
cluster_centers_set = pd.read_csv(cluster_file_path, delimiter=',')
cluster_centers_set = np.array(cluster_centers_set)
# Euclidean distance calculator
def dist(a, b, ax=1):
return np.linalg.norm(a - b, axis=ax)
clusters = []
test = np.dot(np.array(temp2_normalise), (np.array(pca_components).T))
for i in range(len(test)):
distances = dist(np.array(test[i]), (cluster_centers_set))
cluster = np.argmin(distances)
clusters.append(cluster)
cluster_df = pd.DataFrame(clusters)
cluster_df.columns = ['final_cluster']
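# The loop above assigns each PCA-transformed drug to its nearest cluster centre
# by Euclidean distance. A vectorised equivalent (sketch; would need scipy, which
# this script does not import):
#   from scipy.spatial.distance import cdist
#   clusters = cdist(test, cluster_centers_set).argmin(axis=1)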
# =============================================================================
# Summary pivot 1
# =============================================================================
test_df = pd.DataFrame(test)
cluster_lvl_1 = pd.merge(test_df, cluster_df,
right_index=True, left_index=True)
cluster_lvl1_output = pd.merge(cluster_lvl_1, df_features, how='inner',
left_index=True, right_index=True)
cluster_lvl1_output_pivot = cluster_lvl1_output.groupby(['final_cluster', 'store_id'],
as_index=False).agg({'drug_id': ['count'],
'value': ['sum'],
'bill_id': ['mean'],
'patient_id': ['mean'],
'days': ['mean'],
'recency': ['mean']}).reset_index(
drop=True)
cluster_lvl1_output_pivot.columns = ['_'.join(x) for x in
cluster_lvl1_output_pivot.columns.ravel()]
cluster_lvl1_output_pivot_name = 'drug_grades/{}.csv'.format(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
# Uploading File to S3
s3.save_df_to_s3(df=cluster_lvl1_output_pivot, file_name=cluster_lvl1_output_pivot_name)
# =============================================================================
# # 2nd level
# =============================================================================
# =============================================================================
# Further split of large cluster
# =============================================================================
further_split_lvl2 = cluster_lvl1_output[cluster_lvl1_output['final_cluster'] == 0]
# change features here if needed
further_split_lvl2 = pd.DataFrame(further_split_lvl2[[0, 1, 2, 3]])
further_split_lvl2_mat = np.array(further_split_lvl2)
cluster2_file_name = 'drug_grades/cluster_centers_2.csv'
cluster2_file_path = s3.download_file_from_s3(file_name=cluster2_file_name)
cluster_centers_set2 = pd.read_csv(cluster2_file_path, delimiter=',')
cluster_centers_set2 = np.array(cluster_centers_set2)
clusters_lvl2 = []
for i in range(len(further_split_lvl2)):
distances = dist((further_split_lvl2_mat[i]), (cluster_centers_set2))
clusterlvl2 = np.argmin(distances)
clusters_lvl2.append(clusterlvl2)
further_split_lvl2_df = pd.DataFrame(further_split_lvl2)
further_split_lvl2_df['final_cluster_lvl2'] = clusters_lvl2
# =============================================================================
# Summary pivot 2
# =============================================================================
cluster_lvl2_output = pd.merge(cluster_lvl1_output, further_split_lvl2_df[['final_cluster_lvl2']],
how='inner',
left_index=True, right_index=True)
cluster_lvl2_output_pivot = cluster_lvl2_output.groupby(['final_cluster_lvl2', 'store_id'],
as_index=False).agg({'drug_id': ['count'],
'value': ['sum'],
'bill_id': ['mean'],
'patient_id': ['mean'],
'days': ['mean'],
'recency': ['mean']}).reset_index(
drop=True)
cluster_lvl2_output_pivot.columns = ['_'.join(x) for x in
cluster_lvl2_output_pivot.columns.ravel()]
cluster_lvl2_output_pivot_name = 'drug_grades/{}.csv'.format(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
# Uploading File to S3
s3.save_df_to_s3(df=cluster_lvl2_output_pivot, file_name=cluster_lvl2_output_pivot_name)
# =============================================================================
# Final cluster
# =============================================================================
cluster_file = cluster_lvl1_output[cluster_lvl1_output['final_cluster'] != 0]
final_cluster_file = cluster_file.append(cluster_lvl2_output)
final_cluster_file['cluster'] = final_cluster_file['final_cluster'
].astype(str) + '_' + final_cluster_file['final_cluster_lvl2'].astype(str)
final_output_pivot = final_cluster_file.groupby(['cluster', 'store_id'],
as_index=False).agg({'drug_id': ['count'],
'value': ['sum'],
'bill_id': ['mean'],
'patient_id': ['mean'],
'days': ['mean'],
'recency': ['mean']}).reset_index(drop=True)
final_output_pivot.columns = ['_'.join(x) for x in
final_output_pivot.columns.ravel()]
final_output_pivot['drug%'] = final_output_pivot['drug_id_count'
] / final_output_pivot['drug_id_count'].sum()
final_output_pivot['spend%'] = final_output_pivot['value_sum'
] / final_output_pivot['value_sum'].sum()
final_output_pivot['drug%']=final_output_pivot['drug%'].astype('float64')
final_output_pivot['spend%']=final_output_pivot['spend%'].astype('float64')
final_output_pivot['factor'] = final_output_pivot['spend%'] / final_output_pivot['drug%']
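# factor > 1 means the cluster's share of spend exceeds its share of drug rows,
# i.e. the cluster skews toward higher-value drugs.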
# =============================================================================
# cluster allocation
# =============================================================================
new_store = df_bb['id'].values
new_store1 = df_bb['id'][df_bb['age1'] == '3-6 month'].values
new_store2 = df_bb['id'][df_bb['age1'] == '1-3 month'].values
new_store1_cluster = final_cluster_file[final_cluster_file.store_id.isin(new_store1)]
new_store2_cluster = final_cluster_file[final_cluster_file.store_id.isin(new_store2)]
Enterprise_cluster = final_cluster_file[final_cluster_file.store_id == 999]
old_stores_cluster = final_cluster_file[(~final_cluster_file.store_id.isin(new_store)) &
(final_cluster_file.store_id != 999)]
new_store1_cluster.drop(['cluster'], axis=1, inplace=True)
new_store2_cluster.drop(['cluster'], axis=1, inplace=True)
new_store1_predict = pd.merge(new_store1_cluster, Enterprise_cluster[['drug_id', 'cluster']], how='left',
left_on='drug_id', right_on='drug_id')
for i in range(len(new_store2)):
Enterprise_temp = Enterprise_cluster.copy()
Enterprise_temp['new_store_id'] = new_store2[i]
if i == 0:
new_store2_predict_data = Enterprise_temp
else:
new_store2_predict_data = new_store2_predict_data.append(Enterprise_temp)
new_store2_predict = new_store2_predict_data
del new_store2_predict['store_id']
new_store2_predict = new_store2_predict.rename({'new_store_id': 'store_id'}, axis=1)
cluster_all = new_store1_predict.append(new_store2_predict)
cluster_all = cluster_all.append(Enterprise_cluster)
cluster_all = cluster_all.append(old_stores_cluster)
# =============================================================================
# Summary report
# =============================================================================
cluster_all_pivote = cluster_all.groupby(['cluster', 'store_id'],
as_index=False).agg({'drug_id': ['count'],
'value': ['sum'],
'bill_id': ['mean'],
'patient_id': ['mean'],
'days': ['mean'],
'recency': ['mean']}).reset_index(drop=True)
cluster_all_pivote.columns = ['_'.join(x) for x in
cluster_all_pivote.columns.ravel()]
cluster_all_pivote['drug%'] = cluster_all_pivote['drug_id_count'
] / cluster_all_pivote['drug_id_count'].sum()
cluster_all_pivote['spend%'] = cluster_all_pivote['value_sum'
] / cluster_all_pivote['value_sum'].sum()
cluster_all_pivote['drug%'] = cluster_all_pivote['drug%'].astype('float64')
cluster_all_pivote['spend%'] = cluster_all_pivote['spend%'].astype('float64')
cluster_all_pivote['factor'] = cluster_all_pivote['spend%'
] / cluster_all_pivote['drug%']
cluster_all_pivote_name = 'drug_grades/{}.csv'.format(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
# Uploading File to S3
s3.save_df_to_s3(df=cluster_all_pivote, file_name=cluster_all_pivote_name)
# =============================================================================
# Assigning Cluster
# =============================================================================
def assign_cluster(cluster_all):
if cluster_all['cluster'] == '1_nan':
return 'A1'
elif cluster_all['cluster'] == '2_nan':
return 'A1'
elif cluster_all['cluster'] == '4_nan':
return 'A2'
elif cluster_all['cluster'] == '0_2.0':
return 'B'
elif cluster_all['cluster'] == '3_nan':
return 'D'
elif cluster_all['cluster'] == '0_0.0':
return 'C'
elif cluster_all['cluster'] == '0_1.0':
return 'C'
else:
return cluster_all['cluster']
cluster_all['grade'] = cluster_all.apply(lambda row: assign_cluster(row), axis=1)
cluster_all_name = 'drug_grades/{}.csv'.format(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
# Uploading File to S3
s3.save_df_to_s3(df=cluster_all, file_name=cluster_all_name)
cluster_all_pivote1 = cluster_all.groupby(['grade', 'store_id'],
as_index=False).agg({'drug_id': ['count'],
'value': ['sum'],
'bill_id': ['mean'],
'patient_id': ['mean'],
'days': ['mean'],
'recency': ['mean']}).reset_index(drop=True)
cluster_all_pivote1.columns = ['_'.join(x) for x in
cluster_all_pivote1.columns.ravel()]
cluster_all_pivote1['drug%'] = cluster_all_pivote1['drug_id_count'
] / cluster_all_pivote1['drug_id_count'].sum()
cluster_all_pivote1['spend%'] = cluster_all_pivote1['value_sum'
] / cluster_all_pivote1['value_sum'].sum()
cluster_all_pivote1['drug%'] = cluster_all_pivote1['drug%'].astype('float64')
cluster_all_pivote1['spend%'] = cluster_all_pivote1['spend%'].astype('float64')
cluster_all_pivote1['factor'] = cluster_all_pivote1['spend%'
] / cluster_all_pivote1['drug%']
cluster_all_pivote1_name = 'drug_grades/{}.csv'.format(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
# Uploading File to S3
s3.save_df_to_s3(df=cluster_all_pivote1, file_name=cluster_all_pivote1_name)
final_data = cluster_all[['store_id', 'drug_id', 'grade']]
final_data['calculation_date'] = date1
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
final_data.columns = [c.replace('_', '-') for c in final_data.columns]
final_data['created-at'] = datetime.datetime.now()
final_data['store-id'] = final_data['store-id'].astype(int)
final_data['drug-id'] = final_data['drug-id'].astype(int)
s3.write_df_to_db(df=final_data[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
'''getting current grades and replacing them with new if changed'''
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
for store_id in final_data['store-id'].unique():
if sqlwrite == 'yes':
if store_id != 999:
current_grade_query = f'''
SELECT
id,
"store-id",
"drug-id",
"drug-grade"
FROM "prod2-generico"."drug-order-info-data"
WHERE "store-id" = {store_id}
'''
current_grade = rs_db.get_df(current_grade_query)
current_grade.columns = [c.replace('-', '_') for c in current_grade.columns]
final_data_store = final_data.loc[
final_data['store-id'] == store_id,
['store-id', 'drug-id', 'grade']]
final_data_store.columns = [c.replace('-', '_') for c in final_data_store.columns]
grade_joined = current_grade.merge(
final_data_store, on=['store_id', 'drug_id'], how='outer')
grade_joined.loc[grade_joined['grade'].isna(), 'grade'] = 'NA'
new_drug_entries = new_drug_entries.append(
grade_joined[grade_joined['id'].isna()])
grade_joined = grade_joined[~grade_joined['id'].isna()]
grade_joined['change_flag'] = np.where(
grade_joined['drug_grade'] == grade_joined['grade'],
'same', 'changed')
logger.info('Store ' + str(store_id))
logger.info('Total grades calculated' + str(final_data_store.shape[0]))
logger.info('Grades changed' + str(grade_joined[
grade_joined['change_flag'] == 'changed'].shape[0]))
grades_to_change = grade_joined.loc[
grade_joined['change_flag'] == 'changed',
['id', 'store_id', 'drug_id', 'grade']]
grades_to_change.columns = ['id', 'store_id', 'drug_id', 'drug_grade']
data_to_be_updated_list = list(
grades_to_change[['id', 'drug_grade']].apply(dict, axis=1))
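            # data_to_be_updated_list holds one patch dict per DrugOrderInfoData row,
            # e.g. (illustrative values only):
            #   [{'id': 101, 'drug_grade': 'A1'}, {'id': 102, 'drug_grade': 'C'}, ...]
            # and is passed to sql.update below.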
sql = Sql()
sql.update(
{'table': 'DrugOrderInfoData',
'data_to_be_updated': data_to_be_updated_list}, logger
)
update_test_query = f'''
SELECT
"store-id",
"drug-id",
"drug-grade"
FROM "prod2-generico"."drug-order-info-data"
WHERE "store-id" = {store_id}
and "grade-updated-at" >= CURRENT_TIMESTAMP - INTERVAL '10 MINUTE'
and "grade-updated-at" < CURRENT_TIMESTAMP
'''
update_test = rs_db.get_df(update_test_query)
update_test.columns = [c.replace('-', '_') for c in update_test.columns]
update_test = grades_to_change.merge(
update_test, how='left', on=['store_id', 'drug_id'],
suffixes=('', '_updated'))
mismatch = update_test[
update_test['drug_grade'] != update_test['drug_grade_updated']]
missed_entries = missed_entries.append(mismatch)
logger.info('For store ' + str(store_id) + 'update mismatch count'
+ str(mismatch.shape[0]))
new_drug_entries_name = 'drug_grades/new_drug_entries_{}.csv'.format(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
# Uploading File to S3
s3.save_df_to_s3(df=new_drug_entries[
['store_id', 'drug_id', 'drug_grade']], file_name=new_drug_entries_name)
missed_entries_name = 'drug_grades/missed_entries_{}.csv'.format(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
# Uploading File to S3
s3.save_df_to_s3(df=missed_entries, file_name=missed_entries_name)
rs_db.close_connection()
rs_db_write.close_connection()
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/drug-grades/drug-grades.ipynb | drug-grades.ipynb |
```
!pip install -U pandasql
!pip install zeno_etl_libs==1.0.38
"""
Author:[email protected]
Purpose: Churn prediction
"""
import json
import os
from datetime import datetime as dt
from pickle import load, dump
import argparse
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, roc_curve, plot_roc_curve
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.tree import DecisionTreeClassifier
env = "dev"
email_to = ["[email protected]"]
re_train = 1
features = []
schema = "prod2-generico"
prod_write = 1
# params ( retrain or just predict from trained model )
if re_train:
end = str(dt.today().date())
start = str(dt.today().date() - relativedelta(months=7))
else:
end = str(dt.today().date())
start = str(dt.today().date() - relativedelta(months=6))
features = ['patient-id'] + features
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
#table_name = 'customer-behaviour-segment'
rs_db = DB()
rs_db.open_connection()
table_name = 'consumer-churn'
rs_db_write = DB(read_only=not prod_write)  # write-enabled connection when prod_write = 1
rs_db_write.open_connection()
s3 = S3(bucket_name='datascience-manager')
def seek():
""" get the data """
pass
def run_fun(rs_db, s3):
# write logic here
pass
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
logger.info(table_info)
read_schema = 'prod2-generico'
if isinstance(table_info, type(None)):
print(f"table: {table_name} do not exist")
else:
truncate_query = f"""
DELETE
FROM
"{read_schema}"."consumer-churn"
WHERE
DATE("created-at") = '{dt.now().date()}';"""
logger.info(truncate_query)
rs_db_write.execute(truncate_query)
# data_fetching
data_q = f"""
select
rm."patient-id" ,
rm.id as "bill-id",
rm."bill-date",
rm."created-at" as "bill-created-at",
rm."total-spend",
rm."spend-generic",
rm."value-segment",
rm."system-age-days" as "system-age"
from
"{read_schema}"."retention-master" rm
where
rm."bill-date" between '{start}' and '{end}';
"""
data = rs_db.get_df(data_q)
def dataframe_creator(bill_data, re_train, features=[]) :
# Preparing dataset
    bill_data['bill-date'] = pd.to_datetime(bill_data['bill-date'])
if re_train:
df_end = str(dt.today().date() + relativedelta(months=-1))
else:
df_end = str(dt.today().date())
    df = bill_data[(bill_data['bill-date'] < df_end)]
df = df[['patient-id', 'bill-id', 'bill-date', 'bill-created-at', 'total-spend',
'spend-generic', 'value-segment', 'system-age']]
logger.info(df.info())
# data type correction
df['total-spend'] = df['total-spend'].astype(float)
df['spend-generic'] = df['spend-generic'].astype(float)
# total spend and ABV in 6 months
df2_nob_spend = df.groupby('patient-id', as_index=False).agg({'bill-id': 'nunique',
'total-spend': ['mean', 'sum'],
'spend-generic': 'sum'})
df2_nob_spend['generic-pc'] = df2_nob_spend[('spend-generic', 'sum')] / df2_nob_spend[('total-spend', 'sum')]
df2_nob_spend = df2_nob_spend.drop(columns=[('spend-generic', 'sum')])
df2_nob_spend.columns = ['patient-id', 'nob', 'abv', 'total-spend', 'generic-pc']
df3_rec_bill = df.sort_values('bill-created-at', ascending=False)
df3_rec_bill = df3_rec_bill.groupby('patient-id', as_index=False).head(1)
df3_rec_bill['value-segment'] = df3_rec_bill['value-segment'].map({'platinum': 4,
'gold': 3,
'silver': 2,
'others': 1})
df3_rec_bill.loc[:, 'today'] = df_end
df3_rec_bill['recency'] = pd.to_datetime(df3_rec_bill['today']) - df3_rec_bill['bill-date']
df3_rec_bill['recency'] = df3_rec_bill['recency'].dt.days
df3_rec_bill = df3_rec_bill[['patient-id', 'recency', 'system-age', 'value-segment']]
df4_bill_diff = df[['bill-id', 'patient-id', 'bill-date']]
df4_bill_diff = df4_bill_diff.sort_values('bill-date')
df4_bill_diff['shifted-date'] = df4_bill_diff.groupby('patient-id', as_index=False)['bill-date'].shift(1)
df4_bill_diff['bill-diff'] = df4_bill_diff['bill-date'] - df4_bill_diff['shifted-date']
df4_bill_diff['bill-diff'] = df4_bill_diff['bill-diff'].dt.days
df4_bill_diff['bill-diff'] = df4_bill_diff.groupby('patient-id', as_index=False)['bill-diff'].backfill()
df4_bill_diff = df4_bill_diff.groupby('patient-id', as_index=False).agg({'bill-diff': ['mean', 'std']})
df4_bill_diff = df4_bill_diff.fillna(0)
df4_bill_diff.columns = ['patient-id', 'mean-purchase-interval', 'std-purchase-interval']
final_df = pd.merge(df2_nob_spend, df3_rec_bill, on='patient-id', how='left')
final_df = pd.merge(final_df, df4_bill_diff, on='patient-id', how='left')
for i in final_df.columns:
if final_df[i].dtype == 'float64':
final_df[i] = final_df[i].round(4)
if re_train:
        train_label = bill_data[(bill_data['bill-date'] >= df_end)]
train_label = train_label[['patient-id']].drop_duplicates()
train_label['churn'] = 0
final_df = pd.merge(final_df, train_label, on='patient-id', how='left')
final_df['churn'] = final_df['churn'].fillna(1)
final_df = final_df.drop_duplicates(subset='patient-id')
else:
final_df = final_df.drop_duplicates(subset='patient-id')
final_df = final_df[features]
final_df = final_df.dropna()
return final_df
def find_optimal_cutoff(target, predicted):
""" Find the optimal probability cutoff point for a classification model
----------
target : Matrix with dependent or target data, where rows are observations
predicted : Matrix with predicted data, where rows are observations
Returns
-------
list type, with optimal cutoff value
"""
fpr, tpr, threshold = roc_curve(target, predicted)
i = np.arange(len(tpr))
roc = pd.DataFrame(
{'tf': pd.Series(tpr - (1 - fpr), index=i), 'threshold': pd.Series(threshold, index=i)})
roc_t = roc.iloc[(roc.tf - 0).abs().argsort()[:1]]
return list(roc_t['threshold'])
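# Illustrative check of the cutoff logic (kept commented so the job is unchanged):
# the returned threshold is the score at which sensitivity ~= specificity.
#   _y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1])
#   _y_score = np.array([0.05, 0.2, 0.35, 0.6, 0.4, 0.65, 0.8, 0.9])
#   find_optimal_cutoff(_y_true, _y_score)  # -> [0.6]; sensitivity == specificity == 0.75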
if re_train == 1 :
final_df = dataframe_creator(data, re_train=1, features=[])
else :
final_df = dataframe_creator(data, re_train=0, features=features)
if re_train:
y = final_df[['churn']]
X = final_df.drop(columns=['churn', 'patient-id'])
# train-test split
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.20,
random_state=0,
stratify=y)
X_train, X_val, y_train, y_val = train_test_split(X_train,
y_train,
test_size=0.20,
random_state=0,
stratify=y_train)
# Baseline DecisionTree Model
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
dtc_report = classification_report(y_val, dtc.predict(X_val))
logger.info('decision_tree baseline model classification report validation data : '
'{}'.format(dtc_report))
# extracting all alphas
alphas = dtc.cost_complexity_pruning_path(X_train, y_train)
alphas = alphas['ccp_alphas']
# finding best alphas
g_search = GridSearchCV(dtc,
param_grid={'ccp_alpha': list(set(alphas.round(6)))},
cv=5,
scoring='roc_auc',
n_jobs=-1,
verbose=0)
# fit the grid search to the data
g_search.fit(X_train, y_train)
# putting best params in DT
dtc = DecisionTreeClassifier(**g_search.best_params_)
dtc.fit(X_train, y_train)
# bp = best params
dtc_report_bp_train = classification_report(y_train, dtc.predict(X_train))
dtc_report_bp_val = classification_report(y_val, dtc.predict(X_val))
logger.info('decision_tree tuned model classification report train : '
'{}'.format(dtc_report_bp_train))
logger.info('decision_tree tuned model classification report validation : '
'{}'.format(dtc_report_bp_val))
ft_imp = pd.DataFrame(data=dtc.feature_importances_,
index=X_train.columns).sort_values(0, ascending=False)
ft_imp[1] = ft_imp[0].cumsum()
# feature selection
feat_selection = ft_imp[ft_imp[1] < 0.90]
if len(feat_selection) <= 5:
feat = ft_imp.index[:5]
else:
feat = feat_selection.index
X_train = X_train[feat]
X_test = X_test[feat]
X_val = X_val[feat]
X = X[feat]
logger.info('feature selected : {}'.format(feat))
# Taking best params from DT
depth = np.linspace(dtc.get_depth() / 2, dtc.get_depth(), 5).round()
alpha = dtc.ccp_alpha
# Create the parameter grid based on the results of best decision tree
param_grid = {
'bootstrap': [True],
'max_depth': depth,
'max_features': ["sqrt", "log2"],
'ccp_alpha': [alpha],
'n_estimators': [25, 50, 75, 100, 150, 200, 250]
}
# Create a based model
rf = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator=rf, param_grid=param_grid,
cv=5, n_jobs=-1, verbose=0, scoring='roc_auc')
grid_search.fit(X_train, y_train)
rf = RandomForestClassifier(**grid_search.best_params_)
rf.fit(X_train, y_train)
# classification report
rf_report_bp_train = classification_report(y_train, rf.predict(X_train))
rf_report_bp_val = classification_report(y_val, rf.predict(X_val))
rf_report_bp_test = classification_report(y_test, rf.predict(X_test))
logger.info('random_forest tuned model classification report train : '
'{}'.format(rf_report_bp_train))
logger.info('random_forest tuned model classification report validation : '
'{}'.format(rf_report_bp_val))
logger.info('random_forest tuned model classification report test : '
'{}'.format(rf_report_bp_test))
cutoff = find_optimal_cutoff(y_train, rf.predict_proba(X_train)[:, 1])[0]
# Train data
#plot_roc_curve(rf, X_train, y_train)
# plt.savefig(output_dir_path + 'roc_curve_Train.png')
logger.info('optimal cutoff value : {}'.format(round(cutoff, 3)))
# Validation data
#plot_roc_curve(rf, X_val, y_val)
# plt.savefig(output_dir_path + 'roc_curve_Val.png')
# Test data
#plot_roc_curve(rf, X_test, y_test)
# plt.savefig(output_dir_path + 'roc_curve_Test.png')
# # Saving model
# dump(rf, open(output_dir_path + 'model.pkl', 'wb'))
# script_manager_obj.s3_admin_obj.upload_object(output_dir_path + 'model.pkl',
# f'data/Job-{script_manager_obj.job_id}/input/model.pkl')
if re_train:
final_df = dataframe_creator(data, re_train=0, features=feat)
final_df['churn-prob'] = rf.predict_proba(final_df)[:, 1]
final_df['churn-prediction'] = np.where(final_df['churn-prob'] >= cutoff, 1, 0)
final_df['created-at'] = dt.now().date()
final_df['re-trained'] = 1
else:
# script_manager_obj.s3_admin_obj.get_object(f'data/Job-{script_manager_obj.job_id}/input/model.pkl',
# input_dir_path)
# rf = load(open(input_dir_path + 'model.pkl', 'rb'))
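    # NOTE: this non-retrain path relies on a previously persisted model and cutoff
    # being loaded here (see the commented-out model.pkl load above); job_data_params
    # below is a reference carried over from that earlier job framework.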
pred = final_df.drop(columns=['patient-id'])
final_df['churn-prob'] = rf.predict_proba(pred)[:, 1]
final_df['churn-prediction'] = np.where(final_df['churn-prob'] >= job_data_params['cutoff'], 1, 0)
final_df['created-at'] = dt.now().date()
final_df['re-trained'] = 0
# Write to csv
s3.save_df_to_s3(df=final_df,
file_name='consumer_churn_prediction_{}.csv'.format(dt.today()), index=False)
# data type correction
final_df['churn'] = final_df['churn'].astype(int)
final_df['value-segment'] = final_df['value-segment'].astype(int)
# upload to db
s3.write_df_to_db(df=final_df[table_info['column_name']],
table_name=table_name, db=rs_db_write, schema=schema)
email = Email()
subject = "Task status churn calcualtion"
mail_body = "Churn data upload succeeded"
file_uris= []
email.send_email_file(subject=subject,
mail_body=mail_body,
to_emails=email_to, file_uris=file_uris, file_paths=[])
# for action_dict in actions_list:
# if action_dict['category'] == 'EML':
# to_emails = action_dict['email_to']
# subject = 'churn prediction algo status : {}'.format(status)
# mail_body = 'Table fetch from {} to {} '.format(start, end)
# if job_data_params['re_train']:
# file_paths = [output_dir_path + 'debug_{}.txt'.format(script_manager_obj.job_id),
# output_dir_path + 'roc_curve_Train.png',
# output_dir_path + 'roc_curve_Val.png',
# output_dir_path + 'roc_curve_Test.png']
# else:
# file_paths = [output_dir_path + 'debug_{}.txt'.format(script_manager_obj.job_id)]
# send_email_file(subject, mail_body, to_emails, file_paths)
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/consumer-churn/consumer-churn.ipynb | consumer-churn.ipynb |
```
!pip install -U pandasql
!pip install zeno_etl_libs==1.0.31
!pip install pymssql
"""
# Author - [email protected], [email protected]
# Purpose - script with DSS write action for customer behaviour (transactional) segment
"""
import os
import json
import argparse
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from datetime import datetime as dt, timedelta
import numpy as np
import pandas as pd
import gc
import pandasql as ps
from dateutil.tz import gettz
# Normalisation (Standardization)
def standardize(x_var, mean_x, std_x):
"""
Standardizing 'x' variable by it's mean and std provided
"""
return (x_var - mean_x) / std_x
def cluster_predict(data_matrix, centroids):
"""
Predict cluster number, from data matrix given
And centroids given
Just find nearest cluster for each data point
"""
clusters = []
for unit in data_matrix:
distances = []
for center in centroids:
dist = np.sum((unit - center) ** 2)
# print(dist)
distances.append(dist)
# print(distances)
closest_centroid = np.argmin(distances)
# print(closest_centroid)
clusters.append(closest_centroid)
return clusters
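# Illustrative only (commented so it does not run in the job): with two 2-D
# centroids, each row of the data matrix gets the index of its nearest centroid.
#   _pts = np.array([[0.1, 0.0], [5.2, 4.9]])
#   _centers = np.array([[0.0, 0.0], [5.0, 5.0]])
#   cluster_predict(_pts, _centers)  # -> [0, 1]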
env = "dev"
email_to = ["[email protected]", "[email protected]"]
period_end_d_plus1 = "0"
schema = "public"
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
table_name = 'customer-behaviour-segment'
rs_db = DB()
rs_db.open_connection()
s3 = S3(bucket_name='datascience-manager')
def seek():
""" get the data """
pass
def run_fun(rs_db, s3):
# write logic here
pass
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
logger.info(table_info)
# Segment calculation date can either be fetched from db manager or from run-date
# Segment calculation date should be 1st of every month
try:
period_end_d_plus1 = str(dt.strptime(period_end_d_plus1, "%Y-%m-%d").date())
period_end_d_plus1 = period_end_d_plus1[:-3] + '-01'
except ValueError:
period_end_d_plus1 = dt.today().strftime('%Y-%m') + '-01'
read_schema = 'prod2-generico'
calc_year_month = dt.strptime(period_end_d_plus1, "%Y-%m-%d").strftime("%Y_%b")
# Period start date
period_start_d_ts = dt.strptime(period_end_d_plus1, '%Y-%m-%d') - timedelta(days=180)
period_start_d = period_start_d_ts.strftime('%Y-%m-%d')
# Period end date
period_end_d_ts = dt.strptime(period_end_d_plus1, '%Y-%m-%d') - timedelta(days=1)
period_end_d = period_end_d_ts.strftime('%Y-%m-%d')
###################################################
# Patients and bills in last 6 months
###################################################
data_q = f"""
select
s."patient-id",
count(distinct s."bill-id") as "num-bills-period",
min(date(s."created-at")) as "first-time-in-period",
max(date(s."created-at")) as "last-time-in-period",
count(distinct date(s."created-at")) as "num-days-visited",
sum(s.quantity) as "total-quantity-period",
sum(s."revenue-value") as "total-spend-period",
sum(case when s."type" in ('ethical', 'high-value-ethical') then quantity else 0 end) as "quantity-ethical",
sum(case when s."type" in ('generic', 'high-value-generic') then quantity else 0 end) as "quantity-generic",
sum(case when s."type" = 'surgical' then quantity else 0 end) as "quantity-surgical",
sum(case when s."type" = 'ayurvedic' then quantity else 0 end) as "quantity-ayurvedic",
sum(case when s."type" = 'general' then quantity else 0 end) as "quantity-general",
sum(case when s."type" = 'otc' then quantity else 0 end) as "quantity-otc",
sum(case when s."category" = 'chronic' then quantity else 0 end) as "quantity-chronic",
min(pm."first-bill-date") as "overall-min-bill-date",
min(pm."hd-min-bill-date") as "min-hd-creation-date"
from
"{read_schema}".sales s
left join "{read_schema}"."patients-metadata-2" pm on
s."patient-id" = pm.id
where
DATE(s."created-at") between '{period_start_d}' and '{period_end_d}'
and s."bill-flag" = 'gross'
group by
s."patient-id";
"""
bill_data = rs_db.get_df(query=data_q)
logger.info(f"fetched data - data-type {bill_data.info()}")
bill_data['first-time-in-period'] = pd.to_datetime(bill_data['first-time-in-period'])
bill_data['last-time-in-period'] = pd.to_datetime(bill_data['last-time-in-period'])
bill_data['total-spend-period'] = bill_data['total-spend-period'].astype(float)
# ad-hoc exploratory check: heaviest visitors with zero generic quantity
bill_data[(bill_data['quantity-generic'] == 0)].sort_values('num-days-visited', ascending=False).head()
# 26455527
# ###################################################
# # Bill level summary
# ###################################################
# bill_summ_q = """
# SELECT
# `bill-id`,
# `patient-id`,
# `bill-date`,
# SUM(quantity) AS `total-quantity-bill`,
# SUM(rate*quantity) AS `total-spend-bill`,
# SUM(CASE
# WHEN `drug-type` IN ('ethical','high-value-ethical') THEN quantity
# ELSE 0
# END) AS `quantity-ethical`,
# SUM(CASE
# WHEN `drug-type` IN ('generic','high-value-generic') THEN quantity
# ELSE 0
# END) AS `quantity-generic`,
# SUM(CASE
# WHEN `drug-type` = 'surgical' THEN quantity
# ELSE 0
# END) AS `quantity-surgical`,
# SUM(CASE
# when `drug-type` = 'ayurvedic' THEN quantity
# ELSE 0
# END) AS `quantity-ayurvedic`,
# SUM(CASE
# WHEN `drug-type` = 'general' THEN quantity
# ELSE 0
# END) AS `quantity-general`,
# SUM(CASE
# WHEN `drug-type` = 'otc' THEN quantity
# ELSE 0
# END) AS `quantity-otc`,
# SUM(CASE
# WHEN `drug-category` = 'chronic' THEN quantity
# ELSE 0
# END) AS `quantity-chronic`
# FROM
# data
# GROUP BY
# `bill-id`,
# `patient-id`,
# `bill-date`
# """
# bill_grp = ps.sqldf(bill_summ_q, locals())
# ###################################################
# # Patient level grouping
# ###################################################
# patient_summ_q = """
# SELECT
# `patient-id`,
# COUNT(distinct `bill-id`) AS `num-bills-period`,
# MIN(`bill-date`) AS `first-time-in-period`,
# MAX(`bill-date`) AS `last-time-in-period`,
# COUNT(DISTINCT `bill-date`) AS `num-days-visited`,
# SUM(`total-quantity-bill`) AS `total-quantity-period`,
# SUM(`total-spend-bill`) AS `total-spend-period`,
# SUM(`quantity-ethical`) AS `quantity-ethical`,
# SUM(`quantity-generic`) AS `quantity-generic`,
# SUM(`quantity-surgical`) AS `quantity-surgical`,
# SUM(`quantity-ayurvedic`) AS `quantity-ayurvedic`,
# SUM(`quantity-general`) AS `quantity-general`,
# SUM(`quantity-otc`) AS `quantity-otc`,
# SUM(`quantity-chronic`) AS `quantity-chronic`
# FROM
# bill_grp
# GROUP BY
# `patient-id`
# """
# patient_level = ps.sqldf(patient_summ_q, locals())
# ###################################################
# # Customer minimum bill date
# ###################################################
# acq_q = f"""
# SELECT
# "patient-id",
# MIN(DATE("created-at")) AS "overall-min-bill-date"
# FROM
# "{read_schema}"."bills-1"
# WHERE
# DATE("created-at") <= '{period_end_d}'
# GROUP BY
# "patient-id"
# """
# data_cc = rs_db.get_df(query=acq_q)
# data_cc['overall-min-bill-date'] = pd.to_datetime(data_cc['overall-min-bill-date'])
# ###################################################
# # HD customers
# ###################################################
# hd_q = f"""
# SELECT
# "patient-id",
# MIN(DATE("created-at")) AS "min-hd-creation-date"
# FROM
# "{read_schema}"."patients-store-orders"
# WHERE
# "order-type" = 'delivery'
# and DATE("created-at") <= '{period_end_d}'
# GROUP BY
# "patient-id"
# """
# data_hd = rs_db.get_df(query=hd_q)
# data_hd['min-hd-creation-date'] = pd.to_datetime(data_hd['min-hd-creation-date'])
# # Append this info
# data_merge = patient_level.merge(data_cc, how='left', on=['patient-id', 'patient-id'])
# data_merge = data_merge.merge(data_hd, how='left', on=['patient-id', 'patient-id'])
# Change data-sets names
data = bill_data.copy()
# for types_col in ['quantity-ethical', 'quantity-generic', 'quantity-surgical',
# 'quantity-ayurvedic', 'quantity-general', 'quantity-otc',
# 'quantity-chronic']:
# print(types_col + "-pc")
###################################################
# Derived features
###################################################
data['spend-per-bill'] = np.round(data['total-spend-period'] / data['num-bills-period'], 2)
data['units-per-bill'] = np.round(data['total-quantity-period'] / data['num-bills-period'], 2)
data['total-interaction-period'] = (pd.to_datetime(data['last-time-in-period']).dt.normalize()
- pd.to_datetime(data['first-time-in-period']).dt.normalize()
).dt.days
data['avg-purchase-interval'] = data['total-interaction-period'] / (data['num-days-visited'] - 1)
# Generico age is defined as last date in period, to date creation of customer
data['generico-age-customer'] = (pd.to_datetime(data['last-time-in-period']).dt.normalize()
- pd.to_datetime(data['overall-min-bill-date']).dt.normalize()
).dt.days
data['recency-customer'] = (pd.to_datetime(period_end_d).normalize()
- pd.to_datetime(data['last-time-in-period']).dt.normalize()
).dt.days
for types_col in ['quantity-ethical', 'quantity-generic', 'quantity-surgical',
'quantity-ayurvedic', 'quantity-general', 'quantity-otc',
'quantity-chronic']:
data[types_col + "-pc"] = data[types_col] / data['total-quantity-period']
data['chronic-yes'] = np.where(data['quantity-chronic-pc'] > 0, 1, 0)
###################################################
# Remove outliers - custom defined as of now
###################################################
data_for_mean_std = data[data['units-per-bill'] <= 50]
data_for_mean_std = data_for_mean_std[data_for_mean_std['spend-per-bill'] <= 10000]
data_for_mean_std = data_for_mean_std[data_for_mean_std['num-days-visited'] <= 52]
data_for_mean_std = data_for_mean_std[data_for_mean_std['num-bills-period'] <= 52]
###################################################
# Clustering is done for old repeat customers only, so restrict the base to them
###################################################
old_c_period_end_d_ts = dt.strptime(period_end_d, '%Y-%m-%d') - timedelta(days=60)
old_c_period_end_d = old_c_period_end_d_ts.strftime('%Y-%m-%d')
data_for_mean_std = data_for_mean_std[
(pd.to_datetime(data_for_mean_std['overall-min-bill-date']) <= old_c_period_end_d) &
(data_for_mean_std['num-days-visited'] > 1)]
feature_names = ['num-days-visited', 'spend-per-bill', 'units-per-bill',
'total-interaction-period', 'avg-purchase-interval',
'generico-age-customer', 'recency-customer',
'quantity-ethical-pc', 'quantity-generic-pc',
'quantity-surgical-pc', 'quantity-ayurvedic-pc',
'quantity-general-pc', 'quantity-otc-pc', 'quantity-chronic-pc']
# feature_names
data_for_mean_std = data_for_mean_std[feature_names]
# Save mean and sd
mean_std_old_repeat_14f = pd.DataFrame(columns=['feature-name', 'mean', 'std'])
mean_std_old_repeat_14f['feature-name'] = data_for_mean_std.columns
for i in data_for_mean_std.columns:
data_i_mean = data_for_mean_std[i].mean()
data_i_std = data_for_mean_std[i].std()
mean_std_old_repeat_14f.loc[mean_std_old_repeat_14f['feature-name'] == i,
'mean'] = data_i_mean
mean_std_old_repeat_14f.loc[mean_std_old_repeat_14f['feature-name'] == i,
'std'] = data_i_std
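# The loop above produces the same numbers as data_for_mean_std.agg(['mean', 'std']).T,
# stored in the long (feature-name, mean, std) layout read by the standardisation
# step further below.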
###################################################
# Pre-processing starts here
###################################################
# Extra info appended
data['home-delivery-flag'] = np.where(data['min-hd-creation-date'] <= period_end_d,
'yes', 'no')
# HD flag for summarization purpose
data['hd-yes'] = np.where(data['home-delivery-flag'] == 'yes',
1, 0)
data['newcomer-flag'] = np.where(pd.to_datetime(data['overall-min-bill-date']) > old_c_period_end_d,
'newcomer', 'old_customer')
data['singletripper-flag'] = np.where(data['num-days-visited'] == 1,
'singletripper', 'repeat_customer')
data_superset = data.copy()
data_old_repeat = data[
(data['newcomer-flag'] == 'old_customer') &
(data['singletripper-flag'] == 'repeat_customer')].copy()
# Save this as main data
data = data_old_repeat.copy()
data = data[feature_names]
# Import mean and std per feature
mean_std_features = mean_std_old_repeat_14f.copy()
mean_std_features = mean_std_features[['feature-name', 'mean', 'std']]
# Standardization
for i in data.columns:
mean_i = list(mean_std_features.loc[mean_std_features['feature-name'] == i, 'mean'])[0]
std_i = list(mean_std_features.loc[mean_std_features['feature-name'] == i, 'std'])[0]
# Standardize
data[i + "-norm"] = standardize(data[i], mean_i, std_i)
# Keep only Standardized columns for modelling
norm_cols = [i for i in data.columns if i.endswith("-norm")]
data = data[norm_cols]
# Read PCA Components
pca_components = pd.read_csv(s3.download_file_from_s3('data/Job-6/input/pca_repeat_14f_10pca_94pc_variance.csv'))
pca_components.drop(columns=['Unnamed: 0'], inplace=True)
# Convert dataset to matrix form
data_mat = np.array(data)
# Convert PCA components to matrix form
pca_mat = np.array(pca_components).T
# Multiply data matrix to PCA matrix, to transform into PCA features
data_to_pca = np.matmul(data_mat, pca_mat)
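# Shape note: data_mat is (n_customers, 14 normalised features) and, per the
# component file name, pca_mat is (14, 10), so data_to_pca is (n_customers, 10)
# in the reduced PCA space used for centroid matching below.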
# KMeans
# centroids import
kmeans_centroids = pd.read_csv(s3.download_file_from_s3('data/Job-6/input/kmeans_centroids_repeat_6c_14f_10pca.csv'))
kmeans_centroids.drop(columns=['Unnamed: 0'], inplace=True)
kmeans_centroids
# Convert centroids data-set to matrix form
kmeans_centroids_mat = np.array(kmeans_centroids)
###################################################
# Predict
###################################################
# Predict cluster number
cluster_no = cluster_predict(data_to_pca, kmeans_centroids_mat)
# Back to pandas data-set
data_final = data.copy()
data_final['cluster'] = cluster_no
data_merge = data_old_repeat.merge(data_final, how='inner', left_index=True,
right_index=True)
# To summarize on
summary_cols_median = ['num-days-visited', 'spend-per-bill',
'units-per-bill', 'total-interaction-period',
'avg-purchase-interval', 'generico-age-customer',
'recency-customer', 'quantity-ethical-pc',
'quantity-generic-pc', 'quantity-chronic-pc',
'total-spend-period'] # for info purpose
summary_cols_mean = summary_cols_median + ['chronic-yes', 'hd-yes']
median_agg_dict = {'num-days-visited': ['count', 'median'],
'spend-per-bill': 'median',
'units-per-bill': 'median',
'total-interaction-period': 'median',
'avg-purchase-interval': 'median',
'generico-age-customer': 'median',
'recency-customer': 'median',
'quantity-ethical-pc': 'median',
'quantity-generic-pc': 'median',
'quantity-chronic-pc': 'median',
'total-spend-period': ['median', 'sum']}
# Make it re-usable later on
mean_agg_dict = {'num-days-visited': ['count', 'mean'],
'spend-per-bill': 'mean',
'units-per-bill': 'mean',
'total-interaction-period': 'mean',
'avg-purchase-interval': 'mean',
'generico-age-customer': 'mean',
'recency-customer': 'mean',
'quantity-ethical-pc': 'mean',
'quantity-generic-pc': 'mean',
'quantity-chronic-pc': 'mean',
'total-spend-period': ['mean', 'sum'],
'chronic-yes': 'mean',
'hd-yes': 'mean'}
###################################################
# Profile summary of clusters
###################################################
# Mean profile
profile_data = data_merge[summary_cols_mean + ['cluster']].groupby(
['cluster']).agg(mean_agg_dict)
length_base_cluster = len(data_merge)
def profile_extra_cols(profile_data_pass, length_base_pass):
# Segment % share in data-set
profile_data_pass['count-pc'] = np.round(
profile_data_pass['num-days-visited']['count'] * 100 / length_base_pass)
# Round all numbers
profile_data_pass = np.round(profile_data_pass, 2)
return profile_data_pass
profile_data = profile_extra_cols(profile_data, length_base_cluster)
# Median profile
profile_data_med = data_merge[summary_cols_median + ['cluster']].groupby(
['cluster']).agg(median_agg_dict)
profile_data_med = profile_extra_cols(profile_data_med, length_base_cluster)
# Save both profile summaries (mean and median) to .csv
s3.save_df_to_s3(df=profile_data,
file_name='Behaviour_Segment_Output/profile_data_{}.csv'.format(calc_year_month),
index=False)
s3.save_df_to_s3(df=profile_data_med,
file_name='Behaviour_Segment_Output/profile_data_med_{}.csv'.format(calc_year_month),
index=False)
###################################################
# Name clusters
###################################################
data_merge['cluster-name'] = data_merge['cluster'].map({0: 'generic_heavy',
1: 'regular',
3: 'super',
5: 'ethical_heavy',
2: 'other_type',
4: 'other_type'})
# Patient_id wise, for all
data_superset_merge = data_superset.merge(data_merge[['patient-id', 'cluster-name']],
                                          how='left',
                                          on='patient-id')
def assign_extra_segment(data_pass):
"""
Add segment names to segments not covered in clustering
"""
if (data_pass['newcomer-flag'] == 'newcomer' and
data_pass['singletripper-flag'] == 'repeat_customer'):
return 'newcomer_repeat'
elif (data_pass['newcomer-flag'] == 'newcomer' and
data_pass['singletripper-flag'] == 'singletripper'):
return 'newcomer_singletripper'
elif (data_pass['newcomer-flag'] == 'old_customer' and
data_pass['singletripper-flag'] == 'singletripper'):
return 'singletripper'
else:
return data_pass['cluster-name']
# Assign segment names for extra segments
data_superset_merge['behaviour-segment'] = data_superset_merge.apply(
lambda row: assign_extra_segment(row), axis=1)
data_superset_merge.groupby('behaviour-segment', as_index=False)['patient-id'].nunique()
###################################################
# Profiling all segment (Summary statistics for information)
###################################################
# Mean profile
profile_data_all = data_superset_merge[summary_cols_mean + ['behaviour-segment']].groupby(
['behaviour-segment']).agg(mean_agg_dict)
length_base_segment = len(data_superset_merge)
profile_data_all = profile_extra_cols(profile_data_all, length_base_segment)
# Median profile
profile_data_med_all = data_superset_merge[summary_cols_median + ['behaviour-segment']].groupby(
['behaviour-segment']).agg(median_agg_dict)
profile_data_med_all = profile_extra_cols(profile_data_med_all, length_base_segment)
# Save both profile summaries (mean and median) to .csv
# save_df_to_s3 returns the uploaded file URI; reused below for the email attachments
profile_data_all = s3.save_df_to_s3(df=profile_data_all,
                                    file_name='Behaviour_Segment_Output/profile_data_all_{}.csv'.format(calc_year_month),
                                    index=False)
profile_data_med_all = s3.save_df_to_s3(df=profile_data_med_all,
                                        file_name='Behaviour_Segment_Output/profile_data_med_all_{}.csv'.format(calc_year_month),
                                        index=False)
# Save as .csv, the profile summary of each segment
for i in data_superset_merge['behaviour-segment'].unique():
segment_i = data_superset_merge[data_superset_merge['behaviour-segment'] == i]
logger.info(f'Length of {i} segment is {len(segment_i)}')
# Summarize
profile_i = segment_i[summary_cols_mean].describe()
s3.save_df_to_s3(df=profile_i,
file_name='profile_{}.csv'.format(i),
index=False)
# Now this data is source of truth
data = data_superset_merge.copy()
###################################################
# Assign unique store to patient
###################################################
patient_store_q = f"""
select
pm.id as "patient-id",
"primary-store-id" as "store-id",
s."name" as "store-name"
from
"{read_schema}"."patients-metadata-2" pm
left join "{read_schema}".stores s on
pm."primary-store-id" = s.id
where
DATEDIFF('days', '{period_end_d_plus1}', "last-bill-date") between -180 and -1;
"""
data_store = rs_db.get_df(query=patient_store_q)
# data_store['rank'] = data_store.sort_values(['store-bills', 'store-spend'],
# ascending=[False, False]
# ).groupby(['patient-id']).cumcount() + 1
# patient_store = data_store[data_store['rank'] == 1][['patient-id', 'store-id']]
# # Stores
# stores_q = f"""
# SELECT
# "id" AS "store-id",
# "name" AS "store-name"
# FROM
# "{read_schema}"."stores"
# """
# stores = rs_db.get_df(query=stores_q)
# patient_store = patient_store.merge(stores, how='inner',
# on=['store-id', 'store-id'])
data = data.merge(data_store, how='left', left_on=['patient-id'],
right_on=['patient-id'])
# Export data
keep_cols = ['patient-id', 'num-bills-period', 'total-spend-period',
'spend-per-bill', 'units-per-bill',
'generico-age-customer', 'recency-customer', 'quantity-ethical-pc',
'quantity-generic-pc', 'quantity-chronic-pc', 'chronic-yes', 'hd-yes',
'newcomer-flag', 'singletripper-flag', 'behaviour-segment',
'store-id', 'store-name']
write_data = data[keep_cols]
# Round some numbers
for i in ['quantity-ethical-pc', 'quantity-generic-pc', 'quantity-chronic-pc']:
write_data[i] = np.round(write_data[i], 2)
for i in ['total-spend-period', 'spend-per-bill']:
write_data[i] = np.round(write_data[i], 2)
write_data = write_data.rename(columns={'units-per-bill': 'quantity-per-bill'})
# Make some columns for logging purpose
runtime_date = dt.today().strftime('%Y-%m-%d')
runtime_month = dt.today().strftime('%Y-%m')
write_data['segment-calculation-date'] = period_end_d_plus1
write_data['upload-date'] = runtime_date
write_data['base-list-identifier'] = runtime_month
#data-type correction
write_data['generico-age-customer'] = write_data['generico-age-customer'].fillna(0)
write_data['store-id'] = write_data['store-id'].fillna(0)
write_data['store-id'] = write_data['store-id'].astype(int)
logger.info(write_data.info())
# etl
write_data['created-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
write_data['created-by'] = 'etl-automation'
write_data['updated-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
write_data['updated-by'] = 'etl-automation'
if isinstance(table_info, type(None)):
print(f"table: {table_name} do not exist")
else:
truncate_query = f''' DELETE FROM "{schema}"."{table_name}"
WHERE "segment-calculation-date" = '{period_end_d_plus1}'
'''
logger.info(truncate_query)
rs_db.execute(truncate_query)
# drop duplicates subset - patient-id
write_data.drop_duplicates(subset=['patient-id'], inplace=True)
s3.save_df_to_s3(df=write_data,
file_name='Behaviour_Segment_Output/behaviour_segment_data_{}.csv'.format(calc_year_month), index=False)
###################################################
# Append this updated_churn to Redshift DB
###################################################
s3.write_df_to_db(df=write_data[table_info['column_name']], table_name=table_name, db=rs_db, schema=schema)
email = Email()
subject = "Task Status behaviour segment calculation"
mail_body = "Behaviour segments upload succeeded"
file_uris = [profile_data_all, profile_data_med_all]
email.send_email_file(subject=subject,
mail_body=mail_body,
to_emails=email_to, file_uris=file_uris, file_paths=[])
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/behaviour-segment/behaviour-segment.ipynb | behaviour-segment.ipynb |
import argparse
import sys
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import gc
import pandasql as ps
from datetime import datetime as dt
# Normalisation (Standardization)
def standardize(x_var, mean_x, std_x):
"""
    Standardizing 'x' variable by its mean and std provided
"""
return (x_var - mean_x) / std_x
def cluster_predict(data_matrix, centroids):
"""
Predict cluster number, from data matrix given
And centroids given
Just find nearest cluster for each data point
"""
clusters = []
for unit in data_matrix:
distances = []
for center in centroids:
dist = np.sum((unit - center) ** 2)
# print(dist)
distances.append(dist)
# print(distances)
closest_centroid = np.argmin(distances)
# print(closest_centroid)
clusters.append(closest_centroid)
return clusters
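# Illustrative only (hypothetical numbers): with centroids [[0, 0], [10, 10]] and
# data points [[1, 1], [9, 8]], cluster_predict returns [0, 1] -- the index of the
# nearest centroid for each point.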
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default=["[email protected]", "[email protected]"], type=str,
required=False)
parser.add_argument('-pedp', '--period_end_d_plus1', default="0", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
period_end_d_plus1 = args.period_end_d_plus1
logger = get_logger()
logger.info(f"env: {env}")
schema = 'prod2-generico'
table_name = 'customer-behaviour-segment-test'
rs_db = DB()
rs_db.open_connection()
s3 = S3(bucket_name='datascience-manager')
def seek():
""" get the data """
pass
def run_fun(rs_db, s3):
# write logic here
pass
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
logger.info(table_info)
# Segment calculation date can either be fetched from db manager or from run-date
try:
period_end_d_plus1 = args.period_end_d_plus1
period_end_d_plus1 = str(datetime.strptime(period_end_d_plus1, "%Y-%m-%d").date())
except ValueError:
period_end_d_plus1 = datetime.today().strftime('%Y-%m-%d')
if isinstance(table_info, type(None)):
print(f"table: {table_name} do not exist")
else:
truncate_query = f''' DELETE FROM "{schema}"."{table_name}"
WHERE "segment-calculation-date" = '{period_end_d_plus1}'
'''
logger.info(truncate_query)
rs_db.execute(truncate_query)
read_schema = 'prod2-generico'
calc_year_month = datetime.strptime(period_end_d_plus1, "%Y-%m-%d").strftime("%Y_%b")
# Period start date
period_start_d_ts = datetime.strptime(period_end_d_plus1, '%Y-%m-%d') - timedelta(days=180)
period_start_d = period_start_d_ts.strftime('%Y-%m-%d')
# Period end date
period_end_d_ts = datetime.strptime(period_end_d_plus1, '%Y-%m-%d') - timedelta(days=1)
period_end_d = period_end_d_ts.strftime('%Y-%m-%d')
###################################################
# Patients and bills in last 6 months
###################################################
bills_q = f"""
SELECT
"id" AS "bill-id",
"patient-id",
DATE("created-at") AS "bill-date"
FROM
"{read_schema}"."bills-1"
WHERE
DATE("created-at") between '{period_start_d}' and '{period_end_d}'
"""
data_bill = rs_db.get_df(query=bills_q)
###################################################
# Bill-items in last 6months
###################################################
bi_q = f"""
SELECT
"bill-id",
"inventory-id",
"rate",
"quantity"
FROM
"{read_schema}"."bill-items-1"
WHERE
DATE("created-at") between '{period_start_d}' and '{period_end_d}'
"""
data_billitem = rs_db.get_df(query=bi_q)
###################################################
# Inventory data
###################################################
inv_q = f"""
SELECT
"id" AS "inventory-id",
"drug-id"
FROM
"{read_schema}"."inventory-1"
GROUP BY
"id",
"drug-id"
"""
data_inventory = rs_db.get_df(query=inv_q)
###################################################
# Drugs
###################################################
drugs_q = f"""
SELECT
"id" AS "drug-id",
"type" AS "drug-type",
"category" AS "drug-category",
"is-repeatable",
"repeatability-index"
FROM
"{read_schema}"."drugs"
"""
data_drugs = rs_db.get_df(query=drugs_q)
# Merge these data-frames
data = data_bill.merge(data_billitem, how='inner', on=['bill-id'])
data = data.merge(data_inventory, how='left', on=['inventory-id'])
data = data.merge(data_drugs, how='left', on=['drug-id'])
# Delete temp data-frames
del [[data_bill, data_billitem,
data_inventory, data_drugs]]
gc.collect()
data_bill = pd.DataFrame()
data_billitem = pd.DataFrame()
data_inventory = pd.DataFrame()
data_drugs = pd.DataFrame()
data['bill-date'] = pd.to_datetime(data['bill-date']).dt.date
data['rate'] = data['rate'].astype(float)
###################################################
# Bill level summary
###################################################
bill_summ_q = """
SELECT
`bill-id`,
`patient-id`,
`bill-date`,
SUM(quantity) AS `total-quantity-bill`,
SUM(rate*quantity) AS `total-spend-bill`,
SUM(CASE
WHEN `drug-type` IN ('ethical','high-value-ethical') THEN quantity
ELSE 0
END) AS `quantity-ethical`,
SUM(CASE
WHEN `drug-type` IN ('generic','high-value-generic') THEN quantity
ELSE 0
END) AS `quantity-generic`,
SUM(CASE
WHEN `drug-type` = 'surgical' THEN quantity
ELSE 0
END) AS `quantity-surgical`,
SUM(CASE
when `drug-type` = 'ayurvedic' THEN quantity
ELSE 0
END) AS `quantity-ayurvedic`,
SUM(CASE
WHEN `drug-type` = 'general' THEN quantity
ELSE 0
END) AS `quantity-general`,
SUM(CASE
WHEN `drug-type` = 'otc' THEN quantity
ELSE 0
END) AS `quantity-otc`,
SUM(CASE
WHEN `drug-category` = 'chronic' THEN quantity
ELSE 0
END) AS `quantity-chronic`
FROM
data
GROUP BY
`bill-id`,
`patient-id`,
`bill-date`
"""
bill_grp = ps.sqldf(bill_summ_q, locals())
###################################################
# Patient level grouping
###################################################
patient_summ_q = """
SELECT
`patient-id`,
COUNT(distinct `bill-id`) AS `num-bills-period`,
MIN(`bill-date`) AS `first-time-in-period`,
MAX(`bill-date`) AS `last-time-in-period`,
COUNT(DISTINCT `bill-date`) AS `num-days-visited`,
SUM(`total-quantity-bill`) AS `total-quantity-period`,
SUM(`total-spend-bill`) AS `total-spend-period`,
SUM(`quantity-ethical`) AS `quantity-ethical`,
SUM(`quantity-generic`) AS `quantity-generic`,
SUM(`quantity-surgical`) AS `quantity-surgical`,
SUM(`quantity-ayurvedic`) AS `quantity-ayurvedic`,
SUM(`quantity-general`) AS `quantity-general`,
SUM(`quantity-otc`) AS `quantity-otc`,
SUM(`quantity-chronic`) AS `quantity-chronic`
FROM
bill_grp
GROUP BY
`patient-id`
"""
patient_level = ps.sqldf(patient_summ_q, locals())
###################################################
# Customer minimum bill date
###################################################
acq_q = f"""
SELECT
"patient-id",
MIN(DATE("created-at")) AS "overall-min-bill-date"
FROM
"{read_schema}"."bills-1"
WHERE
DATE("created-at") <= '{period_end_d}'
GROUP BY
"patient-id"
"""
data_cc = rs_db.get_df(query=acq_q)
data_cc['overall-min-bill-date'] = pd.to_datetime(data_cc['overall-min-bill-date'])
###################################################
# HD customers
###################################################
hd_q = f"""
SELECT
"patient-id",
MIN(DATE("created-at")) AS "min-hd-creation-date"
FROM
"{read_schema}"."patients-store-orders"
WHERE
"order-type" = 'delivery'
and DATE("created-at") <= '{period_end_d}'
GROUP BY
"patient-id"
"""
data_hd = rs_db.get_df(query=hd_q)
data_hd['min-hd-creation-date'] = pd.to_datetime(data_hd['min-hd-creation-date'])
# Append this info
data_merge = patient_level.merge(data_cc, how='left', on=['patient-id', 'patient-id'])
data_merge = data_merge.merge(data_hd, how='left', on=['patient-id', 'patient-id'])
# Change data-sets names
data = data_merge.copy()
for types_col in ['quantity-ethical', 'quantity-generic', 'quantity-surgical',
'quantity-ayurvedic', 'quantity-general', 'quantity-otc',
'quantity-chronic']:
logger.info(types_col + "-pc")
###################################################
# Derived features
###################################################
data['spend-per-bill'] = np.round(data['total-spend-period'] / data['num-bills-period'], 2)
data['units-per-bill'] = np.round(data['total-quantity-period'] / data['num-bills-period'], 2)
data['total-interaction-period'] = (pd.to_datetime(data['last-time-in-period']).dt.normalize()
- pd.to_datetime(data['first-time-in-period']).dt.normalize()
).dt.days
data['avg-purchase-interval'] = data['total-interaction-period'] / (data['num-days-visited'] - 1)
# Generico age: days from the customer's overall first bill date to their last bill date in the period
data['generico-age-customer'] = (pd.to_datetime(data['last-time-in-period']).dt.normalize()
- pd.to_datetime(data['overall-min-bill-date']).dt.normalize()
).dt.days
data['recency-customer'] = (pd.to_datetime(period_end_d).normalize()
- pd.to_datetime(data['last-time-in-period']).dt.normalize()
).dt.days
for types_col in ['quantity-ethical', 'quantity-generic', 'quantity-surgical',
'quantity-ayurvedic', 'quantity-general', 'quantity-otc',
'quantity-chronic']:
data[types_col + "-pc"] = data[types_col] / data['total-quantity-period']
data['chronic-yes'] = np.where(data['quantity-chronic-pc'] > 0, 1, 0)
###################################################
# Remove outliers - custom defined as of now
###################################################
data_for_mean_std = data[data['units-per-bill'] <= 50]
data_for_mean_std = data_for_mean_std[data_for_mean_std['spend-per-bill'] <= 10000]
data_for_mean_std = data_for_mean_std[data_for_mean_std['num-days-visited'] <= 52]
data_for_mean_std = data_for_mean_std[data_for_mean_std['num-bills-period'] <= 52]
###################################################
# Clustering is done for old repeat customers only, so define the 'old customer' cutoff (60 days before period end)
###################################################
old_c_period_end_d_ts = datetime.strptime(period_end_d, '%Y-%m-%d') - timedelta(days=60)
old_c_period_end_d = old_c_period_end_d_ts.strftime('%Y-%m-%d')
data_for_mean_std = data_for_mean_std[
(pd.to_datetime(data_for_mean_std['overall-min-bill-date']) <= old_c_period_end_d) &
(data_for_mean_std['num-days-visited'] > 1)]
feature_names = ['num-days-visited', 'spend-per-bill', 'units-per-bill',
'total-interaction-period', 'avg-purchase-interval',
'generico-age-customer', 'recency-customer',
'quantity-ethical-pc', 'quantity-generic-pc',
'quantity-surgical-pc', 'quantity-ayurvedic-pc',
'quantity-general-pc', 'quantity-otc-pc', 'quantity-chronic-pc']
# feature_names
data_for_mean_std = data_for_mean_std[feature_names]
# Save mean and sd
mean_std_old_repeat_14f = pd.DataFrame(columns=['feature-name', 'mean', 'std'])
mean_std_old_repeat_14f['feature-name'] = data_for_mean_std.columns
for i in data_for_mean_std.columns:
data_i_mean = data_for_mean_std[i].mean()
data_i_std = data_for_mean_std[i].std()
mean_std_old_repeat_14f.loc[mean_std_old_repeat_14f['feature-name'] == i,
'mean'] = data_i_mean
mean_std_old_repeat_14f.loc[mean_std_old_repeat_14f['feature-name'] == i,
'std'] = data_i_std
###################################################
# Pre-processing starts here
###################################################
# Extra info appended
data['home-delivery-flag'] = np.where(data['min-hd-creation-date'] <= period_end_d,
'yes', 'no')
# HD flag for summarization purpose
data['hd-yes'] = np.where(data['home-delivery-flag'] == 'yes',
1, 0)
data['newcomer-flag'] = np.where(pd.to_datetime(data['overall-min-bill-date']) > old_c_period_end_d,
'newcomer', 'old-customer')
data['singletripper-flag'] = np.where(data['num-days-visited'] == 1,
'singletripper', 'repeat-customer')
data_superset = data.copy()
data_old_repeat = data[
(data['newcomer-flag'] == 'old-customer') &
(data['singletripper-flag'] == 'repeat-customer')].copy()
# Save this as main data
data = data_old_repeat.copy()
data = data[feature_names]
# Import mean and std per feature
mean_std_features = mean_std_old_repeat_14f.copy()
mean_std_features = mean_std_features[['feature-name', 'mean', 'std']]
# Standardization
for i in data.columns:
mean_i = list(mean_std_features.loc[mean_std_features['feature-name'] == i, 'mean'])[0]
std_i = list(mean_std_features.loc[mean_std_features['feature-name'] == i, 'std'])[0]
# Standardize
data[i + "-norm"] = standardize(data[i], mean_i, std_i)
# Keep only Standardized columns for modelling
norm_cols = [i for i in data.columns if i.endswith("-norm")]
data = data[norm_cols]
# Read PCA Components
pca_components = pd.read_csv(s3.download_file_from_s3('data/Job-6/input/pca_repeat_14f_10pca_94pc_variance.csv'))
pca_components.drop(columns=['Unnamed: 0'], inplace=True)
# Convert dataset to matrix form
data_mat = np.array(data)
# Convert PCA components to matrix form
pca_mat = np.array(pca_components).T
# Multiply data matrix to PCA matrix, to transform into PCA features
data_to_pca = np.matmul(data_mat, pca_mat)
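# Shape note (per the 14 feature_names and the '14f_10pca' component file): data_mat is
# (n_customers x 14 standardized features), pca_mat is (14 x 10), so data_to_pca is (n_customers x 10)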
# KMeans
# centroids import
kmeans_centroids = pd.read_csv(s3.download_file_from_s3('data/Job-6/input/kmeans_centroids_repeat_6c_14f_10pca.csv'))
kmeans_centroids.drop(columns=['Unnamed: 0'], inplace=True)
# Convert centroids data-set to matrix form
kmeans_centroids_mat = np.array(kmeans_centroids)
###################################################
# Predict
###################################################
# Predict cluster number
cluster_no = cluster_predict(data_to_pca, kmeans_centroids_mat)
# Back to pandas data-set
data_final = data.copy()
data_final['cluster'] = cluster_no
data_merge = data_old_repeat.merge(data_final, how='inner', left_index=True,
right_index=True)
# To summarize on
summary_cols_median = ['num-days-visited', 'spend-per-bill',
'units-per-bill', 'total-interaction-period',
'avg-purchase-interval', 'generico-age-customer',
'recency-customer', 'quantity-ethical-pc',
'quantity-generic-pc', 'quantity-chronic-pc',
'total-spend-period'] # for info purpose
summary_cols_mean = summary_cols_median + ['chronic-yes', 'hd-yes']
median_agg_dict = {'num-days-visited': ['count', 'median'],
'spend-per-bill': 'median',
'units-per-bill': 'median',
'total-interaction-period': 'median',
'avg-purchase-interval': 'median',
'generico-age-customer': 'median',
'recency-customer': 'median',
'quantity-ethical-pc': 'median',
'quantity-generic-pc': 'median',
'quantity-chronic-pc': 'median',
'total-spend-period': ['median', 'sum']}
# Make it re-usable later on
mean_agg_dict = {'num-days-visited': ['count', 'mean'],
'spend-per-bill': 'mean',
'units-per-bill': 'mean',
'total-interaction-period': 'mean',
'avg-purchase-interval': 'mean',
'generico-age-customer': 'mean',
'recency-customer': 'mean',
'quantity-ethical-pc': 'mean',
'quantity-generic-pc': 'mean',
'quantity-chronic-pc': 'mean',
'total-spend-period': ['mean', 'sum'],
'chronic-yes': 'mean',
'hd-yes': 'mean'}
###################################################
# Profile summary of clusters
###################################################
# Mean profile
profile_data = data_merge[summary_cols_mean + ['cluster']].groupby(
['cluster']).agg(mean_agg_dict)
length_base_cluster = len(data_merge)
def profile_extra_cols(profile_data_pass, length_base_pass):
# Segment % share in data-set
profile_data_pass['count-pc'] = np.round(
profile_data_pass['num-days-visited']['count'] * 100 / length_base_pass)
# Round all numbers
profile_data_pass = np.round(profile_data_pass, 2)
return profile_data_pass
profile_data = profile_extra_cols(profile_data, length_base_cluster)
# Median profile
profile_data_med = data_merge[summary_cols_median + ['cluster']].groupby(
['cluster']).agg(median_agg_dict)
profile_data_med = profile_extra_cols(profile_data_med, length_base_cluster)
# Save both profile summaries (mean and median) to .csv
s3.save_df_to_s3(df=profile_data,
file_name='Behaviour_Segment_Output/profile_data_{}.csv'.format(calc_year_month),
index=False)
s3.save_df_to_s3(df=profile_data_med,
file_name='Behaviour_Segment_Output/profile_data_med_{}.csv'.format(calc_year_month),
index=False)
###################################################
# Name clusters
###################################################
data_merge['cluster-name'] = data_merge['cluster'].map({0: 'generic_heavy',
1: 'regular',
3: 'super',
5: 'ethical_heavy',
2: 'other_type',
4: 'other_type'})
# Attach cluster names back to the full patient-level data (patient-id wise, for all customers)
data_superset_merge = data_superset.merge(data_merge[['patient-id', 'cluster-name']],
how='left',
on=['patient-id', 'patient-id'])
def assign_extra_segment(data_pass):
"""
Add segment names to segments not covered in clustering
"""
    # flag values below must match those assigned above
    # ('newcomer'/'old-customer' and 'singletripper'/'repeat-customer')
    if (data_pass['newcomer-flag'] == 'newcomer' and
            data_pass['singletripper-flag'] == 'repeat-customer'):
        return 'newcomer_repeat'
    elif (data_pass['newcomer-flag'] == 'newcomer' and
          data_pass['singletripper-flag'] == 'singletripper'):
        return 'newcomer-singletripper'
    elif (data_pass['newcomer-flag'] == 'old-customer' and
          data_pass['singletripper-flag'] == 'singletripper'):
        return 'singletripper'
else:
return data_pass['cluster-name']
# Assign segment names for extra segments
data_superset_merge['behaviour-segment'] = data_superset_merge.apply(
lambda row: assign_extra_segment(row), axis=1)
###################################################
# Profiling all segment (Summary statistics for information)
###################################################
# Mean profile
profile_data_all = data_superset_merge[summary_cols_mean + ['behaviour-segment']].groupby(
['behaviour-segment']).agg(mean_agg_dict)
length_base_segment = len(data_superset_merge)
profile_data_all = profile_extra_cols(profile_data_all, length_base_segment)
# Median profile
profile_data_med_all = data_superset_merge[summary_cols_median + ['behaviour-segment']].groupby(
['behaviour-segment']).agg(median_agg_dict)
profile_data_med_all = profile_extra_cols(profile_data_med_all, length_base_segment)
# Save both profile summaries (mean and median) to .csv
profile_data_all = s3.save_df_to_s3(df=profile_data_all,
file_name='Behaviour_Segment_Output/profile_data_all_{}.csv'.format(
calc_year_month),
index=False)
profile_data_med_all = s3.save_df_to_s3(df=profile_data_med_all,
file_name='Behaviour_Segment_Output/profile_data_med_all_{}.csv'.format(
calc_year_month),
index=False)
# Save as .csv, the profile summary of each segment
for i in data_superset_merge['behaviour-segment'].unique():
segment_i = data_superset_merge[data_superset_merge['behaviour-segment'] == i]
logger.info(f'Length of {i} segment is {len(segment_i)}')
# Summarize
profile_i = segment_i[summary_cols_mean].describe()
s3.save_df_to_s3(df=profile_i,
file_name='profile_{}.csv'.format(i),
index=False)
# Now this data is source of truth
data = data_superset_merge.copy()
###################################################
# Assign unique store to patient
###################################################
patient_store_q = f"""
SELECT
"patient-id",
"store-id",
COUNT(DISTINCT "id") AS "store-bills",
SUM("net-payable") AS "store-spend"
FROM
"{read_schema}"."bills-1"
WHERE
DATEDIFF('days', '{period_end_d_plus1}', DATE("created-at")) between -180 and -1
GROUP BY
"patient-id",
"store-id"
"""
data_store = rs_db.get_df(query=patient_store_q)
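# rank each patient's stores by bill count, then spend; rank 1 = the patient's primary store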
data_store['rank'] = data_store.sort_values(['store-bills', 'store-spend'],
ascending=[False, False]
).groupby(['patient-id']).cumcount() + 1
patient_store = data_store[data_store['rank'] == 1][['patient-id', 'store-id']]
# Stores
stores_q = f"""
SELECT
"id" AS "store-id",
"name" AS "store-name"
FROM
"{read_schema}"."stores"
"""
stores = rs_db.get_df(query=stores_q)
patient_store = patient_store.merge(stores, how='inner',
on=['store-id', 'store-id'])
data = data.merge(patient_store, how='left', left_on=['patient-id'],
right_on=['patient-id'])
# Export data
keep_cols = ['patient-id', 'num-bills-period', 'total-spend-period',
'spend-per-bill', 'units-per-bill',
'generico-age-customer', 'recency-customer', 'quantity-ethical-pc',
'quantity-generic-pc', 'quantity-chronic-pc', 'chronic-yes', 'hd-yes',
'newcomer-flag', 'singletripper-flag', 'behaviour-segment',
'store-id', 'store-name']
write_data = data[keep_cols]
# Round some numbers
for i in ['quantity-ethical-pc', 'quantity-generic-pc', 'quantity-chronic-pc']:
write_data[i] = np.round(write_data[i], 2)
for i in ['total-spend-period', 'spend-per-bill']:
write_data[i] = np.round(write_data[i], 2)
write_data = write_data.rename(columns={'units-per-bill': 'quantity-per-bill'})
# Make some columns for logging purpose
runtime_date = datetime.today().strftime('%Y-%m-%d')
runtime_month = datetime.today().strftime('%Y-%m')
write_data['segment-calculation-date'] = period_end_d_plus1
write_data['upload-date'] = runtime_date
write_data['base-list-identifier'] = runtime_month
# etl
write_data['created-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
write_data['created-by'] = 'etl-automation'
write_data['updated-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
write_data['updated-by'] = 'etl-automation'
s3.save_df_to_s3(df=write_data,
file_name='Behaviour_Segment_Output/behaviour_segment_data_{}.csv'.format(calc_year_month),
index=False)
###################################################
# Append this updated_churn to Redshift DB
###################################################
s3.write_df_to_db(df=write_data[table_info['column_name']], table_name=table_name, db=rs_db, schema=schema)
email = Email()
subject = "Task Status behaviour segment calculation"
mail_body = "Behaviour segments upload succeeded"
file_uris = [profile_data_all, profile_data_med_all]
email.send_email_file(subject=subject,
mail_body=mail_body,
to_emails=email_to, file_uris=file_uris, file_paths=[]) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/behaviour-segment/behaviour-segment.py | behaviour-segment.py |
```
#installing extra libraries on prod
!pip install zeno-etl-libs==1.0.40
!pip install openpyxl==3.0.9
!pip install nltk==3.6.7
!pip install tweepy==4.3.0
!pip install apiclient==1.0.4
"""
Fetching Zeno Health tweets from Twitter on a daily basis
Author : [email protected]
"""
import argparse
import sys
import re
import os
import pandas as pd
import dateutil
import datetime
from dateutil.tz import gettz
import numpy as np
import time
import datetime
import os
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.tokenize import word_tokenize, RegexpTokenizer, sent_tokenize
from nltk.corpus import stopwords
nltk.download('stopwords')
nltk.download('punkt')  # sentence tokenizer models used by sent_tokenize
nltk.download('vader_lexicon')
import tweepy
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.email.email import Email
```
## Pass Params
```
env = "dev"
full_run = 0
email_to ="[email protected],[email protected],[email protected],[email protected],[email protected]"
os.environ['env'] = env
logger = get_logger()
logger.info(f"full_run: {full_run}")
rs_db = DB(read_only=False)
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'zeno-tweets'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# latest tweet date already present in the target table
table_q = """
select
max("tweet-created-at") max_exp
from
"prod2-generico"."zeno-tweets"
"""
max_exp_date = rs_db.get_df(table_q)
max_exp_date['max_exp'].fillna(np.nan, inplace=True)
print(max_exp_date.info())
max_exp_date = max_exp_date['max_exp'].to_string(index=False)
print(max_exp_date)
# params
if full_run or max_exp_date == 'NaN':
start = '2017-05-13'
else:
start = max_exp_date
start = dateutil.parser.parse(start)
# defining keys and tokens
consumer_key = 'c57SU7sulViKSmjsOTi4kTO3W'
consumer_secret = 'cNT3yk5ibQ315AWNCJHgE9ipCGlM1XnenHZu9cBWaVL3q7fPew'
access_token = '796747210159517701-DhOBQgwzeb6q4eXlI4WjwPRJH1CuEIT'
access_token_secret = 'sMrnPZ4ExI8um43wquUvFEUCTyY61HYRf7z3jv00ltXlt'
# making api connection
# authentication
def auth(consumer_key, consumer_secret, access_token, access_token_secret):
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
return api
api = auth(consumer_key, consumer_secret, access_token, access_token_secret)
# remove url
def remove_url(txt):
"""Replace URLs found in a text string with nothing
(i.e. it will remove the URL from the string).
Parameters
----------
txt : string
A text string that you want to parse and remove urls.
Returns
-------
The same txt string with url's removed.
"""
return " ".join(re.sub("([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", txt).split())
# search Twitter for the keyword and tokenize the results
def tweet(search_term, count=100000):
# Create a custom search term and define the number of tweets
tweets = api.search_tweets(search_term, count=count)
# Remove URLs
tweets_no_urls = [remove_url(tweet.text) for tweet in tweets]
# lowercase
tweet_data = [sent_tokenize(x.lower()) for x in tweets_no_urls]
tweet_data = pd.DataFrame(data=tweet_data, columns=['tweetext'])
tweet_att = [[search_term, x.lang, x.user.location, x.created_at, x.id, x.user.name,
x.user.followers_count, x.user.friends_count, x.text, x.place, x.user.time_zone] for x in tweets]
tweet_att = pd.DataFrame(data=tweet_att, columns=['search_term', 'lang', 'loc', 'created-at', 'id', 'username',
'followers', 'friends', 'og tweet', 'place', 'Tz'])
final_data = pd.concat([tweet_data, tweet_att], axis=1)
return final_data
# removing stopwords
def remove_sw(sent, corpus):
    """Remove stopwords from a sentence; returns a single-element list."""
    stop_words = set(stopwords.words(corpus))
    word_tokens = word_tokenize(sent)
    # keep only tokens not in the stopword set (tweets are already lowercased upstream)
    filtered_sentence = ' '.join(w for w in word_tokens if w not in stop_words)
    return [filtered_sentence]
# sentiment scoring using NLTK's VADER SentimentIntensityAnalyzer
def sentiment_analyser(lst):
    """Return (neg, neu, pos, compound) VADER scores for the first text in lst."""
    sid = SentimentIntensityAnalyzer()
    # score each text once instead of re-scoring it for every metric
    scores = [sid.polarity_scores(x) for x in lst]
    first = scores[0]
    return first['neg'], first['neu'], first['pos'], first['compound']
# running all above functions
def run_all(search_term, count=1000000):
print("API handshake successful")
print("Searching for term ", search_term)
tweet_data = tweet(search_term, count=count)
# print(tweet_data)
print("Removing stopwords")
sw = 'english'
if tweet_data.empty:
return tweet_data
else:
tweet_data['tweetext_filter'] = tweet_data['tweetext'].apply(lambda x: remove_sw(x, sw), 1)
print("Analysing sentiment for ", search_term)
tweet_data['neg', 'neu', 'pos', 'comp'] = tweet_data['tweetext_filter'].apply(lambda x: sentiment_analyser(x), 1)
tweet_data[['neg', 'neu', 'pos', 'comp']] = tweet_data['neg', 'neu', 'pos', 'comp'].apply(pd.Series)
tweet_data.drop(columns=('neg', 'neu', 'pos', 'comp'), inplace=True)
# sentiment, neg, neu, pos, comp = sentiment_analyser(tweets)
# df = build_df(pos,neg,neu,comp, tweets)
print('Done \n')
return tweet_data
search_terms = ['#zeno_health','@zeno_health']
tws = pd.DataFrame()
try:
for search_term in search_terms:
tw = run_all(search_term, count=1000000)
tws = pd.concat([tws, tw], axis=0)
print('Done')
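    # keep only English/Hindi tweets and drop retweets (text starting with 'rt')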
tws = tws[((tws['lang'].isin(['en', 'hi']) & (~tws['tweetext'].str.startswith('rt'))))]
except BaseException as e:
print('failed on_status,', str(e))
time.sleep(3)
tws
if tws.empty:
print('DataFrame is empty!')
exit()
tws = tws[
['og tweet', 'id', 'created-at', 'search_term', 'lang', 'loc', 'username', 'followers', 'friends', 'neg', 'neu',
'pos', 'comp']]
rename_map = {'id': 'tweet-id',
              'og tweet': 'tweet',
              'search_term': 'search-term',
              'lang': 'language',
              'loc': 'location',
              'created-at': 'tweet-created-at',
              'pos': 'positive-sentiment',
              'neu': 'neutral-sentiment',
              'neg': 'negative-sentiment',
              'comp': 'compound-sentiment'}
tws.rename(columns=rename_map, inplace=True)
tws['tweet-created-at'] = pd.to_datetime(tws['tweet-created-at']). \
dt.tz_convert('Asia/Kolkata').dt.tz_localize(None)
# etl
tws['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
tws['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
tws['created-by'] = 'etl-automation'
tws['updated-by'] = 'etl-automation'
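# classify each tweet: Detractor if negative sentiment >= positive sentiment, else Promoter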
tws['tweet-type'] = np.where(tws['negative-sentiment'] >= tws['positive-sentiment'], 'Detractor', 'Promoter')
tws_mail = tws[['tweet-id', 'tweet', 'tweet-created-at', 'search-term', 'language', 'location', 'username', 'followers',
'friends', 'tweet-type']]
tws_mail = tws_mail.sort_values(by=['tweet-type'], ascending=True)
tws_mail = tws_mail[(tws_mail['tweet-created-at'] > start)]
tws = tws[(tws['tweet-created-at'] > start)]
if tws.empty:
print('DataFrame is empty!')
rs_db.close_connection()
exit()
tws.columns = [c.replace('_', '-') for c in tws.columns]
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where "tweet-created-at" >'{start}' '''
print(truncate_query)
rs_db.execute(truncate_query)
s3.write_df_to_db(df=tws[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
file_name = 'Zeno_Tweets.xlsx'
file_path = s3.write_df_to_excel(data={'Zeno Tweets': tws_mail}, file_name=file_name)
email = Email()
# file_path ='/Users/Lenovo/Downloads/utter.csv'
email.send_email_file(subject="Zeno Tweets",
mail_body='Zeno Tweets',
to_emails=email_to, file_uris=[], file_paths=[file_path])
rs_db.close_connection()
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/tweets/zeno-tweets.ipynb | zeno-tweets.ipynb |
```
!pip install zeno_etl_libs==1.0.40
"""main wrapper for Non-IPC safety stock reset"""
import os
import sys
import argparse
import pandas as pd
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.django.api import Django
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.non_ipc.data_prep.non_ipc_data_prep import non_ipc_data_prep
from zeno_etl_libs.utils.non_ipc.forecast.forecast_main import non_ipc_forecast
from zeno_etl_libs.utils.non_ipc.safety_stock.safety_stock import non_ipc_safety_stock_calc
from zeno_etl_libs.utils.warehouse.wh_intervention.store_portfolio_consolidation import stores_ss_consolidation
from zeno_etl_libs.utils.ipc.goodaid_substitution import update_ga_ss
from zeno_etl_libs.utils.ipc.npi_exclusion import omit_npi_drugs
from zeno_etl_libs.utils.ipc.post_processing import post_processing
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.utils.ipc.store_portfolio_additions import generic_portfolio
```
## Main Function
```
def main(debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag,
v6_active_flag, v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
keep_all_generic_comp, agg_week_cnt, kind, rs_db_read, rs_db_write,
read_schema, write_schema, s3, django, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
if v3_active_flag == 'Y':
corrections_flag = True
else:
corrections_flag = False
# Define empty DF if required in case of fail
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
logger.info("Forecast pipeline starts...")
try:
for store_id in reset_stores:
logger.info("Non-IPC SS calculation started for store id: " + str(store_id))
if not type_list:
type_list = str(
list(reset_store_ops.loc[reset_store_ops['store_id'] ==
store_id, 'type'].unique()))
type_list = type_list.replace('[', '(').replace(']', ')')
# RUNNING DATA PREPARATION
drug_data_agg_weekly, drug_data_weekly, drug_class, \
bucket_sales = non_ipc_data_prep(
store_id_list=store_id, reset_date=reset_date,
type_list=type_list, db=rs_db_read, schema=read_schema,
agg_week_cnt=agg_week_cnt,
logger=logger)
# CREATING TRAIN FLAG TO HANDLE STORES WITH HISTORY < 16 WEEKS
week_count = drug_data_weekly['date'].nunique()
if week_count >= 16:
train_flag = True
else:
train_flag = False
# RUNNING FORECAST PIPELINE AND SAFETY STOCK CALC
out_of_sample = 1
horizon = 1
train, error, predict, ensemble_train, ensemble_error, \
ensemble_predict = non_ipc_forecast(
drug_data_agg_weekly, drug_data_weekly, drug_class,
out_of_sample, horizon, train_flag, logger, kind)
final_predict = ensemble_predict.query('final_fcst == "Y"')
safety_stock_df, df_corrections, \
df_corrections_111, drugs_max_to_lock_ipcv6, \
drug_rejects_ipcv6 = non_ipc_safety_stock_calc(
store_id, drug_data_weekly, reset_date, final_predict,
drug_class, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
chronic_max_flag, train_flag, drug_type_list_v4,
v5_active_flag, v6_active_flag, v6_type_list,
v6_ptr_cut_off, rs_db_read, read_schema, logger)
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
safety_stock_df, consolidation_log = stores_ss_consolidation(
safety_stock_df, rs_db_read, read_schema,
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
safety_stock_df, good_aid_ss_log = update_ga_ss(
safety_stock_df, store_id, rs_db_read, read_schema,
ga_inv_weight, rest_inv_weight,
top_inv_weight, substition_type=['generic'],
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point', logger=logger)
# KEEP ALL GENERIC COMPOSITIONS IN STORE
if keep_all_generic_comp == 'Y':
safety_stock_df = generic_portfolio(safety_stock_df,
rs_db_read, read_schema,
logger)
# OMIT NPI DRUGS
if omit_npi == 'Y':
safety_stock_df = omit_npi_drugs(safety_stock_df, store_id,
reset_date, rs_db_read,
read_schema, logger)
# POST PROCESSING AND ORDER VALUE CALCULATION
safety_stock_df['percentile'] = 0.5
final_predict.rename(columns={'month_begin_dt': 'date'},
inplace=True)
drug_class, weekly_fcst, safety_stock_df, \
order_value = post_processing(
store_id, drug_class, final_predict,
safety_stock_df, rs_db_read,
read_schema, logger)
order_value_all = order_value_all.append(order_value,
ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc-forecast
predict['forecast_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
predict['store_id'] = store_id
predict['store_id'] = predict['store_id'].astype(int)
predict['drug_id'] = predict['drug_id'].astype(int)
predict['month_begin_dt'] = predict['month_begin_dt'].dt.date
predict['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
predict['created-by'] = 'etl-automation'
predict['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
predict['updated-by'] = 'etl-automation'
predict.columns = [c.replace('_', '-') for c in predict.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='non-ipc-predict',
schema=write_schema)
columns = list(table_info['column_name'])
predict = predict[columns] # required column order
logger.info("Writing to table: non-ipc-predict")
s3.write_df_to_db(df=predict,
table_name='non-ipc-predict',
db=rs_db_write, schema=write_schema)
# writing table non-ipc-safety-stock
safety_stock_df['store_id'] = safety_stock_df['store_id'].astype(int)
safety_stock_df['drug_id'] = safety_stock_df['drug_id'].astype(int)
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in
safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='non-ipc-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: non-ipc-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='non-ipc-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table non-ipc-abc-xyz-class
drug_class['store_id'] = drug_class['store_id'].astype(int)
drug_class['drug_id'] = drug_class['drug_id'].astype(int)
drug_class['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
drug_class['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['created-by'] = 'etl-automation'
drug_class['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['updated-by'] = 'etl-automation'
drug_class.columns = [c.replace('_', '-') for c in
drug_class.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='non-ipc-abc-xyz-class',
schema=write_schema)
columns = list(table_info['column_name'])
drug_class = drug_class[columns] # required column order
logger.info("Writing to table: non-ipc-abc-xyz-class")
s3.write_df_to_db(df=drug_class,
table_name='non-ipc-abc-xyz-class',
db=rs_db_write, schema=write_schema)
# to write ipc v6 tables ...
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in
safety_stock_df.columns]
# prevent heavy outliers
ss_data_upload = safety_stock_df.query('order_upto_point < 1000')
ss_data_upload = ss_data_upload.query('order_upto_point > 0')[
['store_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema,
logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
logger.info("All writes to RS-DB completed!")
# INTERNAL TABLE SCHEDULE UPDATE - OPS ORACLE
logger.info(f"Rescheduling SID:{store_id} in OPS ORACLE")
if isinstance(reset_store_ops, pd.DataFrame):
content_type = 74
object_id = reset_store_ops.loc[
reset_store_ops['store_id'] == store_id, 'object_id'].unique()
for obj in object_id:
request_body = {"object_id": int(obj), "content_type": content_type}
api_response, _ = django.django_model_execution_log_create_api(
request_body)
reset_store_ops.loc[
reset_store_ops['object_id'] == obj,
'api_call_response'] = api_response
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"Non-IPC code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"Non-IPC code execution status: {status}")
return status, order_value_all, new_drug_entries, missed_entries
```
## Pass Params
```
env = "dev"
email_to = "[email protected]"
debug_mode = "N"
os.environ['env'] = env
logger = get_logger()
s3 = S3()
django = Django()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
```
## Read params from RS table
```
from zeno_etl_libs.helper.parameter.job_parameter import parameter
args = parameter.get_params(job_id=110)
# JOB EXCLUSIVE PARAMS
exclude_stores = args["exclude_stores"]
goodaid_ss_flag = args["goodaid_ss_flag"]
ga_inv_weight = args["ga_inv_weight"]
rest_inv_weight = args["rest_inv_weight"]
top_inv_weight = args["top_inv_weight"]
chronic_max_flag = args["chronic_max_flag"]
wh_gen_consolidation = args["wh_gen_consolidation"]
v5_active_flag = args["v5_active_flag"]
v6_active_flag = args["v6_active_flag"]
v6_type_list = args["v6_type_list"]
v6_ptr_cut_off = args["v6_ptr_cut_off"]
reset_date = args["reset_date"]
reset_stores = args["reset_stores"]
v3_active_flag = args["v3_active_flag"]
corrections_selling_probability_cutoff = args["corrections_selling_probability_cutoff"]
corrections_cumulative_probability_cutoff = args["corrections_cumulative_probability_cutoff"]
drug_type_list_v4 = args["drug_type_list_v4"]
agg_week_cnt = args["agg_week_cnt"]
kind = args["kind"]
omit_npi = args["omit_npi"]
keep_all_generic_comp = args["keep_all_generic_comp"]
```
## Get Stores and Type List
```
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
if reset_stores == [0]: # Fetch scheduled Non-IPC stores from OPS ORACLE
store_query = """
select "id", name, "opened-at" as opened_at
from "{read_schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {0}
""".format(
str(exclude_stores).replace('[', '(').replace(']', ')'),
read_schema=read_schema)
stores = rs_db_read.get_df(store_query)
# considering reset of stores aged (3 months < age < 1 year)
store_id = stores.loc[
(dt.datetime.now() - stores['opened_at'] > dt.timedelta(days=90)) &
(dt.datetime.now() - stores['opened_at'] <= dt.timedelta(days=365)),
'id'].values
# QUERY TO GET SCHEDULED STORES AND TYPE FROM OPS ORACLE
pg_internal = PostGre(is_internal=True)
pg_internal.open_connection()
reset_store_query = """
SELECT
"ssr"."id" as object_id,
"s"."bpos_store_id" as store_id,
"dc"."slug" as type,
"ssr"."drug_grade"
FROM
"safety_stock_reset_drug_category_mapping" ssr
INNER JOIN "ops_store_manifest" osm
ON ( "ssr"."ops_store_manifest_id" = "osm"."id" )
INNER JOIN "retail_store" s
ON ( "osm"."store_id" = "s"."id" )
INNER JOIN "drug_category" dc
ON ( "ssr"."drug_category_id" = "dc"."id")
WHERE
(
( "ssr"."should_run_daily" = TRUE OR
"ssr"."trigger_dates" && ARRAY[ date('{reset_date}')] )
AND "ssr"."is_auto_generate" = TRUE
AND "osm"."is_active" = TRUE
AND "osm"."is_generate_safety_stock_reset" = TRUE
AND "dc"."is_safety_stock_reset_enabled" = TRUE
AND "dc"."is_active" = TRUE
AND s.bpos_store_id in {store_list}
)
""".format(
store_list=str(list(store_id)).replace('[', '(').replace(']',')'),
reset_date=reset_date)
reset_store_ops = pd.read_sql_query(reset_store_query,
pg_internal.connection)
pg_internal.close_connection()
reset_store_ops['api_call_response'] = False
reset_stores = reset_store_ops['store_id'].unique()
type_list = None
else:
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
reset_store_ops = None
```
## Execute Main Function
```
""" calling the main function """
status, order_value_all, new_drug_entries, \
missed_entries = main(
debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag,
v6_active_flag, v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
keep_all_generic_comp, agg_week_cnt, kind, rs_db_read, rs_db_write,
read_schema, write_schema, s3, django, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
```
## Send Email Notification
```
# save email attachements to s3
order_value_all_uri = s3.save_df_to_s3(order_value_all,
file_name=f"order_value_all_{reset_date}.csv")
new_drug_entries_uri = s3.save_df_to_s3(new_drug_entries,
file_name=f"new_drug_entries_{reset_date}.csv")
missed_entries_uri = s3.save_df_to_s3(missed_entries,
file_name=f"missed_entries_{reset_date}.csv")
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"Non-IPC SS Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Reset Stores: {reset_stores}
Job Params: {args}
""",
to_emails=email_to, file_uris=[order_value_all_uri,
new_drug_entries_uri,
missed_entries_uri])
logger.info("Script ended")
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/non-ipc-ss-main/non_ipc_ss_main.ipynb | non_ipc_ss_main.ipynb |
```
!pip install zeno_etl_libs==1.0.30
"""
Script for updating and keeping standard drug info.
fields included:
['qty_sold_l2y', 'revenue_l2y', 'num_bills_l2y', 'std_qty', 'purchase_interval',
'avg_ptr', 'avg_selling_rate']
author: [email protected]
"""
import os
import sys
import pandas as pd
import datetime as dt
import numpy as np
import statistics as stats
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
import argparse
```
## Main Function
```
def main(debug_mode, rs_db, read_schema, write_schema, table_name, s3, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
try:
# get drug-patient data from mos
logger.info("Getting historical data from sales table, bill_flag='gross'")
q_mos_drugs = f"""
select "drug-id" , "bill-id" , "patient-id" , quantity ,
"revenue-value" as sales, date("created-at") as bill_date
from "{read_schema}".sales s
where "bill-flag" = 'gross'
and DATEDIFF(day, date("created-at"), current_date) < 730
and "store-id" not in (243)
"""
df_mos_drugs = rs_db.get_df(q_mos_drugs)
df_mos_drugs.columns = [c.replace('-', '_') for c in df_mos_drugs.columns]
df_mos_drugs["bill_date"] = pd.to_datetime(df_mos_drugs["bill_date"])
df_drugs = df_mos_drugs.drop(["bill_id", "patient_id", "quantity", "sales",
"bill_date"], axis=1).drop_duplicates()
dd_qty_sales = df_mos_drugs.groupby("drug_id", as_index=False).agg(
{"quantity": "sum", "sales": "sum"})
################################
# get purchase interval of drugs
################################
logger.info("Calculating patient-drug-interval")
df_mos_drugs["bill_date1"] = df_mos_drugs["bill_date"]
grp_pts_drug = df_mos_drugs.groupby(["patient_id", "drug_id"],
as_index=False).agg(
{"bill_date": "count", "bill_date1": "max"})
grp_pts_drug.rename(
{"bill_date": "bill_counts", "bill_date1": "latest_date"}, axis=1,
inplace=True)
        # only drugs with at least 4 bills taken
grp_pts_drug = grp_pts_drug.loc[grp_pts_drug["bill_counts"] > 3]
df_mos_drugs = df_mos_drugs.drop("bill_date1", axis=1)
        # only latest 20 patients considered for purchase interval calculation (see latest_20)
grp_drugs = grp_pts_drug.groupby(["drug_id"], as_index=False).agg(
{'patient_id': latest_20})
pts_drugs_to_consider = grp_drugs.explode('patient_id')
pts_drugs_to_consider = pts_drugs_to_consider.merge(df_mos_drugs,
on=["patient_id",
"drug_id"],
how="left")
interval_pts_drug = pts_drugs_to_consider.groupby(["patient_id", "drug_id"],
as_index=False).agg(
{"bill_date": pts_drug_interval})
interval_pts_drug.rename({"bill_date": "purchase_interval"}, axis=1,
inplace=True)
drug_intv = interval_pts_drug.groupby("drug_id", as_index=False).agg(
{"purchase_interval": median})
# handling edge cases
drug_intv["purchase_interval"] = np.where(
drug_intv["purchase_interval"] == 0, 180,
drug_intv["purchase_interval"])
drug_intv["purchase_interval"] = np.where(
drug_intv["purchase_interval"] > 180, 180,
drug_intv["purchase_interval"])
logger.info("patient-drug-interval calculation finished")
df_drugs = df_drugs.merge(dd_qty_sales, on="drug_id", how="left")
df_drugs.rename({"quantity": "qty_sold_l2y", "sales": "revenue_l2y"},
axis=1, inplace=True)
df_drugs = df_drugs.merge(drug_intv, on="drug_id", how="left")
df_drugs["purchase_interval"] = df_drugs["purchase_interval"].fillna(180)
dd = df_mos_drugs.groupby("drug_id", as_index=False).agg(
{"bill_id": count_unique})
df_drugs = df_drugs.merge(dd, on="drug_id", how="left")
df_drugs.rename({"bill_id": "num_bills_l2y"}, axis=1, inplace=True)
dd = df_mos_drugs.groupby("drug_id", as_index=False).agg({"quantity": mode})
df_drugs = df_drugs.merge(dd, on="drug_id", how="left")
df_drugs.rename({"quantity": "mode"}, axis=1, inplace=True)
dd = df_mos_drugs.groupby("drug_id", as_index=False).agg(
{"quantity": median})
df_drugs = df_drugs.merge(dd, on="drug_id", how="left")
df_drugs.rename({"quantity": "median"}, axis=1, inplace=True)
df_drugs["std_qty"] = np.where(df_drugs["mode"] > df_drugs["median"],
df_drugs["median"], df_drugs["mode"])
df_drugs["std_qty"] = np.where(df_drugs["num_bills_l2y"] <= 10, 1,
df_drugs["std_qty"])
df_drugs["std_qty"] = np.where(df_drugs["std_qty"] > 30, 1,
df_drugs["std_qty"])
df_drugs["std_qty"] = df_drugs["std_qty"].fillna(1)
df_drugs["revenue_l2y"] = df_drugs["revenue_l2y"].fillna(0)
df_drugs["qty_sold_l2y"] = df_drugs["qty_sold_l2y"].fillna(0)
df_drugs["std_qty"] = df_drugs["std_qty"].astype(int)
df_drugs["revenue_l2y"] = df_drugs["revenue_l2y"].astype(float)
df_drugs["qty_sold_l2y"] = df_drugs["qty_sold_l2y"].astype(int)
df_drugs.dropna(subset=['drug_id', 'num_bills_l2y'], inplace=True)
df_drugs["drug_id"] = df_drugs["drug_id"].astype(int)
df_drugs["num_bills_l2y"] = df_drugs["num_bills_l2y"].astype(int)
df_drugs["avg_selling_rate"] = df_drugs["revenue_l2y"] / df_drugs[
"qty_sold_l2y"]
################################
# get avg-ptr and drug-type info
################################
logger.info("Calculating other fields")
# get PTR info for all drugs
q_inv = f"""
SELECT "drug-id" as drug_id , AVG(ptr) as avg_ptr
from "{read_schema}"."inventory-1" i
where DATEDIFF(day, date("created-at"), current_date) < 730
group by "drug-id"
"""
df_inv = rs_db.get_df(q_inv)
df_drugs = df_drugs.merge(df_inv, on="drug_id", how="left")
# get necessary drug info from drugs master
q_drugs = f"""
SELECT id as drug_id, type
from "{read_schema}".drugs d
"""
df_drug_info = rs_db.get_df(q_drugs)
df_drugs = df_drugs.merge(df_drug_info, on="drug_id", how="left")
# default ptr value for generic=35 and rest=100
df_drugs["avg_ptr"] = np.where(
(df_drugs["avg_ptr"].isna()) & (df_drugs["type"] == "generic"), 35,
df_drugs["avg_ptr"])
df_drugs["avg_ptr"] = np.where(
(df_drugs["avg_ptr"].isna()) & (df_drugs["type"] != "generic"), 100,
df_drugs["avg_ptr"])
        # required format for RS write
df_drugs = df_drugs[['drug_id', 'qty_sold_l2y','revenue_l2y',
'num_bills_l2y', 'std_qty', 'purchase_interval',
'avg_ptr', 'avg_selling_rate']]
df_drugs.columns = [c.replace('_', '-') for c in df_drugs.columns]
df_drugs['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df_drugs['created-by'] = 'etl-automation'
df_drugs['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df_drugs['updated-by'] = 'etl-automation'
logger.info("All calculations complete")
if debug_mode == 'N':
logger.info(f"Truncating {table_name} in {write_schema}")
truncate_query = f"""
truncate table "{write_schema}"."{table_name}"
"""
rs_db.execute(truncate_query)
logger.info(f"Truncating {table_name} in {write_schema} successful")
logger.info("Writing table to RS-DB")
s3.write_df_to_db(df=df_drugs, table_name=table_name,
db=rs_db, schema=write_schema)
logger.info("Writing table to RS-DB completed")
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"Drug-Std-Info code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"Drug-Std-Info code execution status: {status}")
return status
```
## Dependent Functions
```
def pts_drug_interval(pd_arr):
"""Purchase interval between buying on patient-drug level
considering median interval"""
df = pd.DataFrame(pd_arr, columns=["bill_date"])
df = df.sort_values(by='bill_date', ascending=True)
df["delta"] = (df['bill_date']-df['bill_date'].shift())
df = df.dropna()
median_days = df["delta"].median().days
return median_days
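# Illustrative only (hypothetical dates): bills on Jan 1, Jan 15 and Feb 14 give
# gaps of 14 and 30 days, so the returned median interval is 22 days.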
def latest_20(pd_arr):
"""To consider only latest 20 patients who bought drug in more than 4 qty
objective: to reduce run time"""
pts_list = list(pd_arr)[-20:]
return pts_list
def count_unique(pd_arr):
return len(pd_arr.unique())
def mode(pd_arr):
return min(pd_arr.mode())
def median(pd_arr):
return stats.median(pd_arr)
```
## Pass Params & Run Algo
```
email_to = "[email protected]"
debug_mode = "Y"
env = "dev"
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
s3 = S3()
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
table_name = 'drug-std-info'
# open RS connection
rs_db.open_connection()
""" calling the main function """
status = main(debug_mode=debug_mode, rs_db=rs_db, read_schema=read_schema,
write_schema=write_schema, table_name=table_name, s3=s3,
logger=logger)
# close RS connection
rs_db.close_connection()
```
## Send Email Notification
```
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
reset_date = dt.date.today().strftime("%Y-%m-%d")
email.send_email_file(
subject=f"Drug-STD-Info Update (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
""",
to_emails=email_to)
logger.info("Script ended")
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/scripts/drug-std-info/drug_std_info.ipynb | drug_std_info.ipynb |
```
import sys
import datetime
import time
import re
import os
# To add path so that we can improt zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "\\scripts\\distributor-ranking2-main\\distributor_ranking2_main.ipynb"
parameters = {
"env": "prod",
"email_to":"[email protected]",
"debug_mode":"Y"
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600,
in_vpc=True,
instance_type="ml.c5.2xlarge",
env=env
)
# if env == "dev":
# import papermill as pm
# pm.execute_notebook(
# input_path=input_file,
# output_path=f"{output_path}/{result}",
# parameters=parameters
# )
# elif env in ("stage", "prod"):
# run_notebook.run_notebook(
# image=f"{env}-notebook-runner",
# notebook=input_file,
# output=output_path,
# parameters=parameters,
# timeout_in_sec=7200,
# in_vpc=True,
# env=env,
# instance_type="ml.c5.9xlarge"
# )
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_distributor_ranking2_main.ipynb | run_distributor_ranking2_main.ipynb |
```
import sys
import datetime
import time
import re
import os
# Add path so that we can import zeno_etl_libs from the local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "\\scripts\\non-ipc-ss-main\\non_ipc_ss_main.ipynb"
parameters = {
"env": "prod",
"email_to":"[email protected]",
"debug_mode":"Y"
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=7200,
in_vpc=True,
instance_type="ml.c5.9xlarge",
env=env
)
# if env == "dev":
# import papermill as pm
# pm.execute_notebook(
# input_path=input_file,
# output_path=f"{output_path}/{result}",
# parameters=parameters
# )
# elif env in ("stage", "prod"):
# run_notebook.run_notebook(
# image=f"{env}-notebook-runner",
# notebook=input_file,
# output=output_path,
# parameters=parameters,
# timeout_in_sec=7200,
# in_vpc=True,
# env=env,
# instance_type="ml.c5.9xlarge"
# )
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_non_ipc_ss_main.ipynb | run_non_ipc_ss_main.ipynb |
```
import sys
import datetime
import time
import re
import os
# Add path so that we can import zeno_etl_libs from the local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "\\scripts\\warehouse\\wh_goodaid_forecast_343.ipynb"
parameters = {
"env": env
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=7200,
in_vpc=True,
env=env
)
# if env == "dev":
# import papermill as pm
# pm.execute_notebook(
# input_path=input_file,
# output_path=f"{output_path}/{result}",
# parameters=parameters
# )
# elif env in ("stage", "prod"):
# run_notebook.run_notebook(
# image=f"{env}-notebook-runner",
# notebook=input_file,
# output=output_path,
# parameters=parameters,
# timeout_in_sec=7200,
# in_vpc=True,
# env=env,
# instance_type="ml.c5.9xlarge"
# )
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_wh_goodaid_forecast.ipynb | run_wh_goodaid_forecast.ipynb |
```
import sys
import datetime
import time
import re
import os
# Add path so that we can import zeno_etl_libs from the local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "\\scripts\\distributor-ranking-main\\distributor_ranking_main.ipynb"
parameters = {
"env": "prod",
"email_to":"[email protected]",
"debug_mode":"Y"
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600,
in_vpc=True,
env=env
)
# if env == "dev":
# import papermill as pm
# pm.execute_notebook(
# input_path=input_file,
# output_path=f"{output_path}/{result}",
# parameters=parameters
# )
# elif env in ("stage", "prod"):
# run_notebook.run_notebook(
# image=f"{env}-notebook-runner",
# notebook=input_file,
# output=output_path,
# parameters=parameters,
# timeout_in_sec=7200,
# in_vpc=True,
# env=env,
# instance_type="ml.c5.9xlarge"
# )
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_distributor_ranking_main.ipynb | run_distributor_ranking_main.ipynb |
```
!pip uninstall -y zeno_etl_libs  # -y skips the confirmation prompt; presumably run so the local copy appended to sys.path below is used
import sys
import datetime
import time
import re
import os
# Add path so that we can import zeno_etl_libs from the local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "\\scripts\\playstore-review\\ecomm-playstore-reviews.ipynb"
parameters = {
"env": env,
"full_run":0
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600,
in_vpc=True,
instance_type="ml.m5.large",
env=env
)
!aws s3 cp s3://sagemaker-ap-south-1-921939243643/papermill_output/ecomm-playstore-reviews-2022-06-17-11-18-31.ipynb .
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_playstore_reviews.ipynb | run_playstore_reviews.ipynb |