import pandas as pd
import datetime
def forecast_patient_data(store_id_list, type_list, reset_date, db, schema,
logger=None, last_date=None):
''' FETCHING HISTORICAL PATIENT DATA'''
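    # Flow: weekly distinct-patient counts per drug are built from bills
    # (positive counts) and customer returns (negative counts), spread onto a
    # dense drug x calendar-date grid, trimmed to complete weeks, and
    # aggregated to week-begin level.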
if last_date is None:
last_date = datetime.date(day=1, month=4, year=2019)
print('Date range', str(last_date), str(reset_date))
# store list
if type(store_id_list) is not list:
store_id_list = [store_id_list]
store_id_list = str(store_id_list).replace('[', '(').replace(']', ')')
# drug list
drug_list_query = """
select id as drug_id from "{schema}".drugs where type in {0}
""".format(type_list, schema=schema)
drug_list = db.get_df(drug_list_query)
drug_list_tuple = tuple(drug_list['drug_id'])
# getting patient data
patient_data_query = """
select date(a."created-at") as "sales-date",
inv."drug-id",
count(distinct "patient-id") as "patient-count"
from "{schema}"."bills-1" f
join "{schema}"."bill-items-1" a on f.id = a."bill-id"
left join "{schema}"."inventory-1" inv on a."inventory-id" = inv.id
where date(a."created-at") >= '{last_date}'
and date(a."created-at") <= '{reset_date}'
and f."store-id" in {store_id_list}
and inv."drug-id" in {drug_list}
group by date(a."created-at"), inv."drug-id"
union all
select date(a."returned-at") as "sales-date",
inv."drug-id",
count(distinct "patient-id")*-1 as "patient-count"
from "{schema}"."customer-return-items-1" a
join "{schema}"."bills-1" b on a."bill-id" = b.id
left join "{schema}"."inventory-1" inv on a."inventory-id" = inv.id
where date(a."returned-at") >= '{last_date}'
and date(a."returned-at") <= '{reset_date}'
and b."store-id" in {store_id_list}
and inv."drug-id" in {drug_list}
group by date(a."returned-at"), inv."drug-id"
""".format(store_id_list=store_id_list, last_date=str(last_date),
reset_date=str(reset_date), drug_list=drug_list_tuple,
schema=schema)
patient_data = db.get_df(patient_data_query)
patient_data.columns = [col.replace('-', '_') for col in patient_data.columns]
'''CREATING DAY-DRUG patient_data CROSS TABLE'''
calendar_query = """
select date, year, month, "week-of-year", "day-of-week"
from "{schema}".calendar
""".format(schema=schema)
calendar = db.get_df(calendar_query)
calendar.columns = [c.replace('-', '_') for c in calendar.columns]
calendar['date'] = pd.to_datetime(calendar['date'])
patient_data['sales_date'] = pd.to_datetime(patient_data['sales_date'])
print('Distinct drug count', patient_data.drug_id.nunique())
print('No of days', patient_data.sales_date.nunique())
cal_patient_weekly = calendar.loc[
(pd.to_datetime(calendar['date']) >= patient_data.sales_date.min()) &
(calendar['date'] <= patient_data.sales_date.max())]
# removing the first week if it has less than 7 days
min_year = cal_patient_weekly.year.min()
x = cal_patient_weekly.loc[(cal_patient_weekly.year == min_year)]
min_month = x.month.min()
x = x.loc[(x.month == min_month)]
min_week = x.week_of_year.min()
if x.loc[x.week_of_year == min_week].shape[0] < 7:
print('removing dates for', min_year, min_month, min_week)
cal_patient_weekly = cal_patient_weekly.loc[
~((cal_patient_weekly.week_of_year == min_week) &
(cal_patient_weekly.year == min_year))]
# removing the latest week if it has less than 7 days
max_year = cal_patient_weekly.year.max()
x = cal_patient_weekly.loc[(cal_patient_weekly.year == max_year)]
max_month = x.month.max()
x = x.loc[(x.month == max_month)]
max_week = x.week_of_year.max()
if x.loc[x.week_of_year == max_week].shape[0] < 7:
print('removing dates for', max_year, max_month, max_week)
cal_patient_weekly = cal_patient_weekly.loc[
~((cal_patient_weekly.week_of_year == max_week) &
(cal_patient_weekly.year == max_year))]
# adding week begin date
cal_patient_weekly['week_begin_dt'] = cal_patient_weekly.apply(
lambda x: x['date'] - datetime.timedelta(x['day_of_week']), axis=1)
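    # The dummy 'key' column below implements a cross join so that every drug
    # is paired with every calendar date; on pandas >= 1.2 this is equivalent
    # to drugs.merge(cal_patient_weekly, how='cross').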
drugs = patient_data[['drug_id']].drop_duplicates()
drugs['key'] = 1
cal_patient_weekly['key'] = 1
cal_drug_w = drugs.merge(cal_patient_weekly, on='key', how='inner')
cal_drug_w.drop('key', axis=1, inplace=True)
cal_drug_patient_w = cal_drug_w.merge(
patient_data, left_on=['drug_id', 'date'],
right_on=['drug_id', 'sales_date'],
how='left')
cal_drug_patient_w.drop('sales_date', axis=1, inplace=True)
cal_drug_patient_w.patient_count.fillna(0, inplace=True)
# assertion test to check no of drugs * no of days equals total entries
drug_count = cal_drug_patient_w.drug_id.nunique()
day_count = cal_drug_patient_w.date.nunique()
print('Distinct no of drugs', drug_count)
print('Distinct dates', day_count)
print('DF shape', cal_drug_patient_w.shape[0])
    # assert drug_count*day_count == cal_drug_patient_w.shape[0]
# checking for history available and store opening date
first_bill_query = """
select min(date("created-at")) as bill_date
from "{schema}"."bills-1"
where "store-id" in {0}
""".format(store_id_list, schema=schema)
first_bill_date = db.get_df(first_bill_query).values[0][0]
print(first_bill_date)
cal_drug_patient_w = cal_drug_patient_w.query(
'date >= "{}"'.format(first_bill_date))
cal_drug_patient_weekly = cal_drug_patient_w.groupby(
['drug_id', 'week_begin_dt', 'week_of_year']
)['patient_count'].sum().reset_index()
cal_drug_patient_weekly.rename(
columns={'week_begin_dt': 'date'}, inplace=True)
    return cal_drug_patient_weekly

# File: /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/non_ipc/data_prep/patient_data.py (package: zeno-etl-libs-v3)
import datetime
from zeno_etl_libs.utils.ipc.data_prep import forecast_data_prep
from zeno_etl_libs.utils.non_ipc.data_prep.patient_data import forecast_patient_data
from zeno_etl_libs.utils.ipc.item_classification import abc_xyz_classification
def non_ipc_data_prep(store_id_list, reset_date, type_list, db, schema,
agg_week_cnt=4, logger=None):
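    # Flow: merge weekly demand and patient data, drop drugs with no sales in
    # the last 12 weeks, roll weeks into 4-week periods (dropping incomplete
    # periods), and run ABC-XYZ classification on the latest three periods.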
# getting demand data
cal_drug_sales_weekly, _, _ = forecast_data_prep(
store_id_list, type_list, reset_date, db, schema)
# getting patient data
cal_drug_patient_weekly = forecast_patient_data(
store_id_list, type_list, reset_date, db, schema)
# merging patient and demand data
cal_drug_data_weekly = cal_drug_sales_weekly.merge(cal_drug_patient_weekly)
'''ADDITIONAL CHECKS'''
n = 12
prev_n_week_dt = (
cal_drug_data_weekly['date'].max() - datetime.timedelta(n*7))
logger.info('Previous week date for last 12 weeks' + str(prev_n_week_dt))
prev_n_week_sales = cal_drug_data_weekly[
cal_drug_data_weekly['date'] > prev_n_week_dt].\
groupby('drug_id')['net_sales_quantity'].sum().reset_index()
prev_no_sales_drug_weekly = prev_n_week_sales.loc[
prev_n_week_sales['net_sales_quantity'] <= 0, 'drug_id'].values
prev_sales_drug_weekly = prev_n_week_sales.loc[
prev_n_week_sales['net_sales_quantity'] > 0, 'drug_id'].values
logger.info('No net sales of drugs within last 12 weeks' +
str(len(prev_no_sales_drug_weekly)))
logger.info('Sales of drugs within last 12 weeks' +
str(len(prev_sales_drug_weekly)))
    # keeping drug ids with at least one sale in the last 12 weeks
cal_drug_data_weekly = cal_drug_data_weekly[
cal_drug_data_weekly.drug_id.isin(prev_sales_drug_weekly)]
'''4 WEEKS AGGREGATION'''
cal_drug_data_weekly['week_number'] = cal_drug_data_weekly.\
groupby('drug_id')['date'].rank(ascending=False) - 1
cal_drug_data_weekly['agg_wk_count'] = (
cal_drug_data_weekly['week_number']/agg_week_cnt).astype(int) + 1
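    # Illustration: with agg_week_cnt = 4 the most recent week gets
    # week_number 0, so weeks 0-3 map to agg_wk_count 1, weeks 4-7 to
    # agg_wk_count 2, and so on (latest period first).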
agg_wk_ct_lt_4 = cal_drug_data_weekly.\
groupby('agg_wk_count')['week_number'].nunique().reset_index()
agg_wk_ct_lt_4 = agg_wk_ct_lt_4.query('week_number < 4')['agg_wk_count']
# removing incomplete 4-week period
cal_drug_data_weekly = cal_drug_data_weekly[
~cal_drug_data_weekly['agg_wk_count'].isin(agg_wk_ct_lt_4)]
cal_drug_data_agg_weekly = cal_drug_data_weekly.\
groupby(['drug_id', 'agg_wk_count']).\
agg({'date': 'min', 'net_sales_quantity': 'sum', 'patient_count': 'sum'
}).\
reset_index()
cal_drug_data_agg_weekly.sort_values(['drug_id', 'date'], inplace=True)
'''SKU CLASSIFICATIONS'''
# Taking latest 3 4-week period for classification
bucket_period = 3
agg_wk_classification = cal_drug_data_agg_weekly.loc[
cal_drug_data_agg_weekly['agg_wk_count'] <= bucket_period, 'date'
].dt.date.unique()
cal_drug_data_classification = cal_drug_data_agg_weekly[
cal_drug_data_agg_weekly['date'].isin(agg_wk_classification)]
cal_drug_data_classification.rename(
columns={'date': 'month_begin_dt'}, inplace=True)
drug_class, bucket_sales = abc_xyz_classification(
cal_drug_data_classification)
return cal_drug_data_agg_weekly, cal_drug_data_weekly, drug_class,\
        bucket_sales

# File: /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/non_ipc/data_prep/non_ipc_data_prep.py (package: zeno-etl-libs-v3)
import numpy as np
import pandas as pd
from functools import reduce
from scipy.optimize import minimize, LinearConstraint
from zeno_etl_libs.utils.warehouse.forecast.errors import ape_calc, ae_calc,\
train_error
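# The objective functions below score a weighted average of the base-model
# forecasts: fcst = sum_i w_i * fcst_i. The '_ab' variants blend naive, moving
# average and ETS; the '_c' variants add Croston as a fourth component. The
# weights are later found with scipy.optimize.minimize under bounds [0, 1] and
# a LinearConstraint forcing them to sum to 1, minimising MAE or SSE.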
def optimise_ab_mae(weights, naive_fcst, ma_fcst, ets_fcst, actual):
fcst = weights[0]*naive_fcst + weights[1]*ma_fcst + weights[2]*ets_fcst
return np.sum(abs((fcst - actual)))/len(naive_fcst)
def optimise_ab_sse(weights, naive_fcst, ma_fcst, ets_fcst, actual):
fcst = weights[0]*naive_fcst + weights[1]*ma_fcst + weights[2]*ets_fcst
return np.sum((fcst - actual)**2)
def optimise_c_mae(
weights, naive_fcst, ma_fcst, ets_fcst, croston_fcst, actual):
fcst = (
weights[0]*naive_fcst + weights[1]*ma_fcst + weights[2]*ets_fcst +
weights[3]*croston_fcst)
return np.sum(abs((fcst - actual)))/len(naive_fcst)
def optimise_c_sse(
weights, naive_fcst, ma_fcst, ets_fcst, croston_fcst, actual):
fcst = (
weights[0]*naive_fcst + weights[1]*ma_fcst + weights[2]*ets_fcst +
weights[3]*croston_fcst)
return np.sum((fcst - actual)**2)
def ensemble_minimisation(
train, error, predict, kind='mae', logger=None):
    # merging train dfs for weighted average of models
train = train.copy()
train_cols = ['drug_id', 'month_begin_dt', 'year', 'month',
'actual', 'fcst', 'std', 'ape', 'ae']
train = [x[train_cols] for x in train]
all_train = reduce(
lambda left, right: pd.merge(
left, right,
on=['drug_id', 'month_begin_dt', 'year', 'month'], how='outer'),
train)
all_train.columns = [
'drug_id', 'month_begin_dt', 'year', 'month',
'actual', 'fcst_naive', 'std_naive', 'ape_naive', 'ae_naive',
'actual_ma', 'fcst_ma', 'std_ma', 'ape_ma', 'ae_ma',
'actual_ets', 'fcst_ets', 'std_ets', 'ape_ets', 'ae_ets',
'actual_croston', 'fcst_croston', 'std_croston', 'ape_croston', 'ae_croston']
all_train.drop(
['actual_ma', 'actual_ets', 'actual_croston'], axis=1, inplace=True)
    # merging predict dfs for forecast
predict = predict.copy()
predict_cols = ['drug_id', 'month_begin_dt', 'year', 'month',
'fcst', 'std']
predict = [x[predict_cols] for x in predict]
all_predict = reduce(
lambda left, right: pd.merge(
left, right,
on=['drug_id', 'month_begin_dt', 'year', 'month'], how='outer'),
predict)
all_predict.columns = [
'drug_id', 'month_begin_dt', 'year', 'month',
'fcst_naive', 'std_naive', 'fcst_ma', 'std_ma',
'fcst_ets', 'std_ets', 'fcst_croston', 'std_croston']
'''BASE MODELS WEIGHT OPTIMISATION - A/B'''
all_train_ab = all_train[all_train['ape_croston'].isna()]
all_predict_ab = all_predict[all_predict['fcst_croston'].isna()]
    # individual forecasts and actuals
naive_fcst_ab = all_train_ab['fcst_naive'].values
ma_fcst_ab = all_train_ab['fcst_ma'].values
ets_fcst_ab = all_train_ab['fcst_ets'].values
actual_ab = all_train_ab['actual'].values
# initialisation
weights_ab = np.array([1/3, 1/3, 1/3])
bounds_ab = ((0, 1), (0, 1), (0, 1))
    # constraint on weights: sum(wi) = 1
constrain_ab = LinearConstraint([1, 1, 1], [1], [1])
# minimising errors for A/B buckets
if kind == 'mae':
results = minimize(
optimise_ab_mae, x0=weights_ab, bounds=bounds_ab,
constraints=constrain_ab,
args=(naive_fcst_ab, ma_fcst_ab, ets_fcst_ab, actual_ab))
final_weights_ab = results.x
elif kind == 'sse':
results = minimize(
optimise_ab_sse, x0=weights_ab, bounds=bounds_ab,
constraints=constrain_ab,
args=(naive_fcst_ab, ma_fcst_ab, ets_fcst_ab, actual_ab))
final_weights_ab = results.x
else:
final_weights_ab = weights_ab
# creating final train, error and predict dataset
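    # Note: the ensemble 'std' below adds the weighted component forecasts in
    # quadrature (sqrt of the sum of squares), i.e. the components are treated
    # as if they were independent.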
all_train_ab['fcst'] = np.round(
final_weights_ab[0]*naive_fcst_ab + final_weights_ab[1]*ma_fcst_ab +
final_weights_ab[2]*ets_fcst_ab)
all_train_ab['std'] = np.round(np.sqrt(
(final_weights_ab[0]*naive_fcst_ab)**2 +
(final_weights_ab[1]*ma_fcst_ab)**2 +
(final_weights_ab[2]*ets_fcst_ab)**2))
all_train_ab['hyper_params'] = str(tuple(final_weights_ab))
all_train_ab['model'] = kind
all_predict_ab['fcst'] = np.round(
final_weights_ab[0]*all_predict_ab['fcst_naive'] +
final_weights_ab[1]*all_predict_ab['fcst_ma'] +
final_weights_ab[2]*all_predict_ab['fcst_ets'])
all_predict_ab['std'] = np.round(np.sqrt(
(final_weights_ab[0]*all_predict_ab['fcst_naive'])**2 +
(final_weights_ab[1]*all_predict_ab['fcst_ma'])**2 +
(final_weights_ab[2]*all_predict_ab['fcst_ets'])**2))
all_predict_ab['model'] = kind
'''BASE MODELS WEIGHT OPTIMISATION - C'''
all_train_c = all_train[~all_train['ape_croston'].isna()]
all_predict_c = all_predict[~all_predict['fcst_croston'].isna()]
    # individual forecasts and actuals
naive_fcst_c = all_train_c['fcst_naive'].values
ma_fcst_c = all_train_c['fcst_ma'].values
ets_fcst_c = all_train_c['fcst_ets'].values
croston_fcst_c = all_train_c['fcst_croston'].values
actual_c = all_train_c['actual'].values
# initialisation
weights_c = np.array([1/4, 1/4, 1/4, 1/4])
bounds_c = ((0, 1), (0, 1), (0, 1), (0, 1))
    # constraint on weights: sum(wi) = 1
constrain_c = LinearConstraint([1, 1, 1, 1], [1], [1])
# minimising errors for C buckets
if kind == 'mae':
results = minimize(
optimise_c_mae, x0=weights_c, bounds=bounds_c,
constraints=constrain_c,
args=(naive_fcst_c, ma_fcst_c, ets_fcst_c,
croston_fcst_c, actual_c))
final_weights_c = results.x
elif kind == 'sse':
results = minimize(
optimise_c_sse, x0=weights_c, bounds=bounds_c,
constraints=constrain_c,
args=(naive_fcst_c, ma_fcst_c, ets_fcst_c,
croston_fcst_c, actual_c))
final_weights_c = results.x
else:
final_weights_c = weights_c
# creating final train, error and predict dataset
all_train_c['fcst'] = np.round(
final_weights_c[0]*naive_fcst_c + final_weights_c[1]*ma_fcst_c +
final_weights_c[2]*ets_fcst_c + final_weights_c[3]*croston_fcst_c)
all_train_c['std'] = np.round(np.sqrt(
(final_weights_c[0]*naive_fcst_c)**2 +
(final_weights_c[1]*ma_fcst_c)**2 +
(final_weights_c[2]*ets_fcst_c)**2 +
(final_weights_c[3]*croston_fcst_c)**2))
all_train_c['hyper_params'] = str(tuple(final_weights_c))
all_train_c['model'] = kind
all_predict_c['fcst'] = np.round(
final_weights_c[0]*all_predict_c['fcst_naive'] +
final_weights_c[1]*all_predict_c['fcst_ma'] +
final_weights_c[2]*all_predict_c['fcst_ets'] +
final_weights_c[3]*all_predict_c['fcst_croston'])
all_predict_c['std'] = np.round(np.sqrt(
(final_weights_c[0]*all_predict_c['fcst_naive'])**2 +
(final_weights_c[1]*all_predict_c['fcst_ma'])**2 +
(final_weights_c[2]*all_predict_c['fcst_ets'])**2 +
(final_weights_c[3]*all_predict_c['fcst_croston'])**2))
all_predict_c['model'] = kind
'''COMPILING TRAINING AND FORECAST '''
# train
ensemble_train = pd.concat([all_train_ab, all_train_c], axis=0)
ensemble_train['ape'] = ensemble_train.apply(
lambda row: ape_calc(row['actual'], row['fcst']), axis=1)
ensemble_train['ae'] = ensemble_train.apply(
lambda row: ae_calc(row['actual'], row['fcst']), axis=1)
cols = train_cols + ['hyper_params', 'model']
ensemble_train = ensemble_train[cols]
# train error
ensemble_train_error = ensemble_train.groupby('drug_id').\
apply(train_error).\
reset_index(drop=True)
ensemble_train_error['model'] = kind
# predict
ensemble_predict = pd.concat([all_predict_ab, all_predict_c], axis=0)
cols = predict_cols + ['model']
ensemble_predict = ensemble_predict[cols]
    return ensemble_train, ensemble_train_error, ensemble_predict

# File: /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/non_ipc/forecast/ensemble_minimisation.py (package: zeno-etl-libs-v3)
import numpy as np
from zeno_etl_libs.utils.warehouse.forecast.helper_functions import make_future_df
from zeno_etl_libs.utils.warehouse.forecast.errors import ape_calc, ae_calc
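# Croston TSB (Teunter-Syntetos-Babai) variant for intermittent demand: the
# demand level `a` is smoothed with `alpha` only on periods with positive
# demand, the demand probability `p` is updated with `beta` every period, and
# the per-period forecast is f = p * a.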
def croston_tsb(ts, horizon=1, alpha=0.5, beta=0.7):
# Transform the input into a numpy array
d = np.array(ts)
# Historical period length
cols = len(d)
# Append np.nan into the demand array to cover future periods
d = np.append(d, [np.nan]*horizon)
# level (a), probability(p) and forecast (f)
a, p, f = np.full((3, cols+horizon), np.nan)
# Initialization
first_occurence = np.argmax(d[:cols] > 0)
a[0] = d[first_occurence]
p[0] = 1/(1 + first_occurence)
f[0] = p[0]*a[0]
# Create all the t forecasts
for t in range(0, cols):
if d[t] > 0:
a[t+1] = alpha*d[t] + (1-alpha)*a[t]
p[t+1] = beta*(1) + (1-beta)*p[t]
else:
a[t+1] = a[t]
p[t+1] = (1-beta)*p[t]
f[t+1] = p[t+1]*a[t+1]
# creating forecast
for t in range(cols, cols+horizon-1):
if f[t] > 1:
a[t+1] = alpha*f[t] + (1-alpha)*a[t]
p[t+1] = beta*(1) + (1-beta)*p[t]
else:
a[t+1] = a[t]
p[t+1] = (1-beta)*p[t]
f[t+1] = p[t+1]*a[t+1]
# Future Forecast
# a[cols+1:cols+horizon] = a[cols]
# p[cols+1:cols+horizon] = p[cols]
# f[cols+1:cols+horizon] = f[cols]
# df = pd.DataFrame.from_dict(
# {"Demand":d,"Forecast":f,"Period":p,"Level":a,"Error":d-f})
return np.round(f), d-f
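# Illustrative usage (hypothetical numbers):
#   fcst, residuals = croston_tsb([0, 0, 3, 0, 1, 0, 0, 2], horizon=4)
# fcst holds the rounded per-period forecast for the history plus 4 future
# periods; residuals are demand minus forecast (NaN for the future periods).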
def croston_train_weekly(df, out_of_sample=4, horizon=4, params=None):
if params is not None:
alpha = params[0]
beta = params[1]
else:
alpha = 0.5
beta = 0.7
train = df.copy()
train.drop(train.tail(out_of_sample).index, inplace=True)
# dividing the series into train and validation set
input_series = train['net_sales_quantity'].values
validation = df['net_sales_quantity'].tail(out_of_sample).values
train_forecast, train_error = croston_tsb(
input_series, horizon, alpha, beta)
fcst = train_forecast[-out_of_sample:]
    # in-sample residuals (demand minus forecast) returned by croston_tsb
    error = train_error[:-out_of_sample]
std = np.sqrt((np.std(input_series)**2 + sum(np.square(error))/len(error)))
predict_df = make_future_df(train[:-out_of_sample+1], 1)
predict_df['fcst'] = sum(fcst)
predict_df['std'] = np.round(std*np.sqrt(horizon))
predict_df['actual'] = sum(validation)
predict_df['ape'] = [
ape_calc(actual, forecast) for actual, forecast in zip(
predict_df['actual'], predict_df['fcst'])]
predict_df['ae'] = [
ae_calc(actual, forecast) for actual, forecast in zip(
predict_df['actual'], predict_df['fcst'])]
predict_df['hyper_params'] = str(params)
return predict_df
def croston_predict_weekly(df, out_of_sample=4, horizon=4, params=None):
if params is not None:
alpha = params[0]
beta = params[1]
else:
alpha = 0.5
beta = 0.7
train = df.copy()
# dividing the series into train and validation set
input_series = train['net_sales_quantity'].values
train_forecast, train_error = croston_tsb(
input_series, horizon, alpha, beta)
fcst = train_forecast[-out_of_sample:]
    # in-sample residuals (demand minus forecast) returned by croston_tsb
    error = train_error[:-out_of_sample]
std = np.sqrt((np.std(input_series)**2 + sum(np.square(error))/len(error)))
predict_df = make_future_df(train[:-out_of_sample+1], 1)
predict_df['fcst'] = sum(fcst)
predict_df['std'] = np.round(std*np.sqrt(horizon))
    return predict_df

# File: /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/non_ipc/forecast/croston.py (package: zeno-etl-libs-v3)
import time
import pandas as pd
from itertools import product
from zeno_etl_libs.utils.warehouse.forecast.moving_average import ma_train_monthly,\
ma_predict_monthly
from zeno_etl_libs.utils.warehouse.forecast.ets import ets_train_monthly,\
ets_predict_monthly
from zeno_etl_libs.utils.warehouse.forecast.errors import train_error
from zeno_etl_libs.utils.warehouse.forecast.helper_functions import apply_parallel_ets
from zeno_etl_libs.utils.non_ipc.forecast.\
helper_functions import non_ipc_error_report, apply_parallel_croston
from zeno_etl_libs.utils.non_ipc.forecast.croston import croston_train_weekly,\
croston_predict_weekly
from zeno_etl_libs.utils.non_ipc.forecast.\
ensemble_champion import ensemble_champion
from zeno_etl_libs.utils.non_ipc.forecast.\
ensemble_minimisation import ensemble_minimisation
def non_ipc_forecast(
drug_sales_monthly, drug_data_weekly, drug_class, out_of_sample,
horizon, train_flag, logger=None, kind='mae'):
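    # When train_flag is set, four base models are fit per drug (naive via the
    # MA helper with k=1, a 3-period moving average, ETS over a Holt-Winters
    # parameter grid, and Croston on weekly data for C-bucket SKUs), followed
    # by two ensembles: a per-drug champion picked on training MAPE and an
    # error-minimised weighted blend. When train_flag is off, only a simple
    # exponential smoothing forecast is produced.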
if train_flag:
'''BASE FORECASTING - NAIVE'''
logger.info('STARTING NAIVE TRAINING AND FORECAST')
# making copy for data
naive_train_data = drug_sales_monthly.copy()
naive_train_data.rename(columns={'date': 'month_begin_dt'}, inplace=True)
k = 1 # for naive using the ma train function
# train
start = time.time()
naive_train = naive_train_data.groupby('drug_id').apply(
ma_train_monthly, k, out_of_sample).\
reset_index(drop=True)
end = time.time()
logger.info('Naive Train: Run time ' + str(round(end-start, 2)) + 'secs')
# train error
start = time.time()
naive_train_error = naive_train.groupby('drug_id').apply(train_error).\
reset_index(drop=True)
end = time.time()
logger.info('Naive Error: Run time ' + str(round(end-start, 2)) + 'secs')
# predict
start = time.time()
naive_predict = naive_train_data.groupby('drug_id').\
apply(ma_predict_monthly, k, out_of_sample).reset_index(drop=True)
end = time.time()
logger.info('Naive Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
# Naive error reports
# _ = non_ipc_error_report(naive_train_error, naive_train, drug_class)
# model informations
naive_train['hyper_params'] = ''
naive_train['model'] = 'naive'
naive_train_error['model'] = 'naive'
naive_predict['model'] = 'naive'
'''BASE FORECASTING - MOVING AVERAGE'''
logger.info('STARTING MOVING AVERAGE TRAINING AND FORECAST')
ma_train_data = drug_sales_monthly.copy()
ma_train_data.rename(columns={'date': 'month_begin_dt'}, inplace=True)
k = 3 # for MA3
# train
start = time.time()
ma_train = ma_train_data.groupby('drug_id').apply(
ma_train_monthly, k, out_of_sample).\
reset_index(drop=True)
end = time.time()
logger.info('MA Train: Run time ' + str(round(end-start, 2)) + 'secs')
# train error
start = time.time()
ma_train_error = ma_train.groupby('drug_id').apply(train_error).\
reset_index(drop=True)
end = time.time()
logger.info('MA Error: Run time ' + str(round(end-start, 2)) + 'secs')
# predict
start = time.time()
ma_predict = ma_train_data.groupby('drug_id').\
apply(ma_predict_monthly, k, out_of_sample).reset_index(drop=True)
end = time.time()
logger.info('MA Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
# Moving average error reports
# _ = non_ipc_error_report(ma_train_error, ma_train, drug_class)
# model informations
ma_train['hyper_params'] = ''
ma_train['model'] = 'ma'
ma_train_error['model'] = 'ma'
ma_predict['model'] = 'ma'
'''BASE FORECASTING - EXPONENTIAL SMOOTHING'''
logger.info('STARTING ESM TRAINING AND FORECAST')
# model parameters
# holts winter implementation - single, double and triple exponential
trend = ['additive', None]
seasonal = ['additive', None]
damped = [True, False]
seasonal_periods = [12]
use_boxcox = [True, False]
ets_params = list(
product(trend, seasonal, damped, seasonal_periods, use_boxcox))
ets_train_data = drug_sales_monthly.copy()
ets_train_data.rename(columns={'date': 'month_begin_dt'}, inplace=True)
# train
start = time.time()
ets_train = apply_parallel_ets(
ets_train_data.groupby('drug_id'), ets_train_monthly,
ets_params, horizon, out_of_sample).reset_index(drop=True)
end = time.time()
logger.info('ETS Train: Run time ' + str(round(end-start, 2)) + 'secs')
# train error
start = time.time()
ets_train_error = ets_train.groupby('drug_id').apply(train_error).\
reset_index(drop=True)
end = time.time()
logger.info('ETS Error: Run time ' + str(round(end-start, 2)) + 'secs')
# predict
start = time.time()
ets_predict = apply_parallel_ets(
ets_train_data.groupby('drug_id'), ets_predict_monthly,
ets_train, horizon, out_of_sample).reset_index(drop=True)
end = time.time()
logger.info('ETS Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
# Exponential smoothing error reports
# _ = non_ipc_error_report(ets_train_error, ets_train, drug_class)
# model information
ets_train['model'] = 'ets'
ets_train_error['model'] = 'ets'
ets_predict['model'] = 'ets'
'''BASE FORECASTING - CROSTON FOR C BUCKET'''
logger.info('STARTING CROSTON TRAINING AND FORECAST')
# getting drug list for C bucket
c_bucket_drug_list = list(
drug_class[drug_class['bucket_abc'] == 'C']['drug_id'])
logger.info('No of drugs in Bucket C for Croston' +
str(len(c_bucket_drug_list)))
croston_train_data = drug_data_weekly.copy()
croston_train_data = croston_train_data[
croston_train_data['drug_id'].isin(c_bucket_drug_list)]
croston_train_data.rename(columns={'date': 'month_begin_dt'}, inplace=True)
# Runtime parameters
croston_out_of_sample = 4
croston_horizon = 4
croston_params = (0.5, 0.5)
# train
start = time.time()
croston_train = apply_parallel_croston(
croston_train_data.groupby('drug_id'), croston_train_weekly,
croston_horizon, croston_out_of_sample, croston_params).\
reset_index(drop=True)
end = time.time()
logger.info('Croston Train: Run time ' + str(round(end-start, 2)) + 'secs')
# train error
start = time.time()
croston_train_error = croston_train.groupby('drug_id').\
apply(train_error).\
reset_index(drop=True)
end = time.time()
logger.info('Croston Error: Run time ' + str(round(end-start, 2)) + 'secs')
# predict
start = time.time()
croston_predict = apply_parallel_croston(
croston_train_data.groupby('drug_id'), croston_predict_weekly,
croston_horizon, croston_out_of_sample, croston_params).\
reset_index(drop=True)
end = time.time()
logger.info('Croston Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
# Croston error reports
# _ = non_ipc_error_report(croston_train_error, croston_train, drug_class)
# model information
croston_train['model'] = 'croston'
croston_train_error['model'] = 'croston'
croston_predict['model'] = 'croston'
'''BASE MODELS: COMBINING'''
train = [naive_train, ma_train, ets_train, croston_train]
error = [
naive_train_error, ma_train_error, ets_train_error,
croston_train_error]
predict = [naive_predict, ma_predict, ets_predict, croston_predict]
base_train = pd.concat(train, axis=0)
base_train['final_fcst'] = 'N'
base_train_error = pd.concat(error, axis=0)
base_train_error['final_fcst'] = 'N'
base_predict = pd.concat(predict, axis=0)
base_predict['final_fcst'] = 'N'
'''ENSEMBLE FORECASTING - CHAMPION MODEL'''
logger.info('STARTING ENSEMBLE CHAMPION MODEL SELECTION')
champion_train, champion_train_error, champion_predict = ensemble_champion(
train, error, predict, logger)
champion_train['model'] = 'champion_' + champion_train['model']
champion_train_error['model'] = 'champion_' + champion_train_error['model']
champion_predict['model'] = 'champion_' + champion_predict['model']
champion_train['final_fcst'] = 'Y'
champion_train_error['final_fcst'] = 'Y'
champion_predict['final_fcst'] = 'Y'
        # Champion model ensemble training errors
# _ = non_ipc_error_report(champion_train_error, champion_train, drug_class)
        '''ENSEMBLE FORECASTING - ERROR MINIMISATION (MAE/SSE)'''
optimised_train, optimised_train_error,\
optimised_predict = ensemble_minimisation(
train, error, predict, kind, logger)
optimised_train['final_fcst'] = 'N'
optimised_train_error['final_fcst'] = 'N'
optimised_predict['final_fcst'] = 'N'
        # Optimised ensemble training errors
# _ = non_ipc_error_report(
# optimised_train_error, optimised_train, drug_class)
        '''ENSEMBLE MODELS: COMBINING'''
ensemble_train = [champion_train, optimised_train]
ensemble_error = [champion_train_error, optimised_train_error]
ensemble_predict = [champion_predict, optimised_predict]
ensemble_train = pd.concat(ensemble_train, axis=0)
ensemble_error = pd.concat(ensemble_error, axis=0)
ensemble_predict = pd.concat(ensemble_predict, axis=0)
else:
'''BASE FORECASTING - SIMPLE EXPONENTIAL SMOOTHING'''
logger.info('STARTING SES FORECAST')
# model parameters
# holts winter implementation - single exponential
trend = [None]
seasonal = [None]
damped = [False]
seasonal_periods = [12]
use_boxcox = [False]
ses_params = list(
product(trend, seasonal, damped, seasonal_periods, use_boxcox))
ses_train_data = drug_sales_monthly.copy()
ses_train_data.rename(columns={'date': 'month_begin_dt'}, inplace=True)
ses_train_data['hyper_params'] = str(ses_params[0])
# predict
start = time.time()
ses_predict = apply_parallel_ets(
ses_train_data.groupby('drug_id'), ets_predict_monthly,
ses_train_data, horizon, out_of_sample).reset_index(drop=True)
end = time.time()
logger.info('ETS Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
# model information
ses_predict['model'] = 'ses'
# creating final return df
base_train = pd.DataFrame()
base_train_error = pd.DataFrame()
base_predict = pd.DataFrame()
ensemble_train = pd.DataFrame()
ensemble_error = pd.DataFrame()
ensemble_predict = ses_predict
ensemble_predict['final_fcst'] = 'Y'
return base_train, base_train_error,\
        base_predict, ensemble_train, ensemble_error, ensemble_predict

# File: /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/non_ipc/forecast/forecast_main.py (package: zeno-etl-libs-v3)
import pandas as pd
import numpy as np
from functools import reduce
def ensemble_champion(train, error, predict, logger=None):
    # merging error dfs for best model selection
all_train_error = reduce(
lambda left, right: pd.merge(left, right, on='drug_id', how='outer'),
error)
all_train_error.columns = [
'drug_id', 'mae_naive', 'mape_naive', 'model_naive', 'mae_ma',
'mape_ma', 'model_ma', 'mae_ets', 'mape_ets', 'model_ets',
'mae_croston', 'mape_croston', 'model_croston']
# Best model selection
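    # np.select returns the choice for the first matching condition, so ties
    # on mape_best resolve in the order ets > ma > croston > naive.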
all_train_error['mape_best'] = all_train_error[[
'mape_naive', 'mape_ma', 'mape_ets', 'mape_croston']].min(axis=1)
all_train_error['model_best'] = np.select([
all_train_error['mape_best'] == all_train_error['mape_ets'],
all_train_error['mape_best'] == all_train_error['mape_ma'],
all_train_error['mape_best'] == all_train_error['mape_croston'],
all_train_error['mape_best'] == all_train_error['mape_naive']],
['ets', 'ma', 'croston', 'naive']
)
# Different models concatenation
naive_drug_best = all_train_error[all_train_error['model_best'] == 'naive']
ma_drug_best = all_train_error[all_train_error['model_best'] == 'ma']
ets_drug_best = all_train_error[all_train_error['model_best'] == 'ets']
croston_drug_best = all_train_error[
all_train_error['model_best'] == 'croston']
print(
len(all_train_error), len(naive_drug_best), len(ma_drug_best),
len(ets_drug_best), len(croston_drug_best))
logger.info('Total drugs: ' + str(len(all_train_error)))
logger.info('Naive drugs: ' + str(len(naive_drug_best)))
logger.info('MA drugs: ' + str(len(ma_drug_best)))
logger.info('ETS drugs: ' + str(len(ets_drug_best)))
logger.info('Croston drugs: ' + str(len(croston_drug_best)))
# creating ensemble dfs
naive_train_best = train[0][train[0]['drug_id'].isin(
naive_drug_best['drug_id'])]
naive_train_error_best = error[0][error[0]['drug_id'].isin(
naive_drug_best['drug_id'])]
naive_predict_best = predict[0][predict[0]['drug_id'].isin(
naive_drug_best['drug_id'])]
ma_train_best = train[1][train[1]['drug_id'].isin(
ma_drug_best['drug_id'])]
ma_train_error_best = error[1][error[1]['drug_id'].isin(
ma_drug_best['drug_id'])]
ma_predict_best = predict[1][predict[1]['drug_id'].isin(
ma_drug_best['drug_id'])]
ets_train_best = train[2][train[2]['drug_id'].isin(
ets_drug_best['drug_id'])]
ets_train_error_best = error[2][error[2]['drug_id'].isin(
ets_drug_best['drug_id'])]
ets_predict_best = predict[2][predict[2]['drug_id'].isin(
ets_drug_best['drug_id'])]
croston_train_best = train[3][train[3]['drug_id'].isin(
croston_drug_best['drug_id'])]
croston_train_error_best = error[3][error[3]['drug_id'].isin(
croston_drug_best['drug_id'])]
croston_predict_best = predict[3][predict[3]['drug_id'].isin(
croston_drug_best['drug_id'])]
ensemble_train = pd.concat(
[naive_train_best, ma_train_best, ets_train_best, croston_train_best],
axis=0)
ensemble_train_error = pd.concat(
[naive_train_error_best, ma_train_error_best, ets_train_error_best,
croston_train_error_best], axis=0)
ensemble_predict = pd.concat(
[naive_predict_best, ma_predict_best, ets_predict_best,
croston_predict_best], axis=0)
    return ensemble_train, ensemble_train_error, ensemble_predict

# File: /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/non_ipc/forecast/ensemble_champion.py (package: zeno-etl-libs-v3)
import numpy as np
import pandas as pd
from calendar import monthrange
from dateutil.relativedelta import relativedelta
from datetime import datetime
from dateutil.tz import gettz
from zeno_etl_libs.db.db import MSSql
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.utils.warehouse.data_prep.wh_data_prep import get_launch_stock_per_store
def lead_time():
mssql = MSSql(connect_via_tunnel=False)
cnxn = mssql.open_connection()
cursor = cnxn.cursor()
# Reading lead time data
# Last 90 days on purchase date
# excluding TEPL distributor
# Diff between PO created date and gatepass date
sql_bhw = '''
SELECT
*
FROM
(
SELECT
199 as "wh_id",
i.Barcode as "drug_id" ,
i.name as "drug_name",
a.Altercode as "distributor_id",
a.Name as "distributor_name",
a2.vdt as "gate-pass-date",
--sp.Vdt as "purchase_date",
--sp.RefVdt as "po_opend_date",
s.PBillDt as "po_created_date",
s.UpdatedOn as "purchase_confirm_date",
sp.Qty as "quantity" ,
DATEDIFF(day, s.PBillDt , a2.vdt) as "lead_time"
FROM
SalePurchase2 sp
left join Item i on
sp.Itemc = i.code
left join Salepurchase1 s on
sp.Vtype = s.Vtyp
and sp.Vno = s.Vno
and sp.Vdt = s.Vdt
left join Acknow a2 on sp.Pbillno =a2.Pbillno
left join acm a on
sp.Acno = a.code
Where
sp.Vtype = 'PB'
and sp.Vdt >= cast(DATEADD(day, -91, GETDATE()) as date)
and sp.Vdt <= cast(DATEADD(day, -1, GETDATE()) as date)
and i.Compname NOT IN ('GOODAID', 'PURE & C')
and i.Barcode NOT LIKE '%[^0-9]%'
and isnumeric(i.Barcode) = 1
and a.code NOT IN (59468, 59489)) a
Where
(a."lead_time">0
and a."lead_time"<7);
'''
data_bhw = pd.read_sql(sql_bhw, cnxn)
data_bhw[['drug_id']] \
= data_bhw[['drug_id']] \
.apply(pd.to_numeric, errors='ignore').astype('Int64')
#TEPL Data
mssql_tepl = MSSql(connect_via_tunnel=False, db='Esdata_TEPL')
cnxn = mssql_tepl.open_connection()
cursor = cnxn.cursor()
sql_tepl = '''
SELECT
*
FROM
(
SELECT
342 as "wh_id",
i.Barcode as "drug_id" ,
i.name as "drug_name",
a.Altercode as "distributor_id",
a.Name as "distributor_name",
a2.vdt as "gate-pass-date",
--sp.Vdt as "purchase_date",
--sp.RefVdt as "po_opend_date",
s.PBillDt as "po_created_date",
s.UpdatedOn as "purchase_confirm_date",
sp.Qty as "quantity" ,
DATEDIFF(day, s.PBillDt , a2.vdt) as "lead_time"
FROM
SalePurchase2 sp
left join Item i on
sp.Itemc = i.code
left join Salepurchase1 s on
sp.Vtype = s.Vtyp
and sp.Vno = s.Vno
and sp.Vdt = s.Vdt
left join Acknow a2 on sp.Pbillno =a2.Pbillno
left join acm a on
sp.Acno = a.code
Where
sp.Vtype = 'PB'
and sp.Vdt >= cast(DATEADD(day, -91, GETDATE()) as date)
and sp.Vdt <= cast(DATEADD(day, -1, GETDATE()) as date)
and i.Compname NOT IN ('GOODAID', 'PURE & C')
and i.Barcode NOT LIKE '%[^0-9]%'
and isnumeric(i.Barcode) = 1) a
Where
(a."lead_time">0
and a."lead_time"<7);
'''
data_tepl = pd.read_sql(sql_tepl, cnxn)
data_tepl[['drug_id']] \
= data_tepl[['drug_id']] \
.apply(pd.to_numeric, errors='ignore').astype('Int64')
data=pd.concat([data_bhw,data_tepl],sort=False,ignore_index=False)
run_date = str(datetime.now(tz=gettz('Asia/Kolkata')))
lead_time_data = 'warehouse_lead_time/lead_time_data_dump_{}.csv'.format(run_date)
s3 = S3()
s3.save_df_to_s3(df=data, file_name=lead_time_data)
data=data.drop(["wh_id"],axis=1)
# Reading Preferred distributor from S3
s3 = S3()
preferred_distributor = pd.read_csv(s3.download_file_from_s3(file_name="warehouse/preferred_distributors.csv"))
df_new = pd.merge(data, preferred_distributor, how='left', on='drug_id')
df_new[["lead_time", "distributor_1"]] = df_new[["lead_time", "distributor_1"]].fillna(0)
df_new[["distributor_1"]] = df_new[["distributor_1"]].astype('int')
# function for weighted mean
def w_avg(df, values, weights):
d = df[values]
w = df[weights]
return (d * w).sum() / w.sum()
df_new_1 = df_new.groupby(["drug_id", "distributor_id"]).apply(w_avg, 'lead_time', 'quantity').rename(
'weighted_lead_time').reset_index()
df_std = df_new.groupby(["drug_id", "distributor_id"])[["lead_time"]].std().reset_index()
df_std.rename(columns={'lead_time': 'lead_time_std'}, inplace=True)
df_drug_distributor = pd.merge(df_new_1, df_std, how='left', on=['drug_id', 'distributor_id'])
df_drug_distributor = pd.merge(df_drug_distributor, preferred_distributor, how='left', on=["drug_id"])
df_drug_distributor[["distributor_1", "lead_time_std"]] = df_drug_distributor[
["distributor_1", "lead_time_std"]].fillna(0)
df_drug_distributor[["distributor_1"]] = df_drug_distributor[["distributor_1"]].astype('int')
# lead time mean Capping 7 days.
df_drug_distributor['weighted_lead_time'] = np.where(df_drug_distributor['weighted_lead_time'] > 7, 7,
df_drug_distributor['weighted_lead_time'])
# minimum lead time 2 days
df_drug_distributor['weighted_lead_time'] = np.where(df_drug_distributor['weighted_lead_time'] < 2, 2,
df_drug_distributor['weighted_lead_time'])
# Lead time Std capping of 2 days
df_drug_distributor['lead_time_std'] = np.where(df_drug_distributor['lead_time_std'] > 2, 2,
df_drug_distributor['lead_time_std'])
# Minimum Lead time std is 1 day
df_drug_distributor['lead_time_std'] = np.where(df_drug_distributor['lead_time_std'] < 1, 1,
df_drug_distributor['lead_time_std'])
df_drug_distributor[["distributor_id"]] = df_drug_distributor[["distributor_id"]].astype('int')
df_drug_distributor['same_distributor'] = np.where(
df_drug_distributor['distributor_id'] == df_drug_distributor["distributor_1"], True, False)
preferred_distributor_drug = df_drug_distributor[df_drug_distributor["same_distributor"] == True]
other_distributor_drug = df_drug_distributor[df_drug_distributor["same_distributor"] == False]
# Drugs not in preferred distributor
drugs_not_in_preferred_distributor = df_drug_distributor[
~df_drug_distributor['drug_id'].isin(preferred_distributor_drug['drug_id'])]
drugs_not_in_preferred_distributor_mean = drugs_not_in_preferred_distributor.groupby(["drug_id"])[
["weighted_lead_time"]].mean().reset_index()
drugs_not_in_preferred_distributor_std = drugs_not_in_preferred_distributor.groupby(["drug_id"])[
["weighted_lead_time"]].std().reset_index()
drugs_not_in_preferred_distributor_1 = pd.merge(drugs_not_in_preferred_distributor_mean,
drugs_not_in_preferred_distributor_std, how='left', on='drug_id')
drugs_not_in_preferred_distributor_1 = drugs_not_in_preferred_distributor_1.fillna(0)
# Capping
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'] = np.where(
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'] > 7, 7,
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'])
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'] = np.where(
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'] < 2, 2,
drugs_not_in_preferred_distributor_1['weighted_lead_time_x'])
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'] = np.where(
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'] > 2, 2,
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'])
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'] = np.where(
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'] < 1, 1,
drugs_not_in_preferred_distributor_1['weighted_lead_time_y'])
drugs_not_in_preferred_distributor_1.rename(
columns={'weighted_lead_time_x': 'weighted_lead_time', 'weighted_lead_time_y': 'lead_time_std'}, inplace=True)
drug_in_preferred_distributor = preferred_distributor_drug.drop(
['drug_name', 'distributor_id', 'distributor_1', 'distributor_name_1', 'same_distributor'], axis=1)
drug_lead_time_std = pd.concat([drug_in_preferred_distributor, drugs_not_in_preferred_distributor_1], sort=False,
ignore_index=True)
weighted_lead_time_mean = drug_lead_time_std[["drug_id", "weighted_lead_time"]]
weighted_lead_time_std = drug_lead_time_std[["drug_id", "lead_time_std"]]
#Assumption barcoding lead time 2 days and barcoding lead time std of 0.5 days
barcoding_lead_time=2
barcoding_lead_time_std=0.5
weighted_lead_time_mean['barcoding_lead_time']=barcoding_lead_time
weighted_lead_time_std['barcoding_lead_time_std']=barcoding_lead_time_std
weighted_lead_time_mean['weighted_lead_time'] = weighted_lead_time_mean['weighted_lead_time'] + \
weighted_lead_time_mean['barcoding_lead_time']
weighted_lead_time_std['lead_time_std'] = np.sqrt(
weighted_lead_time_std['lead_time_std'] * weighted_lead_time_std['lead_time_std'] +
weighted_lead_time_std['barcoding_lead_time_std'] * weighted_lead_time_std['barcoding_lead_time_std'])
weighted_lead_time_mean=weighted_lead_time_mean.drop(['barcoding_lead_time'],axis=1)
weighted_lead_time_std=weighted_lead_time_std.drop(['barcoding_lead_time_std'],axis=1)
return weighted_lead_time_mean, weighted_lead_time_std
def review_time():
# Review Time for distributor
s3 = S3()
df_1 = pd.read_csv(s3.download_file_from_s3(file_name="warehouse/review_time_warehouse_distributor.csv"))
# Preferred distributor
df_2 = pd.read_csv(s3.download_file_from_s3(file_name="warehouse/preferred_distributors.csv"))
# If null then take 4 days of review time
df_1 = df_1.fillna(4)
df_1['review_time'] = df_1['review_time'].astype('int')
review_time_new = pd.merge(df_2, df_1, left_on='distributor_1', right_on='distributor_id', how='left')
review_time_new = review_time_new.drop(
["drug_name", "distributor_1", "distributor_name_1", "distributor_id", "distributor_name"], axis=1)
return review_time_new
def wh_safety_stock_calc(
ss_runtime_var, wh_drug_list, forecast, last_month_sales, demand_daily_deviation, current_month_date,
forecast_date, reset_date, logger=None, expected_nso=0, nso_history_days=90, rs_db=None):
""" Safety stock calculation for warehouse """
# Lead time mean & Std
lead_time_mean, lead_time_std = lead_time()
service_level = ss_runtime_var['service_level'] # 0.95
ordering_freq = ss_runtime_var['ordering_freq'] # 4
max_review_period = review_time()
z = ss_runtime_var['z']
cap_ss_days = ss_runtime_var['cap_ss_days']
if cap_ss_days == 0:
cap_ss_days = 100000
# getting latest month forecast
forecast['month_begin_dt'] = pd.to_datetime(
forecast['month_begin_dt']).dt.date
first_month = forecast['month_begin_dt'].min()
forecast_first_month = forecast[forecast['month_begin_dt'] == first_month]
# creating inventory level dataframe
repln = forecast_first_month.copy()
repln = pd.merge(repln, lead_time_mean, how='left', on='drug_id') # merge lead time mean
repln = pd.merge(repln, lead_time_std, how='left', on='drug_id') # merge lead time std
repln = pd.merge(repln, max_review_period, how='left', on='drug_id') # merge review time
# rename the columns
repln.rename(columns={'weighted_lead_time': 'lead_time_mean', 'review_time': 'max_review_period'}, inplace=True)
# Use default of 4 , 2 and 4 lead time mean , std and r.t if data is missing
repln['lead_time_mean'] = repln['lead_time_mean'].fillna(4)
repln['lead_time_std'] = repln['lead_time_std'].fillna(2)
repln['max_review_period'] = repln['max_review_period'].fillna(4)
repln['ordering_freq'] = ordering_freq
repln['service_level'] = service_level
repln['z_value'] = z
repln = wh_drug_list.merge(repln, on='drug_id')
num_days = monthrange(first_month.year, first_month.month)[1]
repln['demand_daily'] = repln['fcst'] / num_days
# check to see if forecast error is to be used instead of actual demand daily deviation
if ss_runtime_var['use_fcst_error'] == 'Y':
hist_fcst_err = get_forecast_error(rs_db, ss_runtime_var['fcst_hist_to_use'], last_month_sales,
current_month_date, forecast_date, num_days)
hist_fcst_err['demand_daily_deviation'] = hist_fcst_err['demand_daily_deviation'] / np.sqrt(num_days)
repln = repln.merge(hist_fcst_err, on='drug_id', how='left')
print("used forecast error instead of demand deviation")
else:
repln = repln.merge(demand_daily_deviation, on='drug_id', how='left')
repln['demand_daily_deviation'].fillna(0, inplace=True)
# warehouse overall safety stock
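    # Standard safety-stock formula combining demand variability over the lead
    # time with lead-time variability:
    #   SS = z * sqrt(LT_mean * sigma_daily^2 + LT_std^2 * demand_daily^2)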
repln['ss_wo_cap'] = np.round(repln['z_value'] * np.sqrt(
(
repln['lead_time_mean'] *
repln['demand_daily_deviation'] *
repln['demand_daily_deviation']
) +
(
repln['lead_time_std'] *
repln['lead_time_std'] *
repln['demand_daily'] *
repln['demand_daily']
)))
repln = repln.merge(last_month_sales, on='drug_id', how='left')
repln['safety_stock_days'] = np.round(
repln['ss_wo_cap'] * num_days / repln['fcst'], 1)
# calculate capping days
repln['cap_ss_days'] = np.round(repln['lead_time_mean'] +
repln['z_value'] * repln['lead_time_std'] +
repln['max_review_period'])
repln['cap_ss_days'] = np.where(repln['cap_ss_days'] > cap_ss_days, cap_ss_days, repln['cap_ss_days'])
# capping SS days based in forecasted sales
repln['safety_stock'] = np.where(repln['safety_stock_days'] > repln['cap_ss_days'],
np.round(repln['cap_ss_days'] * repln['fcst'] / num_days),
repln['ss_wo_cap'])
# setting min SS at 2 days based on forecasted sales
repln['safety_stock'] = np.where(repln['safety_stock_days'] < 2, np.round(2 * repln['fcst'] / num_days),
repln['safety_stock'])
# capping SS days based on last month's sales
repln['safety_stock'] = np.where(repln['safety_stock'] * num_days / repln['last_month_sales'] > cap_ss_days,
np.round(cap_ss_days * repln['last_month_sales'] / num_days),
repln['safety_stock'])
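    # Replenishment parameters derived below:
    #   ROP = SS + demand_daily * (lead_time_mean + max_review_period)
    #   OUP = ROP + demand_daily * ordering_freq
    # The ROP is additionally bumped by the launch stock expected to be drawn
    # by new store openings (expected_nso) over the lead time + review period.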
repln['rop_without_nso'] = np.round(repln['safety_stock'] + repln['demand_daily'] * (repln['lead_time_mean'] +
repln['max_review_period']))
#tweaking ROP to include launch stock
launch_stock_per_store = get_launch_stock_per_store(rs_db, nso_history_days, reset_date)
repln = repln.merge(launch_stock_per_store, on='drug_id', how='left')
repln['launch_stock_per_store'].fillna(0, inplace=True)
repln['expected_nso'] = expected_nso
repln['reorder_point'] = repln['rop_without_nso'] + \
np.round((repln['lead_time_mean'] + repln['max_review_period']) *
repln['expected_nso'] / num_days) * \
repln['launch_stock_per_store']
repln['reorder_point'] = np.round(repln['reorder_point'])
repln['oup_without_nso'] = np.round(
repln['rop_without_nso'] +
repln['demand_daily'] * repln['ordering_freq'])
repln['order_upto_point'] = np.round(
repln['reorder_point'] +
repln['demand_daily'] * repln['ordering_freq'])
# shelf safety stock
repln['shelf_min'] = np.round(repln['safety_stock'] / 2)
repln['shelf_max'] = repln['safety_stock']
# days of safety stock, reorder point and order upto point calculations
repln['last_month_sales'].fillna(0, inplace=True)
repln['safety_stock_days'] = np.round(
repln['safety_stock'] * num_days / repln['last_month_sales'], 1)
repln['reorder_point_days'] = np.round(
repln['reorder_point'] * num_days / repln['last_month_sales'], 1)
repln['order_upto_days'] = np.round(
repln['order_upto_point'] * num_days / repln['last_month_sales'], 1)
return repln
def get_forecast_error(rs_db, fcst_hist_to_use, last_month_sales, current_month_date, forecast_date, num_days):
first_forecast_month = str(current_month_date - relativedelta(months=fcst_hist_to_use))
q = """
select
wss."drug-id" as drug_id,
wss."month-begin-dt" as month_forecasted,
wss."fcst" as forecast,
(
select
wss1."last-month-sales"
from
"prod2-generico"."wh-safety-stock" wss1
where
wss."drug-id" = wss1."drug-id"
and date(add_months(wss."month-begin-dt",
1))= wss1."month-begin-dt"
order by
wss1."drug-id",
wss1."month-begin-dt"
limit 1
) as actual
from
"prod2-generico"."wh-safety-stock" wss
where
1 = 1
and wss.fcst notnull
and wss."month-begin-dt" >= '{}'
and wss."month-begin-dt" >= '2022-02-01'
order by
wss."drug-id",
wss."month-begin-dt"
""".format(first_forecast_month)
hist_fcst_err = rs_db.get_df(q)
last_month_date = pd.to_datetime(forecast_date) - relativedelta(months=1)
last_month_sales['last_month_date'] = last_month_date.date()
hist_fcst_err = hist_fcst_err.merge(last_month_sales, left_on=['drug_id', 'month_forecasted'],
right_on=['drug_id', 'last_month_date'], how='left')
hist_fcst_err['actual'] = np.where(np.isnan(hist_fcst_err['actual']), hist_fcst_err['last_month_sales'],
hist_fcst_err['actual'])
hist_fcst_err.drop(columns=['last_month_sales', 'last_month_date'], inplace=True)
hist_fcst_err = hist_fcst_err[np.isnan(hist_fcst_err['actual']) == False]
hist_fcst_err['squared_error'] = (hist_fcst_err['forecast'] - hist_fcst_err['actual']) ** 2
hist_fcst_err = hist_fcst_err.groupby('drug_id').apply(get_rmse).reset_index()
hist_fcst_err['demand_daily_deviation'] = hist_fcst_err['rmse'] / np.sqrt(num_days)
return hist_fcst_err[['drug_id', 'demand_daily_deviation']]
def get_rmse(df):
if len(df) >= 2:
rmse = np.sqrt(df['squared_error'].sum() / len(df))
else:
rmse = None
    return pd.Series(dict(rmse=rmse))

# File: /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/warehouse/safety_stock/wh_safety_stock.py (package: zeno-etl-libs-v3)
import pandas as pd
def stores_ss_consolidation(safety_stock_df, db, schema,
min_column='safety_stock',
ss_column='reorder_point',
max_column='order_upto_point'):
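    # Consolidation against the warehouse portfolio (wh-sku-subs-master):
    # SKUs with same_release == 'NO' have min/ss/max zeroed out; SKUs with
    # same_release == 'YES' have their quantities added onto the replacing
    # drug (drug_id_replaced) and are themselves zeroed.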
# getting list of SKUs to be substituted and substituted with
wh_list_query = f"""
select "drug-id" , "drug-id-replaced" , "same-release"
from "{schema}"."wh-sku-subs-master"
where "add-wh" = 'No'
"""
wh_list = db.get_df(wh_list_query)
wh_list.columns = [c.replace('-', '_') for c in wh_list.columns]
# 3 lists - to not keep, to substitute and to substitute with
sku_reject_list = wh_list.loc[
wh_list['same_release'] == 'NO', 'drug_id']
sku_to_replace_list = wh_list.loc[
wh_list['same_release'] == 'YES', 'drug_id']
sku_substitute_list = wh_list.loc[
wh_list['same_release'] == 'YES', 'drug_id_replaced']
# seperating safety_stock_df where change will happen and where it wont
sku_cnsld_list = list(sku_reject_list) + list(sku_to_replace_list) + list(sku_substitute_list)
safety_stock_df_cnsld = safety_stock_df[
(safety_stock_df['drug_id'].isin(sku_cnsld_list))
]
print('SS to be changed due to WH ', safety_stock_df_cnsld.shape[0])
safety_stock_df_rest = safety_stock_df[
~(safety_stock_df['drug_id'].isin(sku_cnsld_list))
]
if len(safety_stock_df_cnsld) > 0:
# SKU to be changed - not to keep and substitute with
sku_reject = safety_stock_df_cnsld.merge(
wh_list.query('same_release == "NO"')[
['drug_id']].drop_duplicates(),
how='inner', on='drug_id')
sku_to_replace = safety_stock_df_cnsld.merge(
wh_list.query('same_release == "YES"')[
['drug_id', 'drug_id_replaced']].drop_duplicates(),
how='inner', on='drug_id')
sku_substitute = safety_stock_df_cnsld.merge(
wh_list.query('same_release == "YES"')[
['drug_id_replaced']].drop_duplicates(),
how='inner', left_on='drug_id', right_on='drug_id_replaced')
sku_substitute.drop('drug_id_replaced', axis=1, inplace=True)
print('SKU rejected ', sku_reject.shape[0])
print('SKU replace ', sku_to_replace.shape[0])
print('SKU substitute ', sku_substitute.shape[0])
# updated ss calculation - to reject
sku_reject_new = sku_reject.copy()
sku_reject_new[min_column] = 0
sku_reject_new[ss_column] = 0
sku_reject_new[max_column] = 0
# updated ss calculation - to replace with wh skus
sku_substitute_new = sku_to_replace.groupby('drug_id_replaced')[
[min_column, ss_column, max_column]].sum().reset_index()
sku_substitute_new.rename(columns={'drug_id_replaced': 'drug_id'}, inplace=True)
sku_to_replace_new = sku_to_replace.copy()
sku_to_replace_new.drop('drug_id_replaced', axis=1, inplace=True)
sku_to_replace_new[min_column] = 0
sku_to_replace_new[ss_column] = 0
sku_to_replace_new[max_column] = 0
# updated ss calculation - to substitute with
sku_substitute_new = sku_substitute.merge(
sku_substitute_new[['drug_id', min_column, ss_column, max_column]],
on='drug_id', suffixes=('', '_y'), how='left')
sku_substitute_new[min_column + '_y'].fillna(0, inplace=True)
sku_substitute_new[ss_column + '_y'].fillna(0, inplace=True)
sku_substitute_new[max_column + '_y'].fillna(0, inplace=True)
sku_substitute_new[min_column] = (
sku_substitute_new[min_column] +
sku_substitute_new[min_column + '_y'])
sku_substitute_new[ss_column] = (
sku_substitute_new[ss_column] +
sku_substitute_new[ss_column + '_y'])
sku_substitute_new[max_column] = (
sku_substitute_new[max_column] +
sku_substitute_new[max_column + '_y'])
sku_substitute_new.drop(
[min_column + '_y', ss_column + '_y', max_column + '_y'],
axis=1, inplace=True)
# merging final dataframe
safety_stock_df_prev = pd.concat(
[sku_reject, sku_to_replace, sku_substitute],
axis=0, ignore_index=True)
safety_stock_df_new = pd.concat(
[safety_stock_df_rest, sku_reject_new, sku_to_replace_new,
sku_substitute_new], axis=0, ignore_index=True)
else:
safety_stock_df_new = safety_stock_df.copy()
safety_stock_df_prev = pd.DataFrame()
# test cases 1- pre and post count should be same
pre_drug_count = safety_stock_df.shape[0]
post_drug_count = safety_stock_df_new.shape[0]
pre_max_qty = safety_stock_df[max_column].sum()
post_max_qty = safety_stock_df_new[max_column].sum()
    if pre_drug_count != post_drug_count:
        print('WARNING: SKU counts do not match after consolidation')
print('Reduction in max quantity:',
str(round(100*(1 - post_max_qty/pre_max_qty), 2)) + '%')
    return safety_stock_df_new, safety_stock_df_prev

# File: /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/warehouse/wh_intervention/store_portfolio_consolidation.py (package: zeno-etl-libs-v3)
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from calendar import monthrange
from zeno_etl_libs.utils.ipc.data_prep import forecast_data_prep
def data_checks(drug_sales_monthly, wh_drug_list, reset_date, logger,
rs_db):
# MONTHLY CHECKS
logger.info(
str(drug_sales_monthly.drug_id.nunique()) +
str(drug_sales_monthly['month_begin_dt'].nunique()))
logger.info(str(
drug_sales_monthly.drug_id.nunique() *
drug_sales_monthly['month_begin_dt'].nunique()))
assert (drug_sales_monthly.drug_id.nunique() *
drug_sales_monthly['month_begin_dt'].nunique()
== len(drug_sales_monthly))
# CHECKING FOR DRUGS NOT IN SALES DATA MONTHLY
drug_missed_fcst = wh_drug_list[
~wh_drug_list.drug_id.isin(drug_sales_monthly['drug_id'])]['drug_id']
drug_missed_fcst = str(list(drug_missed_fcst))
drug_missed_fcst = drug_missed_fcst.replace('[', '(').replace(']', ')')
if len(drug_missed_fcst) > 2:
drug_missed_fcst = rs_db.get_df('''
select
id as drug_id,
"drug-name" as drug_name,
type,
date("created-at") as creation_date
from
"prod2-generico".drugs
where
id in {}
'''.format(drug_missed_fcst))
drug_missed_sale_history = rs_db.get_df('''
select
"drug-id" as drug_id,
date(max("created-at")) as last_sale_date
from
"prod2-generico".sales
where
"created-at" < {reset_date}
and quantity > 0
and "drug-id" in {drug_id_list}
group by
"drug-id"
'''.format(drug_id_list = str(
list(drug_missed_fcst['drug_id'])).replace('[', '(').replace(
']', ')'), reset_date = str(reset_date)))
drug_missed_fcst = drug_missed_fcst.merge(
drug_missed_sale_history, on='drug_id', how='inner')
logger.info(
'Drug in SKU list but with no history' + str(drug_missed_fcst))
    # DRUGS NOT DISCONTINUED/BANNED/NULL-TYPE AND WITH A SALE IN THE LAST ~6 MONTHS (152 DAYS)
days = 152
logger.info('Total missing sales' + str(len(drug_missed_fcst)))
logger.info(
'Removing unnecessary drug types' +
str(drug_missed_fcst[
drug_missed_fcst.type.isin(
['discontinued-products', 'banned', ''])
].shape[0]))
logger.info(
'Removing drugs with no sales in last 6 months' +
str(drug_missed_fcst[
drug_missed_fcst['last_sale_date'] <=
(reset_date - timedelta(days=days))].shape[0]))
drug_missed_fcst_list = drug_missed_fcst[
(~drug_missed_fcst.type.isin(
['discontinued-products', 'banned', ''])) &
(drug_missed_fcst['last_sale_date'] >
(reset_date - timedelta(days=days)))
].sort_values('last_sale_date')
logger.info('Missing drug list' + str(drug_missed_fcst_list))
return 0
def get_product_list(rs_db):
'''Getting product list to be kept in warehousee'''
# TODO - IN FUTURE TO BE COMIING FROM WMS DB
wh_drug_list_query = '''
select
wssm."drug-id" as drug_id,
d."drug-name" drug_name,
d."type",
d.category,
d.company,
'NA' as bucket
from
"prod2-generico"."wh-sku-subs-master" wssm
left join "prod2-generico".drugs d on
d.id = wssm."drug-id"
where
wssm."add-wh" = 'Yes'
and d."type" not in ('discontinued-products')
and d.company <> 'GOODAID'
'''
wh_drug_list = rs_db.get_df(wh_drug_list_query)
return wh_drug_list
def wh_data_prep(
store_id_list, current_month_date, reset_date, type_list, rs_db, logger,
ss_runtime_var, schema):
'''Getting data prepared for warehouse forecasting'''
# CALLING STORES DATA PREP FOR ALL STORES AS LOGIC IS SAME
last_date = datetime(day=1, month=4, year=2021).date()
next_month_date = datetime(current_month_date.year +
int(current_month_date.month / 12),
((current_month_date.month % 12) + 1), 1).date()
_, drug_sales_monthly, _, demand_daily_deviation = forecast_data_prep(
store_id_list, type_list, reset_date, rs_db, schema, logger, last_date=None,
is_wh='Y')
# GETTING PRODUCT LIST
wh_drug_list = get_product_list(rs_db)
logger.info('# of Drugs in WH list' + str(len(wh_drug_list)))
# FILTERING OUT DRUG ID NOT CONSIDERED IN ABX-XYZ CLASSIFICATION
drug_sales_monthly = drug_sales_monthly[
drug_sales_monthly.drug_id.isin(wh_drug_list['drug_id'])]
# Extrapolate current month's sales but with condition
if ss_runtime_var['for_next_month'] == 'Y':
if ss_runtime_var['debug_mode'] == 'Y':
curr_day = pd.to_datetime(reset_date).day - 1
curr_month_days = monthrange(
current_month_date.year, current_month_date.month)[1]
else:
curr_day = datetime.now().day - 1
curr_month_days = monthrange(
current_month_date.year, current_month_date.month)[1]
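# extrapolate month-to-date sales to a full-month estimate, e.g. (illustrative)
# 100 units sold in the first 15 completed days of a 30-day month are scaled
# to round(100 * 30 / 15) = 200 for the current month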
drug_sales_monthly['net_sales_quantity'] = np.where(
drug_sales_monthly['month_begin_dt'] == str(current_month_date),
round(drug_sales_monthly['net_sales_quantity'] *
curr_month_days / curr_day),
drug_sales_monthly['net_sales_quantity'])
else:
drug_sales_monthly = drug_sales_monthly[
drug_sales_monthly['month_begin_dt'] != str(current_month_date)]
# DATA CHECKS
_ = data_checks(
drug_sales_monthly, wh_drug_list, current_month_date, logger, rs_db)
# FILTERING OUT LENGTH OF TIME SERIES BASED ON FIRST BILL DATE
drug_list = drug_sales_monthly.drug_id.unique()
bill_date_query = '''
select
i."drug-id" as drug_id,
min(date(bi."created-at")) as "first_bill_date"
from
"prod2-generico"."bill-items-1" bi
join "prod2-generico"."inventory-1" i on
i.id = bi."inventory-id"
where
i."drug-id" in {}
group by
i."drug-id"
'''.format(tuple(drug_list) + (0, 0))
bill_date = rs_db.get_df(bill_date_query)
bill_date['first_bill_date'] = pd.to_datetime(bill_date['first_bill_date'])
bill_date['bill_month'] = [
datetime(b_date.year, b_date.month, 1).date()
for b_date in bill_date['first_bill_date']]
# TAKING HISTORY FROM THE POINT FIRST SALE IS MADE
drug_sales_monthly = drug_sales_monthly.merge(
bill_date, how='left', on='drug_id')
assert sum(drug_sales_monthly['first_bill_date'].isna()) == 0
drug_sales_monthly = drug_sales_monthly.query(
'month_begin_dt >= bill_month')
# EXPLORING HISTORY OF DRUGS
drug_history = drug_sales_monthly. \
groupby('drug_id')['net_sales_quantity'].count().reset_index()
drug_history.columns = ['drug_id', 'month_history']
logger.info('Total Drugs' + str(len(drug_history)))
logger.info('History >= 12 months' + str(
len(drug_history.query('month_history >=12'))))
logger.info('History 3-11 months' + str(
len(drug_history.query('month_history < 12').
query('month_history >=3'))))
logger.info('History < 3 months' + str(
len(drug_history.query('month_history < 3'))))
return drug_sales_monthly, wh_drug_list, drug_history, demand_daily_deviation
def get_launch_stock_per_store(rs_db, days, reset_date):
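'''
Average launch stock per drug for newly opened stores: considers stores
(franchisee-id = 1, a couple of ids excluded) opened within `days` of
`reset_date`, sums the short-book quantities ordered before each store's
opening date for warehouse SKUs, and divides by the number of new stores.
'''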
new_stores_list_query = """
select
id as store_id,
date("opened-at") as opened_at
from
"prod2-generico".stores s
where
"opened-at" >= '{reset_date}' - {days}
and "opened-at" <= '{reset_date}'
and id not in (281, 297)
and "franchisee-id" = 1
""".format(reset_date=reset_date, days=days)
new_stores_list = rs_db.get_df(new_stores_list_query)
store_ids_list = tuple(new_stores_list['store_id'].astype(str))+('0','0')
# get shortbook launch orders
sb_orders_query = '''
select
distinct sb."store-id" as store_id,
sb."drug-id" as drug_id,
date(sb."created-at") as created_at,
sb.quantity as ordered_quantity,
date(s2."opened-at") as opened_at
from
"prod2-generico"."short-book-1" sb
left join "prod2-generico".stores s2 on
s2.id = sb."store-id"
where
"store-id" in {store_ids}
and date(sb."created-at") < date(s2."opened-at")
'''.format(store_ids=store_ids_list, days=days)
sb_orders = rs_db.get_df(sb_orders_query)
wh_drug_list = get_product_list(rs_db)
df = sb_orders.copy()
df = df[df['drug_id'].isin(wh_drug_list['drug_id'])]
df = df[['store_id', 'drug_id', 'ordered_quantity']]
df.drop_duplicates(inplace=True)
new_stores_count = sb_orders['store_id'].nunique()
df = df[['drug_id', 'ordered_quantity']]
launch_stock = df.groupby('drug_id').sum().reset_index()
launch_stock_per_store = launch_stock.copy()
launch_stock_per_store['ordered_quantity'] = \
launch_stock['ordered_quantity'] / new_stores_count
launch_stock_per_store.rename(
columns={'ordered_quantity': 'launch_stock_per_store'}, inplace=True)
return launch_stock_per_store | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/warehouse/data_prep/wh_data_prep.py | wh_data_prep.py |
import numpy as np
from zeno_etl_libs.utils.warehouse.forecast.helper_functions import make_future_df,\
ape_calc, ae_calc
from fbprophet import Prophet
# prophet train
def prophet_train_monthly(
df, n_changepoints_factor=4, changepoint_prior_scale=0.2, growth='linear',
changepoint_range=1, interval_width=0.68, mcmc_samples=0, horizon=3,
out_of_sample=3):
# params
n_changepoints = int(np.round(len(df)/n_changepoints_factor))
# dividing the series into train and validation set
df = df.copy()
df['days'] = df['month_begin_dt'].dt.daysinmonth
df.rename(columns={'month_begin_dt': 'ds', 'net_sales_quantity': 'y'},
inplace=True)
train_df = df.drop(df.tail(out_of_sample).index)
validation_df = df.tail(out_of_sample)
# model building
model = Prophet(
growth=growth,
n_changepoints=n_changepoints,
changepoint_prior_scale=changepoint_prior_scale,
changepoint_range=changepoint_range,
interval_width=interval_width,
mcmc_samples=mcmc_samples,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False)
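# yearly seasonality is added manually (12 Fourier terms) and days-in-month
# is passed as a multiplicative regressor to normalise for month length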
model.add_seasonality(name='yearly_e', period=365.25, fourier_order=12)
model.add_regressor(name='days', mode='multiplicative')
fit = model.fit(train_df)
validation_fcst = fit.predict(validation_df)[[
'yhat', 'yhat_upper', 'yhat_lower']]
# calculating standard deviation of additive terms and trend
validation_std = (
validation_fcst['yhat_upper'].values -
validation_fcst['yhat_lower'].values)
# writing to final df
predict_df = validation_df.copy()
predict_df['fcst'] = np.round(validation_fcst['yhat'].values)
predict_df['std'] = np.round(validation_std)
# calculating errors
predict_df['ape'] = [
ape_calc(actual, forecast)
for actual, forecast in zip(predict_df['y'], predict_df['fcst'])]
predict_df['ae'] = [
ae_calc(actual, forecast)
for actual, forecast in zip(predict_df['y'], predict_df['fcst'])]
predict_df.rename(columns={'ds': 'month_begin_dt', 'y': 'actual'},
inplace=True)
predict_df.drop('days', axis=1, inplace=True)
return predict_df # , fit
# prophet train
def prophet_predict_monthly(
df, n_changepoints_factor=4, changepoint_prior_scale=0.2, growth='linear',
changepoint_range=1, interval_width=0.68, mcmc_samples=0, horizon=3,
out_of_sample=3):
# params
n_changepoints = int(np.round(len(df)/n_changepoints_factor))
# creating predict df
df = df.copy()
df['days'] = df['month_begin_dt'].dt.daysinmonth
predict_df = make_future_df(df, out_of_sample)
predict_df['days'] = predict_df['month_begin_dt'].dt.daysinmonth
# column name change for prophet
df.rename(columns={'month_begin_dt': 'ds', 'net_sales_quantity': 'y'},
inplace=True)
predict_df.rename(
columns={'month_begin_dt': 'ds', 'net_sales_quantity': 'y'},
inplace=True)
# model building
model = Prophet(
growth=growth,
n_changepoints=n_changepoints,
changepoint_prior_scale=changepoint_prior_scale,
changepoint_range=changepoint_range,
interval_width=interval_width,
mcmc_samples=mcmc_samples,
yearly_seasonality=True,
weekly_seasonality=False,
daily_seasonality=False)
fit = model.fit(df)
forecast = fit.predict(predict_df)[[
'yhat', 'yhat_upper', 'yhat_lower']]
# calculating standard deviation of additive terms and trend
forecast_std = (
forecast['yhat_upper'].values - forecast['yhat_lower'].values)
# writing to final df
predict_df['fcst'] = np.round(forecast['yhat'].values)
predict_df['std'] = np.round(forecast_std)
predict_df.rename(columns={'ds': 'month_begin_dt', 'y': 'actual'},
inplace=True)
predict_df.drop('days', axis=1, inplace=True)
return predict_df # , fit | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/warehouse/forecast/prophet.py | prophet.py |
import pandas as pd
from dateutil.relativedelta import relativedelta
from joblib import Parallel, delayed
from multiprocessing import cpu_count
# weekly vs monthly demand pattern
def month_week_plt(monthly_data, weekly_data, drug_id, drug_name, bucket):
week = weekly_data.loc[
weekly_data['drug_id'] == drug_id]
week.rename(columns={'week_begin_dt': 'date'}, inplace=True)
ax = week[['date', 'net_sales_quantity']].set_index('date').plot()
ax.set_title('{} {} {}'.format(drug_id, drug_name, bucket), )
month = monthly_data.loc[
monthly_data['drug_id'] == drug_id]
month.rename(columns={'month_begin_dt': 'date'}, inplace=True)
ax = month[['date', 'net_sales_quantity']].set_index('date').plot()
ax.set_title('{} {} {}'.format(drug_id, drug_name, bucket))
return 0
# make forward looking data frame for forecast
def make_future_df(df, horizon):
df = df.copy()
drug_id = df['drug_id'].values[-1]
# prev_month_dt = df['month_begin_dt'].dt.date.values[-1]
prev_month_dt = pd.to_datetime(df['month_begin_dt'].values[-1])
if horizon == 3:
predict_month_dt = [
prev_month_dt + relativedelta(months=h)
for h in range(1, horizon + 1)]
predict_year = [
(prev_month_dt + relativedelta(months=h)).year
for h in range(1, horizon + 1)]
predict_month = [
(prev_month_dt + relativedelta(months=h)).month
for h in range(1, horizon + 1)]
else:
predict_month_dt = [
prev_month_dt + relativedelta(days=28*h)
for h in range(1, horizon + 1)]
predict_year = [
(prev_month_dt + relativedelta(days=28*h)).year
for h in range(1, horizon + 1)]
predict_month = [
(prev_month_dt + relativedelta(days=28*h)).month
for h in range(1, horizon + 1)]
predict_df = pd.DataFrame()
predict_df['drug_id'] = pd.Series([drug_id] * horizon)
predict_df['month_begin_dt'] = pd.to_datetime(pd.Series(predict_month_dt))
predict_df['year'] = pd.Series(predict_year)
predict_df['month'] = pd.Series(predict_month)
predict_df['fcst'] = 0
return predict_df
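# Illustrative example: if a drug's last observed month_begin_dt is 2021-03-01,
# make_future_df(df, horizon=3) returns rows for 2021-04-01, 2021-05-01 and
# 2021-06-01 with fcst initialised to 0; for any other horizon the future
# dates are spaced 28 days apart instead of calendar months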
# forecast visualisation
def forecast_viz(train, forecast, drug_id, drug_name, bucket, model, k=3):
train = train.copy()
forecast = forecast.copy()
train = train[['drug_id', 'month_begin_dt', 'net_sales_quantity']]
forecast = forecast[['drug_id', 'month_begin_dt', 'fcst']]
merged = train.merge(
forecast, how='outer', on=['drug_id', 'month_begin_dt'])
merged.drop('drug_id', axis=1, inplace=True)
ax = merged.set_index('month_begin_dt').plot()
ax.set_title('{} {} {} {}'.format(drug_id, drug_name, bucket, model))
return 0
# parallel execution of per-drug model fits using joblib
def apply_parallel_ets(
dfGrouped, func, ets_params, horizon=3, out_of_sample=3):
retLst = Parallel(n_jobs=cpu_count() - 4, verbose=10)(
delayed(func)(
group, ets_params, horizon, out_of_sample)
for name, group in dfGrouped)
return pd.concat(retLst)
def apply_parallel_prophet(
dfGrouped, func, n_changepoints_factor, changepoint_prior_scale,
growth, changepoint_range, interval_width, mcmc_samples, horizon,
out_of_sample):
retLst = Parallel(n_jobs=cpu_count() - 4, verbose=10)(
delayed(func)(
group, n_changepoints_factor, changepoint_prior_scale, growth,
changepoint_range, interval_width, mcmc_samples, horizon,
out_of_sample)
for name, group in dfGrouped)
return pd.concat(retLst) | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/warehouse/forecast/helper_functions.py | helper_functions.py |
import time
import pandas as pd
import numpy as np
from itertools import product
from zeno_etl_libs.utils.warehouse.forecast.errors import train_error, \
train_error_ets_h1
from zeno_etl_libs.utils.warehouse.forecast.moving_average import \
ma_train_monthly, \
ma_predict_monthly
from zeno_etl_libs.utils.warehouse.forecast.ets import ets_train_monthly, \
ets_predict_monthly
# from zeno_etl_libs.utils.warehouse.forecast.prophet import prophet_train_monthly,\
# prophet_predict_monthly
from zeno_etl_libs.utils.warehouse.forecast.naive import naive_predict_monthly
from zeno_etl_libs.utils.warehouse.forecast.helper_functions import \
apply_parallel_ets
# from scripts.ops.warehouse.forecast.\
# helper_functions import apply_parallel_prophet
def wh_forecast(drug_sales_monthly, wh_drug_list, drug_history, logger=None):
"""
Bucketing based on History
1. For drugs with history < 3 months -> Naive
2. For drugs with history 3-11 months -> MA, SES (Simple Exponential Smoothing)
3. For drugs with history >= 12 months -> MA, ETS (Error, Trend, Seasonality)
"""
# BUCKET BASED ON HISTORY
bucket_h3 = drug_history[drug_history['month_history'] < 3]
bucket_h2minus = drug_history[
(drug_history['month_history'] >= 3) &
(drug_history['month_history'] <= 5)]
bucket_h2 = drug_history[
(drug_history['month_history'] >= 6) &
(drug_history['month_history'] < 12)]
bucket_h1 = drug_history[drug_history['month_history'] >= 12]
bucket_log = '''
Bucket H1 12+ months history - {},
Bucket H2 6-11 months history - {},
Bucket H2- 3-5 months history - {},
Bucket H3 <3 months history - {}'''.format(
len(bucket_h1), len(bucket_h2), len(bucket_h2minus), len(bucket_h3)
)
logger.info(bucket_log)
# SUBSETTING SALE HISTORY DATA FOR BUCKETS
drug_sales_monthly_bucket_h1 = drug_sales_monthly[
drug_sales_monthly['drug_id'].isin(bucket_h1['drug_id'])]
drug_sales_monthly_bucket_h2 = drug_sales_monthly[
drug_sales_monthly['drug_id'].isin(bucket_h2['drug_id'])]
drug_sales_monthly_bucket_h2minus = drug_sales_monthly[
drug_sales_monthly['drug_id'].isin(bucket_h2minus['drug_id'])]
drug_sales_monthly_bucket_h3 = drug_sales_monthly[
drug_sales_monthly['drug_id'].isin(bucket_h3['drug_id'])]
''' H1 bucket - Train and Forecast'''
logger.info(
'Drugs for training' +
str(drug_sales_monthly_bucket_h1.drug_id.nunique()))
# FORECASTING MODULES: MOVING AVERAGES K=3
ma_train_data_h1 = drug_sales_monthly_bucket_h1.copy()
ma_train_data_h1 = ma_train_data_h1[
['drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# model parameters
# k = 3 # N moving average
horizon = 3 # future forecast
# train
start = time.time()
ma_train_h1 = ma_train_data_h1.groupby('drug_id').apply(ma_train_monthly). \
reset_index(drop=True)
end = time.time()
logger.info('H1 MA Train: Run time ' + str(round(end - start, 2)) + 'secs')
# train error
start = time.time()
ma_train_error_h1 = ma_train_h1.groupby('drug_id').apply(train_error). \
reset_index(drop=True)
end = time.time()
logger.info('H1 MA Error: Run time ' + str(round(end - start, 2)) + 'secs')
# predict
start = time.time()
ma_predict_h1 = ma_train_data_h1.groupby('drug_id'). \
apply(ma_predict_monthly).reset_index(drop=True)
end = time.time()
logger.info('H1 MA Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
# FORECASTING MODULES: EXPONENTIAL SMOOTHING
ets_train_data_h1 = drug_sales_monthly_bucket_h1.copy()
ets_train_data_h1 = ets_train_data_h1[
['drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# model parameters
horizon = 3 # future forecast
out_of_sample = 3 # out of sample forecast
# holts winter implementation
trend = ['additive', None]
seasonal = ['additive', None]
damped = [True, False]
seasonal_periods = [12]
use_boxcox = [True, False]
ets_params = list(
product(trend, seasonal, damped, seasonal_periods, use_boxcox))
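# grid of 2 (trend) x 2 (seasonal) x 2 (damped) x 1 (seasonal_periods) x
# 2 (use_boxcox) = 16 hyperparameter combinations evaluated per drug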
# train
start = time.time()
ets_train_h1 = apply_parallel_ets(
ets_train_data_h1.groupby('drug_id'), ets_train_monthly,
ets_params).reset_index(drop=True)
end = time.time()
logger.info('H1 ETS Train: Run time ' + str(round(end - start, 2)) + 'secs')
# train error
start = time.time()
ets_train_error_h1 = ets_train_h1.groupby('drug_id').apply(
train_error_ets_h1). \
reset_index(drop=True)
end = time.time()
logger.info('H1 ETS Error: Run time ' + str(round(end - start, 2)) + 'secs')
# predict
start = time.time()
ets_predict_h1 = apply_parallel_ets(
ets_train_data_h1.groupby('drug_id'), ets_predict_monthly,
ets_train_h1).reset_index(drop=True)
end = time.time()
logger.info('H1 ETS Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
''' # TODO - PROPHET TO BE INTG. LATER
# FORECASTING MODULES: PROPHET
prophet_train_data_h1 = drug_sales_monthly_bucket_h1.copy()
prophet_train_data_h1 = prophet_train_data_h1[
['drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# model parameters
horizon = 3 # future forecast
# holts winter implementation
n_changepoints_factor = 4
changepoint_prior_scale = 0.2
growth = 'linear'
changepoint_range = 1
interval_width = 0.68
mcmc_samples = 0
# train
start = time.time()
prophet_train_h1 = apply_parallel_prophet(
prophet_train_data_h1.groupby('drug_id'), prophet_train_monthly,
n_changepoints_factor, changepoint_prior_scale, growth,
changepoint_range, interval_width, mcmc_samples, horizon, out_of_sample
).reset_index(drop=True)
end = time.time()
logger.info(
'H1 Prophet Train: Run time ' + str(round(end-start, 2)) + 'secs')
# train error
start = time.time()
prophet_train_error_h1 = prophet_train_h1.groupby('drug_id').\
apply(train_error).reset_index(drop=True)
end = time.time()
logger.info(
'H1 Prophet Error: Run time ' + str(round(end-start, 2)) + 'secs')
# predict
start = time.time()
prophet_predict_h1 = apply_parallel_prophet(
prophet_train_data_h1.groupby('drug_id'), prophet_predict_monthly,
n_changepoints_factor, changepoint_prior_scale, growth,
changepoint_range, interval_width, mcmc_samples, horizon, out_of_sample
).reset_index(drop=True)
end = time.time()
logger.info(
'H1 Prophet Fcst: Run time ' + str(round(end-start, 2)) + 'secs')
'''
# FORECASTING MODULE - ENSEMBLE
# identifying best model for each drug - using MA and ETS
ensemble_error_h1 = ets_train_error_h1.merge(
ma_train_error_h1, how='outer', on='drug_id', suffixes=('_ets', '_ma'))
ensemble_error_h1['model'] = np.where(
ensemble_error_h1['mape_ma'] < ensemble_error_h1['mape_ets'],
'ma', 'ets')
# choosing ma where SS days for ets is crossing 1 month
ensemble_error_h1['ss_days_ets'] = 14.84 * ensemble_error_h1['std'] / \
ensemble_error_h1['actual']
ensemble_error_h1['model'] = np.where(ensemble_error_h1['ss_days_ets'] > 28,
'ma', 'ets')
ensemble_error_h1.loc[np.isnan(ensemble_error_h1['std']), 'model'] = 'ma'
del ensemble_error_h1['actual']
del ensemble_error_h1['std']
del ensemble_error_h1['ss_days_ets']
del ets_train_error_h1['actual']
del ets_train_error_h1['std']
ensemble_error_h1['mape'] = np.where(
ensemble_error_h1['model'] == 'ma',
ensemble_error_h1['mape_ma'], ensemble_error_h1['mape_ets'])
ensemble_error_h1['mae'] = np.where(
ensemble_error_h1['model'] == 'ma',
ensemble_error_h1['mae_ma'], ensemble_error_h1['mae_ets'])
# creating ensemble dataframe for best model - MA + ETS
ma_drug_best_h1 = ensemble_error_h1.loc[
ensemble_error_h1['model'] == 'ma', 'drug_id']
ets_drug_best_h1 = ensemble_error_h1.loc[
ensemble_error_h1['model'] == 'ets', 'drug_id']
ma_train_best_h1 = ma_train_h1[
ma_train_h1['drug_id'].isin(ma_drug_best_h1)]
ma_predict_best_h1 = ma_predict_h1[
ma_predict_h1['drug_id'].isin(ma_drug_best_h1)]
ma_train_best_h1['model'] = 'ma'
ma_predict_best_h1['model'] = 'ma'
ets_train_best_h1 = ets_train_h1[
ets_train_h1['drug_id'].isin(ets_drug_best_h1)]
ets_predict_best_h1 = ets_predict_h1[
ets_predict_h1['drug_id'].isin(ets_drug_best_h1)]
ets_train_best_h1['model'] = 'ets'
ets_predict_best_h1['model'] = 'ets'
ensemble_train_h1 = pd.concat(
[ma_train_best_h1, ets_train_best_h1], axis=0)
ensemble_predict_h1 = pd.concat(
[ma_predict_best_h1, ets_predict_best_h1], axis=0)
''' # TODO - PROPHET TO BE INTG. LATER
# identifying best model for each drug - using MA, ETS and Prophet
ensemble_error_h1 = ets_train_error_h1.merge(
ma_train_error_h1, how='outer', on='drug_id',
suffixes=('_ets', '_ma')).merge(
prophet_train_error_h1, how='outer', on='drug_id',
suffixes=('', '_prophet'))
ensemble_error_h1.columns = [
'drug_id', 'mae_ets', 'mape_ets', 'mae_ma', 'mape_ma',
'mae_prophet', 'mape_prophet']
ensemble_error_h1['model'] = np.select(
[(ensemble_error_h1['mape_ma'] < ensemble_error_h1['mape_ets']) &
(ensemble_error_h1['mape_ma'] < ensemble_error_h1['mape_prophet']),
(ensemble_error_h1['mape_ets'] < ensemble_error_h1['mape_ma']) &
(ensemble_error_h1['mape_ets'] < ensemble_error_h1['mape_prophet']),
(ensemble_error_h1['mape_prophet'] < ensemble_error_h1['mape_ma']) &
(ensemble_error_h1['mape_prophet'] < ensemble_error_h1['mape_ets'])],
['ma', 'ets', 'prophet'], default='ets')
ensemble_error_h1['mape'] = np.select(
[ensemble_error_h1['model'] == 'ma',
ensemble_error_h1['model'] == 'ets',
ensemble_error_h1['model'] == 'prophet'],
[ensemble_error_h1['mape_ma'],
ensemble_error_h1['mape_ets'],
ensemble_error_h1['mape_prophet']],
default=ensemble_error_h1['mape_ets'])
ensemble_error_h1['mae'] = np.select(
[ensemble_error_h1['model'] == 'ma',
ensemble_error_h1['model'] == 'ets',
ensemble_error_h1['model'] == 'prophet'],
[ensemble_error_h1['mae_ma'],
ensemble_error_h1['mae_ets'],
ensemble_error_h1['mae_prophet']],
default=ensemble_error_h1['mae_ets'])
# creating ensemble dataframe for best model - MA + ETS + Prophet
ma_drug_best_h1 = ensemble_error_h1.loc[
ensemble_error_h1['model'] == 'ma', 'drug_id']
ets_drug_best_h1 = ensemble_error_h1.loc[
ensemble_error_h1['model'] == 'ets', 'drug_id']
prophet_drug_best_h1 = ensemble_error_h1.loc[
ensemble_error_h1['model'] == 'prophet', 'drug_id']
ma_train_best_h1 = ma_train[
ma_train_h1['drug_id'].isin(ma_drug_best_h1)]
ma_predict_best_h1 = ma_predict[
ma_predict['drug_id'].isin(ma_drug_best_h1)]
ma_train_best_h1['model'] = 'ma'
ma_predict_best_h1['model'] = 'ma'
ets_train_best_h1 = ets_train_h1[
ets_train_h1['drug_id'].isin(ets_drug_best_h1)]
ets_predict_best_h1 = ets_predict_h1[
ets_predict_h1['drug_id'].isin(ets_drug_best_h1)]
ets_train_best_h1['model'] = 'ets'
ets_predict_best_h1['model'] = 'ets'
prophet_train_best_h1 = prophet_train_h1[
prophet_train_h1['drug_id'].isin(prophet_drug_best_h1)]
prophet_predict_best_h1 = prophet_predict_h1[
prophet_predict_h1['drug_id'].isin(prophet_drug_best_h1)]
prophet_train_best_h1['model'] = 'prophet'
prophet_predict_best_h1['model'] = 'prophet'
ensemble_train_h1 = pd.concat(
[ma_train_best_h1, ets_train_best_h1, prophet_train_best_h1], axis=0)
ensemble_predict_h1 = pd.concat(
[ma_predict_best_h1, ets_predict_best_h1, prophet_predict_best_h1],
axis=0)
'''
# H1 BUCKET AGGREGATING
ma_train_h1['model'] = 'ma'
ma_train_h1['history_bucket'] = 'H1'
ets_train_h1['model'] = 'ets'
ets_train_h1['history_bucket'] = 'H1'
ma_train_error_h1['model'] = 'ma'
ma_train_error_h1['history_bucket'] = 'H1'
ets_train_error_h1['model'] = 'ets'
ets_train_error_h1['history_bucket'] = 'H1'
ma_predict_h1['model'] = 'ma'
ma_predict_h1['history_bucket'] = 'H1'
ets_predict_h1['model'] = 'ets'
ets_predict_h1['history_bucket'] = 'H1'
train_h1 = pd.concat([ma_train_h1, ets_train_h1], axis=0)
train_error_h1 = pd.concat([ma_train_error_h1, ets_train_error_h1], axis=0)
predict_h1 = pd.concat([ma_predict_h1, ets_predict_h1], axis=0)
train_h1['forecast_type'] = 'train'
train_h1['final_fcst'] = 'N'
train_error_h1['forecast_type'] = 'train'
train_error_h1['final_fcst'] = 'N'
predict_h1['forecast_type'] = 'forecast'
predict_h1['final_fcst'] = 'N'
ensemble_train_h1['forecast_type'] = 'train'
ensemble_train_h1['final_fcst'] = 'Y'
ensemble_train_h1['history_bucket'] = 'H1'
ensemble_error_h1['forecast_type'] = 'train'
ensemble_error_h1['final_fcst'] = 'Y'
ensemble_error_h1['history_bucket'] = 'H1'
ensemble_predict_h1['forecast_type'] = 'forecast'
ensemble_predict_h1['final_fcst'] = 'Y'
ensemble_predict_h1['history_bucket'] = 'H1'
''' H2/H2- bucket - Train and Forecast'''
logger.info(
'Drugs for training' +
str(drug_sales_monthly_bucket_h2.drug_id.nunique()))
# FORECASTING MODULES: MOVING AVERAGES K=3
ma_train_data_h2 = drug_sales_monthly_bucket_h2.copy()
ma_train_data_h2 = ma_train_data_h2[
['drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# model parameters
horizon = 3 # future forecast
# train
start = time.time()
ma_train_h2 = ma_train_data_h2.groupby('drug_id').apply(ma_train_monthly). \
reset_index(drop=True)
end = time.time()
logger.info('H2 MA Train: Run time ' + str(round(end - start, 2)) + 'secs')
# train error
start = time.time()
ma_train_error_h2 = ma_train_h2.groupby('drug_id').apply(train_error). \
reset_index(drop=True)
end = time.time()
logger.info('H2 MA Error: Run time ' + str(round(end - start, 2)) + 'secs')
# predict
start = time.time()
ma_predict_h2 = ma_train_data_h2.groupby('drug_id'). \
apply(ma_predict_monthly).reset_index(drop=True)
end = time.time()
logger.info('H2 MA Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
# FORECASTING MODULES: SIMPLE EXPONENTIAL SMOOTHING
ses_train_data_h2 = drug_sales_monthly_bucket_h2.copy()
ses_train_data_h2 = ses_train_data_h2[
['drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# variables
horizon = 3 # future forecast
out_of_sample = 3 # out of sample forecast
# ses implementation
trend = [None]
seasonal = [None]
damped = [False]
seasonal_periods = [12]
use_boxcox = [False]
ses_params = list(
product(trend, seasonal, damped, seasonal_periods, use_boxcox))
# train
start = time.time()
ses_train_h2 = apply_parallel_ets(
ses_train_data_h2.groupby('drug_id'), ets_train_monthly, ses_params
).reset_index(drop=True)
end = time.time()
logger.info('H2 ETS Train: Run time ' + str(round(end - start, 2)) + 'secs')
# train error
start = time.time()
ses_train_error_h2 = ses_train_h2.groupby('drug_id').apply(train_error). \
reset_index(drop=True)
end = time.time()
logger.info('H2 ETS Error: Run time ' + str(round(end - start, 2)) + 'secs')
# predict
start = time.time()
ses_predict_h2 = apply_parallel_ets(
ses_train_data_h2.groupby('drug_id'), ets_predict_monthly,
ses_train_h2).reset_index(drop=True)
end = time.time()
logger.info('H2 ETS Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
# FORECASTING MODULE - ENSEMBLE
# identifying best model for each drug - using MA and SES
ensemble_error_h2 = ses_train_error_h2.merge(
ma_train_error_h2, how='outer', on='drug_id', suffixes=('_ses', '_ma'))
ensemble_error_h2['model'] = np.where(
ensemble_error_h2['mape_ma'] < ensemble_error_h2['mape_ses'],
'ma', 'ses')
ensemble_error_h2['mape'] = np.where(
ensemble_error_h2['model'] == 'ma',
ensemble_error_h2['mape_ma'], ensemble_error_h2['mape_ses'])
ensemble_error_h2['mae'] = np.where(
ensemble_error_h2['model'] == 'ma',
ensemble_error_h2['mae_ma'], ensemble_error_h2['mae_ses'])
# creating ensemble dataframe for best_h2 model - MA + ses
ma_drug_best_h2 = ensemble_error_h2.loc[
ensemble_error_h2['model'] == 'ma', 'drug_id']
ses_drug_best_h2 = ensemble_error_h2.loc[
ensemble_error_h2['model'] == 'ses', 'drug_id']
ma_train_best_h2 = ma_train_h2[
ma_train_h2['drug_id'].isin(ma_drug_best_h2)]
ma_predict_best_h2 = ma_predict_h2[
ma_predict_h2['drug_id'].isin(ma_drug_best_h2)]
ma_train_best_h2['model'] = 'ma'
ma_predict_best_h2['model'] = 'ma'
ses_train_best_h2 = ses_train_h2[
ses_train_h2['drug_id'].isin(ses_drug_best_h2)]
ses_predict_best_h2 = ses_predict_h2[
ses_predict_h2['drug_id'].isin(ses_drug_best_h2)]
ses_train_best_h2['model'] = 'ses'
ses_predict_best_h2['model'] = 'ses'
ensemble_train_h2 = pd.concat(
[ma_train_best_h2, ses_train_best_h2], axis=0)
# getting best model for H2- bucket
ensemble_model_agg = ensemble_error_h2.groupby('model')['drug_id']. \
count().reset_index()
ensemble_model_best_h2 = ensemble_model_agg.loc[
ensemble_model_agg['drug_id'] == ensemble_model_agg['drug_id'].max(),
'model'].values[0]
logger.info('Best model for H2 forecast' + ensemble_model_best_h2)
# H2 minus bucket predic based on the best_h2 model overall
train_data_h2minus = drug_sales_monthly_bucket_h2minus.copy()
predict_h2minus = pd.DataFrame()
start = time.time()
if ensemble_model_best_h2 == 'ses' and len(drug_sales_monthly_bucket_h2minus):
start = time.time()
train_data_h2minus['hyper_params'] = str(ses_params[0])
predict_h2minus = apply_parallel_ets(
train_data_h2minus.groupby('drug_id'), ets_predict_monthly,
train_data_h2minus). \
reset_index(drop=True)
if ensemble_model_best_h2 == 'ma':
start = time.time()
predict_h2minus = train_data_h2minus.groupby('drug_id'). \
apply(ma_predict_monthly).reset_index(drop=True)
predict_h2minus['model'] = ensemble_model_best_h2
end = time.time()
logger.info(
'H2 Minus Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
ensemble_predict_h2 = pd.concat(
[ma_predict_best_h2, ses_predict_best_h2, predict_h2minus], axis=0)
# H2 BUCKET AGGREGATING
ma_train_h2['model'] = 'ma'
ma_train_h2['history_bucket'] = 'H2'
ses_train_h2['model'] = 'ses'
ses_train_h2['history_bucket'] = 'H2'
ma_train_error_h2['model'] = 'ma'
ma_train_error_h2['history_bucket'] = 'H2'
ses_train_error_h2['model'] = 'ses'
ses_train_error_h2['history_bucket'] = 'H2'
ma_predict_h2['model'] = 'ma'
ma_predict_h2['history_bucket'] = 'H2'
ses_predict_h2['model'] = 'ses'
ses_predict_h2['history_bucket'] = 'H2'
train_h2 = pd.concat([ma_train_h2, ses_train_h2], axis=0)
train_error_h2 = pd.concat([ma_train_error_h2, ses_train_error_h2], axis=0)
predict_h2 = pd.concat([ma_predict_h2, ses_predict_h2], axis=0)
train_h2['forecast_type'] = 'train'
train_h2['final_fcst'] = 'N'
train_error_h2['forecast_type'] = 'train'
train_error_h2['final_fcst'] = 'N'
predict_h2['forecast_type'] = 'forecast'
predict_h2['final_fcst'] = 'N'
ensemble_train_h2['forecast_type'] = 'train'
ensemble_train_h2['final_fcst'] = 'Y'
ensemble_train_h2['history_bucket'] = 'H2'
ensemble_error_h2['forecast_type'] = 'train'
ensemble_error_h2['final_fcst'] = 'Y'
ensemble_error_h2['history_bucket'] = 'H2'
ensemble_predict_h2['forecast_type'] = 'forecast'
ensemble_predict_h2['final_fcst'] = 'Y'
ensemble_predict_h2['history_bucket'] = 'H2'
''' H3 bucket - Train and Forecast'''
logger.info(
'Drugs for training' +
str(drug_sales_monthly_bucket_h3.drug_id.nunique()))
# FORECASTING MODULES: NAIVE
naive_train_data_h3 = drug_sales_monthly_bucket_h3.copy()
naive_train_data_h3 = naive_train_data_h3[[
'drug_id', 'month_begin_dt', 'year', 'month', 'net_sales_quantity']]
# predict
start = time.time()
naive_predict_h3 = naive_train_data_h3.groupby('drug_id'). \
apply(naive_predict_monthly, horizon).reset_index(drop=True)
end = time.time()
logger.info(
'H3 Naive Fcst: Run time ' + str(round(end - start, 2)) + 'secs')
# H3 BUCKET AGGREGATING
naive_predict_h3['model'] = 'naive'
naive_predict_h3['history_bucket'] = 'H3'
predict_h3 = naive_predict_h3.copy()
predict_h3['forecast_type'] = 'forecast'
predict_h3['final_fcst'] = 'N'
ensemble_predict_h3 = naive_predict_h3.copy()
ensemble_predict_h3['forecast_type'] = 'forecast'
ensemble_predict_h3['final_fcst'] = 'Y'
''' AGG. TRAIN/ERROR/FORECAST TABLES '''
train = pd.concat([train_h1, train_h2], axis=0)
error = pd.concat([train_error_h1, train_error_h2], axis=0)
predict = pd.concat([predict_h1, predict_h2, predict_h3], axis=0)
ensemble_train = pd.concat([ensemble_train_h1, ensemble_train_h2], axis=0)
ensemble_error = pd.concat([ensemble_error_h1, ensemble_error_h2], axis=0)
ensemble_predict = pd.concat(
[ensemble_predict_h1, ensemble_predict_h2, ensemble_predict_h3],
axis=0)
# Letting code to not fail when h3 bucket is empty
if 'net_sales_quantity' in predict.columns:
del predict['net_sales_quantity']
if 'net_sales_quantity' in ensemble_predict.columns:
del ensemble_predict['net_sales_quantity']
# converting data to str objection
train['month_begin_dt'] = train['month_begin_dt']. \
dt.date.astype(str)
predict['month_begin_dt'] = predict['month_begin_dt']. \
dt.date.astype(str)
ensemble_train['month_begin_dt'] = ensemble_train['month_begin_dt']. \
dt.date.astype(str)
ensemble_predict['month_begin_dt'] = ensemble_predict['month_begin_dt']. \
dt.date.astype(str)
return train, error, predict, ensemble_train, ensemble_error, \
ensemble_predict | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/warehouse/forecast/forecast_main.py | forecast_main.py |
import numpy as np
from zeno_etl_libs.utils.warehouse.forecast.helper_functions import make_future_df
from zeno_etl_libs.utils.warehouse.forecast.errors import ape_calc, ae_calc
from statsmodels.tsa.holtwinters import ExponentialSmoothing
# ets train
def ets_train_monthly(df, ets_params, horizon=3, out_of_sample=3, logger=None):
train = df.copy()
train.drop(train.tail(out_of_sample).index, inplace=True)
# dividing the series into train and validation set
drug_id = train['drug_id'].values[0]
input_series = train['net_sales_quantity'].values
validation = df['net_sales_quantity'].tail(out_of_sample).values
# creating dummy best fit param and fit values
best_fit = None
best_fit_params = [None, None, False, None, False]
best_accuracy = np.inf
best_ape = [0]*horizon
best_ae = [0]*horizon
# best_fittedvalues = [0]*len(train)
# best_fcastvalues = [0]*horizon
# running a loop for grid search
for params in ets_params:
trend, seasonal, damped, seasonal_periods, use_boxcox = params
try:
ape = []
ae = []
# model fitting
model = ExponentialSmoothing(
input_series, trend=trend, seasonal=seasonal, damped=damped,
seasonal_periods=seasonal_periods, use_boxcox=use_boxcox)
fit = model.fit(optimized=True)
# accuracy parameter can be - aic, bic, sse or mape
forecast = np.round(fit.forecast(horizon))
# print(forecast)
ape = [
ape_calc(actual, forecast)
for actual, forecast in zip(validation, forecast)]
ae = [
ae_calc(actual, forecast)
for actual, forecast in zip(validation, forecast)]
fit_mape = np.mean(ape)
# fit_mae = np.mean(ae)
# fitted_values = fit.fittedvalues
# identifying the best fit params
if (fit_mape <= best_accuracy) & (fit_mape != -np.inf):
best_fit = fit
best_fit_params = params
best_accuracy = fit_mape
best_ape = ape
best_ae = ae
# best_fittedvalues = fitted_values
best_forecast = forecast
except Exception as error:
# print(params,':', error)
error_str = '''Drug {} Params {} Error: {}'''.format(
drug_id, str(params), error)
# logger.info(error_str)
pass
# creating out of output dataset
predict_df = make_future_df(train, out_of_sample)
# forecast deviation (Holt's method): sigma = sqrt(sse * (1 + alpha^2 * (h - 1)) / n)
alpha = best_fit.params['smoothing_level']
std = np.round(
np.sqrt(best_fit.sse*(1 + alpha * alpha * (horizon-1)) /
len(best_fit.fittedvalues)))
predict_df['fcst'] = best_forecast
predict_df['std'] = std
predict_df['actual'] = validation
# model variables
predict_df['ape'] = best_ape
predict_df['ae'] = best_ae
predict_df['hyper_params'] = str(best_fit_params)
return predict_df
# ets predict
def ets_predict_monthly(df, ets_train, horizon=3, out_of_sample=3):
df = df.copy()
print("running for drug_id --> " + str(df['drug_id'].unique()[0]))
fit_params = ets_train[ets_train['drug_id']==df['drug_id'].unique()[0]]
fit_params = tuple(eval(fit_params['hyper_params'].values[0]))
series = df['net_sales_quantity'].values
# getting best fit params for forecast
trend, seasonal, damped, seasonal_periods, use_boxcox = fit_params
# creating model instance
try:
model = ExponentialSmoothing(
series, trend=trend, seasonal=seasonal, damped=damped,
seasonal_periods=seasonal_periods, use_boxcox=use_boxcox)
fit = model.fit(optimized=True)
if np.isnan(fit.sse) == True or fit.forecast(horizon)[0] < 0 or \
(series[-1] > 0 and fit.forecast(horizon)[0] > 0 and
(0.33 > series[-1]/fit.forecast(horizon)[0] or
series[-1]/fit.forecast(horizon)[0] > 3)):
raise Exception(
'ets hyperparams giving outliers for drug_id: ' \
+ str(df['drug_id'].unique()[0]) + \
' running model with default params')
except Exception as error:
model = ExponentialSmoothing(
series, trend=None, seasonal=None, damped=False,
seasonal_periods=seasonal_periods, use_boxcox=False)
fit = model.fit(optimized=True)
print(error)
# creating out of output dataset
predict_df = make_future_df(df, horizon)
predict_df['fcst'] = np.round(fit.forecast(horizon))
# forecast deviation (Holt's method): sigma = sqrt(sse * (1 + alpha^2 * (h - 1)) / n)
alpha = fit.params['smoothing_level']
std = np.round(
np.sqrt(fit.sse*(1 + alpha * alpha * (horizon-1)) /
len(fit.fittedvalues)))
predict_df['std'] = std
return predict_df | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/warehouse/forecast/ets.py | ets.py |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# absolute percentage error calculation
def ape_calc(actual, forecast):
if (actual == 0) & (forecast == 0):
ape = 0
elif forecast == 0:
ape = 1
elif actual == 0:
ape = 1
else:
ape = abs((forecast - actual)/actual)
return ape
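# Illustrative example: actual=80, forecast=100 -> ape = |100 - 80| / 80 = 0.25;
# if either value is zero while the other is not, ape is capped at 1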
# abs error calculation
def ae_calc(actual, forecast):
if (actual == 0) & (forecast == 0):
ae = 0
elif forecast == 0:
ae = actual
elif actual == 0:
ae = forecast
else:
ae = abs(forecast - actual)
return ae
# weighted mape calculation
def wmape(actual, forecast):
wmape = sum(abs(forecast-actual))/sum(actual)
return round(100*wmape, 1)
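# Illustrative example: actual=[100, 50], forecast=[90, 70]
# -> wmape = (10 + 20) / 150 = 0.2 -> returned as 20.0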
# avg mape, ape for the forecast horizon
def train_error(train_df):
drug_id = train_df['drug_id'].values[-1]
mae = np.mean(train_df['ae'])
mape = np.mean(train_df['ape'])
return pd.DataFrame(
[[drug_id, mae, mape]], columns=['drug_id', 'mae', 'mape'])
def train_error_ets_h1(train_df):
drug_id = train_df['drug_id'].values[-1]
mae = np.mean(train_df['ae'])
mape = np.mean(train_df['ape'])
actual = np.mean(train_df['actual'])
std = np.mean(train_df['std'])
return pd.DataFrame(
[[drug_id, mae, mape, actual, std]], columns=['drug_id', 'mae', 'mape', 'actual', 'std'])
# error reporting overall
def error_report(error_df, wh_drug_list, drug_history):
print('MAE and MAPE error')
error_df = error_df.copy()
error_df['mape'] = np.round(error_df['mape'] * 100, 1)
print(np.round(error_df.mae.mean()), error_df.mape.mean(), '\n')
print('MAPE describe')
print(error_df['mape'].describe(), '\n')
print('MAPE Plots')
fig, ax = plt.subplots()
error_df['mape'].hist(ax=ax, bins=100, bottom=0.05)
ax.set_yscale('log')
ax.set_ylabel('# of SKUs')
ax.set_xlabel('MAPE')
print(' ', '\n')
print('MAPE where error % > 100%')
print(error_df.query('mape > 1').sort_values('mape')['mape'].mean(), '\n')
print('MAE describe')
print(error_df['mae'].describe(), '\n')
print('MAE Plots')
fig, ax = plt.subplots()
error_df['mae'].hist(ax=ax, bins=100, bottom=0.05)
ax.set_ylabel('# of SKUs')
ax.set_xlabel('MAE')
ax.set_yscale('log')
print('ERROR MAPPING WITH BUCKETS AND HISTORY')
error_bucket = error_df.merge(
wh_drug_list[['drug_id', 'bucket']], on='drug_id').\
merge(drug_history, on='drug_id')
fig, ax = plt.subplots()
error_bucket.groupby('month_history')['mape'].mean().plot()
ax.set_ylabel('MAPE')
ax.set_xlabel('Available history')
print(error_bucket.groupby('bucket')['mape'].mean())
return 0
def error_report_monthly(train_data, wh_drug_list, drug_history):
train_data = train_data.copy()
train_data['ape'] = np.round(train_data['ape'] * 100, 1)
train_data['out_month'] = train_data.\
groupby('drug_id')['month_begin_dt'].rank()
print('MAE and MAPE error')
print(
train_data.groupby('out_month')['ape'].mean(),
train_data.groupby('out_month')['ae'].mean())
print('MAPE describe')
print(train_data.groupby('out_month')['ape'].describe(), '\n')
print('MAPE Plots')
for month in train_data['out_month'].unique():
train_data_month = train_data[train_data['out_month'] == month]
fig, ax = plt.subplots()
train_data_month['ape'].hist(ax=ax, bins=100, bottom=0.05)
plt.title('MAPE: Month out {}'.format(month))
ax.set_yscale('log')
ax.set_ylabel('# of SKUs')
ax.set_xlabel('APE')
print(' ', '\n')
print('MAPE where error % > 100%')
print(train_data.query('ape > 1').groupby('out_month')['ape'].mean(), '\n')
print('MAE describe')
print(train_data.groupby('out_month')['ae'].describe(), '\n')
print('MAE Plots')
for month in train_data['out_month'].unique():
train_data_month = train_data[train_data['out_month'] == month]
fig, ax = plt.subplots()
train_data_month['ae'].hist(ax=ax, bins=100, bottom=0.05)
plt.title('MAE: Month out {}'.format(month))
ax.set_yscale('log')
ax.set_ylabel('# of SKUs')
ax.set_xlabel('AE')
print(' ', '\n')
print('ERROR MAPPING WITH BUCKETS AND HISTORY')
train_bucket = train_data.merge(
wh_drug_list[['drug_id', 'bucket']], on='drug_id').\
merge(drug_history, on='drug_id')
fig, ax = plt.subplots()
colors = {1: 'red', 2: 'green', 3: 'blue'}
for month in train_bucket['out_month'].unique():
train_bucket_month = train_bucket[train_bucket['out_month'] == month]
train_bucket_month.groupby('month_history')['ape'].mean().\
plot(color=colors[month], title='APE: Month out {}'.format(month),
label=month)
print('APE: Month out {}'.format(month))
print(train_bucket_month.groupby('bucket')['ape'].mean())
plt.title('APE: Month out vs Data history' + str(colors))
ax.set_ylabel('APE')
ax.set_xlabel('Available history')
return 0
# weigheted mape report
def wmape_report(train_data, wh_drug_list, drug_history):
train_data = train_data.copy()
train_data['out_month'] = train_data.\
groupby('drug_id')['month_begin_dt'].rank()
print('wMAPE', wmape(train_data['actual'], train_data['fcst']))
print('Month out wMAPE', train_data.groupby('out_month').
apply(lambda row: wmape(row['actual'], row['fcst'])))
train_bucket = train_data.merge(
wh_drug_list[['drug_id', 'bucket']], on='drug_id').\
merge(drug_history, on='drug_id')
print('Bucket out wMAPE', train_bucket.groupby('bucket').
apply(lambda row: wmape(row['actual'], row['fcst'])))
print('Bucket out 1st Month wMAPE', train_bucket.query('out_month == 1').
groupby('bucket').apply(
lambda row: wmape(row['actual'], row['fcst'])))
return 0 | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/warehouse/forecast/errors.py | errors.py |
import pandas as pd
import numpy as np
import datetime as dt
def ranking_override_dc(features_rank, db, read_schema, logger,
override_type_list=['AS/MS']):
# Get distributor preference list
current_date = dt.date.today().strftime('%Y-%m-%d')
q_preference = """
select "dc-id", "drug-id", "distributor-preference", "distributor-id"
from "{read_schema}"."distributor-ranking-preference"
where "is-active" = 1
and "start-date" < '{0}'
and "end-date" > '{0}'
and "dc-id" is not null
""".format(current_date, read_schema=read_schema)
rank_override = db.get_df(q_preference)
if rank_override.shape[0] != 0:
# Manual rank override logic starts
logger.info(f"Overriding for {override_type_list}")
original_shape = features_rank.shape
features_rank = features_rank.reset_index(drop=True)
rank_override = rank_override.dropna()
rank_override = rank_override.drop_duplicates()
rank_override = rank_override.sort_values(
['dc_id', 'drug_id', 'distributor_preference', 'distributor_id'],
ascending=[True, True, True, True]).reset_index(drop=True)
rank_override_grp = rank_override.groupby(["dc_id", "drug_id"],
as_index=False).agg(
{"distributor_id": dist_order})
rank_override_grp.rename({"distributor_id": "override_dist_order"}, axis=1,
inplace=True)
df_merged = features_rank.merge(rank_override_grp, on=["dc_id", "drug_id"],
how="left")
df_rank_override = df_merged.loc[~df_merged["override_dist_order"].isna()]
df_rank_override = df_rank_override.loc[
df_rank_override["request_type"].isin(override_type_list)]
index_to_drop = df_rank_override.index.values.tolist()
features_rank = features_rank.drop(index_to_drop)
logger.info(f"Number of rows to update ranks: {original_shape[0]-features_rank.shape[0]}")
df_rank_override["final_dist_1"] = df_rank_override["final_dist_1"].fillna(0)
df_rank_override["final_dist_2"] = df_rank_override["final_dist_2"].fillna(0)
df_rank_override["final_dist_3"] = df_rank_override["final_dist_3"].fillna(0)
dist_1 = np.array(df_rank_override["final_dist_1"].astype(int))
dist_2 = np.array(df_rank_override["final_dist_2"].astype(int))
dist_3 = np.array(df_rank_override["final_dist_3"].astype(int))
stacked_dist = np.stack((dist_1, dist_2, dist_3), axis=-1)
df_rank_override["prev_dist_order"] = list(stacked_dist)
order_list = []
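# e.g. (illustrative) override_dist_order [8105] with prev_dist_order
# (64, 23, 0) concatenates to [8105, 64, 23, 0]; the first three become
# the overridden final_dist_1/2/3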
for index, row in df_rank_override.iterrows():
eval_string = str(row["override_dist_order"]) + "+" + str(list(row["prev_dist_order"]))
order_list.append(str(eval(eval_string)[:3]).replace('[', '').replace(']', '').replace(' ', ''))
df_rank_override["final_order"] = order_list
df_final_order = df_rank_override['final_order'].str.split(pat=',', expand=True).rename(
columns={0: 'final_dist_1',
1: 'final_dist_2',
2: 'final_dist_3'})
df_final_order["final_dist_1"] = df_final_order["final_dist_1"].astype(int)
df_final_order["final_dist_2"] = df_final_order["final_dist_2"].astype(int)
df_final_order["final_dist_3"] = df_final_order["final_dist_3"].astype(int)
df_final_order = df_final_order.replace({0: np.nan})
df_rank_override["final_dist_1"] = df_final_order["final_dist_1"]
df_rank_override["final_dist_2"] = df_final_order["final_dist_2"]
df_rank_override["final_dist_3"] = df_final_order["final_dist_3"]
df_rank_override.drop(["override_dist_order", "prev_dist_order", "final_order"],
axis=1, inplace=True)
features_rank = features_rank.append(df_rank_override)
features_rank.sort_index(ascending=True, inplace=True)
assert features_rank.shape == original_shape
else:
logger.info("Skipping..: no rank preferences present in table")
return features_rank
def ranking_override_franchisee(features_rank, db, read_schema, logger,
override_type_list=['AS/MS', 'PR']):
# Get distributor preference list
current_date = dt.date.today().strftime('%Y-%m-%d')
q_preference = """
select "dc-id", "drug-id", "distributor-preference", "distributor-id"
from "{read_schema}"."distributor-ranking-preference"
where "is-active" = 1
and "start-date" < '{0}'
and "end-date" > '{0}'
and "store-id" is not null
""".format(current_date, read_schema=read_schema)
rank_override = db.get_df(q_preference)
if rank_override.shape[0] != 0:
# Manual rank override logic starts
logger.info(f"Overriding for {override_type_list}")
original_shape = features_rank.shape
features_rank = features_rank.reset_index(drop=True)
rank_override = rank_override.dropna()
rank_override = rank_override.drop_duplicates()
rank_override = rank_override.sort_values(
['store_id', 'drug_id', 'distributor_preference', 'distributor_id'],
ascending=[True, True, True, True]).reset_index(drop=True)
rank_override_grp = rank_override.groupby(["store_id", "drug_id"],
as_index=False).agg(
{"distributor_id": dist_order})
rank_override_grp.rename({"distributor_id": "override_dist_order"}, axis=1,
inplace=True)
df_merged = features_rank.merge(rank_override_grp, on=["store_id", "drug_id"],
how="left")
df_rank_override = df_merged.loc[~df_merged["override_dist_order"].isna()]
df_rank_override = df_rank_override.loc[
df_rank_override["request_type"].isin(override_type_list)]
index_to_drop = df_rank_override.index.values.tolist()
features_rank = features_rank.drop(index_to_drop)
logger.info(f"Number of rows to update ranks: {original_shape[0]-features_rank.shape[0]}")
df_rank_override["final_dist_1"] = df_rank_override["final_dist_1"].fillna(0)
df_rank_override["final_dist_2"] = df_rank_override["final_dist_2"].fillna(0)
df_rank_override["final_dist_3"] = df_rank_override["final_dist_3"].fillna(0)
dist_1 = np.array(df_rank_override["final_dist_1"].astype(int))
dist_2 = np.array(df_rank_override["final_dist_2"].astype(int))
dist_3 = np.array(df_rank_override["final_dist_3"].astype(int))
stacked_dist = np.stack((dist_1, dist_2, dist_3), axis=-1)
df_rank_override["prev_dist_order"] = list(stacked_dist)
order_list = []
for index, row in df_rank_override.iterrows():
eval_string = str(row["override_dist_order"]) + "+" + str(list(row["prev_dist_order"]))
order_list.append(str(eval(eval_string)[:3]).replace('[', '').replace(']', '').replace(' ', ''))
df_rank_override["final_order"] = order_list
df_final_order = df_rank_override['final_order'].str.split(pat=',', expand=True).rename(
columns={0: 'final_dist_1',
1: 'final_dist_2',
2: 'final_dist_3'})
df_final_order["final_dist_1"] = df_final_order["final_dist_1"].astype(int)
df_final_order["final_dist_2"] = df_final_order["final_dist_2"].astype(int)
df_final_order["final_dist_3"] = df_final_order["final_dist_3"].astype(int)
df_final_order = df_final_order.replace({0: np.nan})
df_rank_override["final_dist_1"] = df_final_order["final_dist_1"]
df_rank_override["final_dist_2"] = df_final_order["final_dist_2"]
df_rank_override["final_dist_3"] = df_final_order["final_dist_3"]
df_rank_override.drop(["override_dist_order", "prev_dist_order", "final_order"],
axis=1, inplace=True)
features_rank = features_rank.append(df_rank_override)
features_rank.sort_index(ascending=True, inplace=True)
assert features_rank.shape == original_shape
else:
logger.info("Skipping..: no rank preferences present in table")
return features_rank
def dist_order(pd_arr):
"""To arrange in preference order and avoid duplication"""
pd_arr = list(pd_arr)
dist_list = [i for n, i in enumerate(pd_arr) if i not in pd_arr[:n]]
return dist_list[:3] | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/distributor_ranking/ranking_intervention.py | ranking_intervention.py |
import pandas as pd
import numpy as np
from sklearn import preprocessing
def apply_topsis(features, x_train, weights, cutoff_percentage, volume_fraction):
''' cutoff percentage is cutoff for determining whether a distributor is low volume or not'''
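# TOPSIS outline as implemented below: min-max normalise the feature matrix,
# apply the weights, measure each (dc, drug, distributor) row's euclidean
# distance from the ideal-best and ideal-worst vectors, then score
# performance = d_worst / (d_worst + d_best) * 100 and rank (1 = best)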
scaler = preprocessing.MinMaxScaler()
# normalize features
x_normalized = pd.DataFrame(
scaler.fit_transform(x_train), columns=x_train.columns)
# multiply with normalized weights here.
x_weighted = np.multiply(x_normalized, weights)
# merge drug id, dist id and dc id for reference
x_weighted = pd.merge(features[['drug_id', 'partial_distributor_id', 'partial_dc_id']],
x_weighted, left_index=True, right_index=True, how='inner')
# define ideal best vector here
ideal_best = x_weighted.agg({'lead_time': 'min', 'margin': 'max', 'bounce_rate': 'min',
'ff': 'max',
'lost_recency': 'max',
'success_recency': 'min'}).reset_index()
ideal_best = ideal_best.set_index(
'index').rename(columns={0: 'ideal_best'})
# define ideal worse vector here.
ideal_worse = x_weighted.agg({'lead_time': 'max', 'margin':'min',
'bounce_rate': 'max',
'ff': 'min',
'lost_recency': 'min',
'success_recency': 'max'}).reset_index()
ideal_worse = ideal_worse.set_index(
'index').rename(columns={0: 'ideal_worse'})
x_weighted_best = pd.merge(x_weighted.T, ideal_best,
how='left', left_index=True, right_index=True).T
x_weighted_worse = pd.merge(x_weighted.T, ideal_worse,
how='left', left_index=True, right_index=True).T
# euclidean distance with ideal worse is calculated here.
ideal_worse_ed = x_weighted_worse[['lead_time', 'margin', 'bounce_rate', 'ff',
'lost_recency',
'success_recency']].apply(lambda x: np.linalg.norm(x.values - ideal_worse['ideal_worse'].values), axis=1)
ideal_worse_ed = pd.DataFrame(ideal_worse_ed, columns=['ideal_worse_ed'])
# euclidean distance with ideal best is calculated here.
ideal_best_ed = x_weighted_best[['lead_time', 'margin',
'bounce_rate', 'ff',
'lost_recency',
'success_recency']].apply(lambda x: np.linalg.norm(x.values - ideal_best['ideal_best'].values), axis=1)
ideal_best_ed = pd.DataFrame(ideal_best_ed, columns=['ideal_best_ed'])
# append ideal worse euclidean distance here.
x_eval = pd.merge(x_weighted, ideal_worse_ed, how='left',
left_index=True, right_index=True)
# append ideal best euclidean distance here.
x_eval = pd.merge(x_eval, ideal_best_ed, how='left',
left_index=True, right_index=True)
x_eval['performance'] = (x_eval['ideal_worse_ed'] /
(x_eval['ideal_worse_ed'] + x_eval['ideal_best_ed'])) * 100
x_rank = x_eval.copy()
x_rank['rank'] = x_rank.groupby(['partial_dc_id', 'drug_id'])[
'performance'].rank(method='first', ascending=False)
x_rank['rank'] = x_rank['rank'].astype(int)
#################heuristics #############
features_rank = pd.merge(features,
x_rank[['drug_id', 'partial_distributor_id',
'partial_dc_id', 'performance', 'rank']],
how='outer', validate='one_to_one')
# add filter for low volume distributor exclusion for heuristic substitute
volume = features_rank.groupby(['partial_dc_id', 'partial_distributor_id',
'partial_distributor_name']).agg(
total_requests=('total_requests', 'sum'))
small_dist = volume.copy()
cutoff = max(small_dist['total_requests']) * cutoff_percentage
print('max total requests: ', max(small_dist['total_requests']))
print('low volume cutoff: ', cutoff)
small_dist['is_small'] = np.where(volume['total_requests'] < cutoff, 1, 0)
small_dist = small_dist.reset_index()
small_dist['fraction_total_requests'] = small_dist['total_requests'] / \
small_dist['total_requests'].sum()
# add flag for small distributors here
features_rank = pd.merge(features_rank,
small_dist[['partial_dc_id',
'partial_distributor_id', 'is_small']],
on=['partial_dc_id', 'partial_distributor_id'],
validate='many_to_one',
how='left')
dc_type_performance = features_rank.groupby(['partial_dc_id', 'drug_type', 'partial_distributor_id']).agg(
dc_type_performance=('performance', 'mean')).reset_index()
features_rank = pd.merge(features_rank, dc_type_performance,
on=['partial_dc_id', 'drug_type', 'partial_distributor_id'], how='left',
validate='many_to_one')
# determine dc type rank
features_rank['dc_type_rank'] = \
features_rank[(features_rank['is_small'] == 0) | ((features_rank['drug_type'] != 'generic') & (features_rank['drug_type'] != 'ethical'))].groupby(['partial_dc_id', 'drug_type'])['dc_type_performance'].rank(
method='dense', ascending=False).astype(int, errors='ignore')
dc_type_rank_ref = pd.pivot_table(features_rank, index=['partial_dc_id', 'drug_type'], columns=['dc_type_rank'],
values='partial_distributor_id').add_prefix('dc_drug_type_level_dist_').reset_index()
features_rank = pd.merge(features_rank,
dc_type_rank_ref[['partial_dc_id', 'drug_type', 'dc_drug_type_level_dist_1.0',
'dc_drug_type_level_dist_2.0', 'dc_drug_type_level_dist_3.0']],
how='left', on=['partial_dc_id', 'drug_type'], validate='many_to_one')
# append enterprise type rank
enterprise_type_performance = features_rank.groupby(['drug_type', 'partial_distributor_id']).agg(
enterprise_type_performance=('performance', 'mean')).reset_index()
features_rank = pd.merge(features_rank, enterprise_type_performance, on=['drug_type', 'partial_distributor_id'],
how='left', validate='many_to_one')
features_rank['enterprise_type_rank'] = features_rank[(features_rank['is_small'] == 0)
| ((features_rank['drug_type'] != 'generic')
& (features_rank['drug_type'] != 'ethical'))].groupby(['drug_type'])[
'enterprise_type_performance'].rank(method='dense', ascending=False).astype(int, errors='ignore')
enterprise_type_rank_ref = pd.pivot_table(features_rank, index=['drug_type'], columns=['enterprise_type_rank'],
values='partial_distributor_id').add_prefix('enterprise_drug_type_level_dist_').reset_index()
features_rank = pd.merge(features_rank,
enterprise_type_rank_ref[['drug_type',
'enterprise_drug_type_level_dist_1.0',
'enterprise_drug_type_level_dist_2.0',
'enterprise_drug_type_level_dist_3.0']],
how='left', on=['drug_type'], validate='many_to_one')
# 999 denotes that bounce rate = 1 and total requests is greater than 5 for that distributor.
features_rank['rank'] = np.where(
(features_rank['rank'].isin([1, 2, 3])) & (features_rank['bounce_rate'] == 1) & (
features_rank['total_requests'] > 5),
999, features_rank['rank'])
output_ranks = pd.pivot_table(features_rank, index=['partial_dc_id', 'drug_id'], columns='rank',
values='partial_distributor_id')[[1, 2, 3]].add_prefix('final_dist_').add_suffix('.0').reset_index()
features_rank = pd.merge(features_rank, output_ranks, on=['partial_dc_id', 'drug_id'], how='left',
validate='many_to_one')
# add volume fraction here
features_rank['volume_fraction'] = volume_fraction
######organize output here ####################
# remove .0 suffix from columns
features_rank.columns = features_rank.columns.str.replace(r'\.0$', '', regex=True)
# remove partial_ prefix from columns
features_rank.columns = features_rank.columns.str.replace(r'^partial_', '', regex=True)
# decide columns to be included here
features_rank = features_rank[['dc_id', 'dc_name', 'distributor_id',
'distributor_name',
'distributor_type',
'is_small', 'drug_id',
'drug_name', 'drug_type',
'lead_time', 'margin',
'total_lost', 'total_requests',
'bounce_rate', 'ff',
'lost_recency', 'success_recency',
'performance', 'rank',
'final_dist_1',
'final_dist_2',
'final_dist_3',
'dc_drug_type_level_dist_1',
'dc_drug_type_level_dist_2',
'dc_drug_type_level_dist_3',
'enterprise_drug_type_level_dist_1',
'enterprise_drug_type_level_dist_2',
'enterprise_drug_type_level_dist_3',
'volume_fraction']]
return features_rank
def apply_topsis_franchisee(features, x_train, weights, cutoff_percentage, volume_fraction):
'''cutoff percentage is cutoff for determining whether a distributor is low volume or not'''
scaler = preprocessing.MinMaxScaler()
# normalize features
x_normalized = pd.DataFrame(
scaler.fit_transform(x_train), columns=x_train.columns)
# multiply with normalized weights here.
x_weighted = np.multiply(x_normalized, weights)
# merge drug id, dist id and store id for reference
x_weighted = pd.merge(
features[['drug_id', 'partial_distributor_id', 'store_id']],
x_weighted, left_index=True, right_index=True, how='inner')
# define ideal best vector here
ideal_best = x_weighted.agg(
{'lead_time': 'min', 'margin': 'max', 'bounce_rate': 'min',
'ff': 'max',
'lost_recency': 'max',
'success_recency': 'min'}).reset_index()
ideal_best = ideal_best.set_index(
'index').rename(columns={0: 'ideal_best'})
# define ideal worse vector here.
ideal_worse = x_weighted.agg({'lead_time': 'max', 'margin': 'min',
'bounce_rate': 'max',
'ff': 'min',
'lost_recency': 'min',
'success_recency': 'max'}).reset_index()
ideal_worse = ideal_worse.set_index(
'index').rename(columns={0: 'ideal_worse'})
x_weighted_best = pd.merge(x_weighted.T, ideal_best,
how='left', left_index=True, right_index=True).T
x_weighted_worse = pd.merge(x_weighted.T, ideal_worse,
how='left', left_index=True, right_index=True).T
# euclidean distance with ideal worse is calculated here.
ideal_worse_ed = x_weighted_worse[
['lead_time', 'margin', 'bounce_rate', 'ff',
'lost_recency',
'success_recency']].apply(
lambda x: np.linalg.norm(x.values - ideal_worse['ideal_worse'].values),
axis=1)
ideal_worse_ed = pd.DataFrame(ideal_worse_ed, columns=['ideal_worse_ed'])
# euclidean distance with ideal best is calculated here.
ideal_best_ed = x_weighted_best[['lead_time', 'margin',
'bounce_rate', 'ff',
'lost_recency',
'success_recency']].apply(
lambda x: np.linalg.norm(x.values - ideal_best['ideal_best'].values),
axis=1)
ideal_best_ed = pd.DataFrame(ideal_best_ed, columns=['ideal_best_ed'])
# append ideal worse euclidean distance here.
x_eval = pd.merge(x_weighted, ideal_worse_ed, how='left',
left_index=True, right_index=True)
# append ideal best euclidean distance here.
x_eval = pd.merge(x_eval, ideal_best_ed, how='left',
left_index=True, right_index=True)
x_eval['performance'] = (x_eval['ideal_worse_ed'] /
(x_eval['ideal_worse_ed'] + x_eval[
'ideal_best_ed'])) * 100
x_rank = x_eval.copy()
x_rank['rank'] = x_rank.groupby(['store_id', 'drug_id'])[
'performance'].rank(method='first', ascending=False)
x_rank['rank'] = x_rank['rank'].astype(int)
#################heuristics #############
features_rank = pd.merge(features,
x_rank[['drug_id', 'partial_distributor_id',
'store_id', 'performance', 'rank']],
how='outer', validate='one_to_one')
# add filter for low volume distributor exclusion for heuristic substitute
volume = features_rank.groupby(['store_id', 'partial_distributor_id',
'partial_distributor_name']).agg(
total_requests=('total_requests', 'sum'))
small_dist = volume.copy()
    cutoff = max(small_dist['total_requests']) * cutoff_percentage
    print('max total requests: ', small_dist['total_requests'].max())
    print('low volume cutoff: ', cutoff)
small_dist['is_small'] = np.where(volume['total_requests'] < cutoff, 1, 0)
small_dist = small_dist.reset_index()
small_dist['fraction_total_requests'] = small_dist['total_requests'] / \
small_dist['total_requests'].sum()
# add flag for small distributors here
features_rank = pd.merge(features_rank,
small_dist[['store_id',
'partial_distributor_id', 'is_small']],
on=['store_id', 'partial_distributor_id'],
validate='many_to_one',
how='left')
store_type_performance = features_rank.groupby(
['store_id', 'drug_type', 'partial_distributor_id']).agg(
store_type_performance=('performance', 'mean')).reset_index()
features_rank = pd.merge(features_rank, store_type_performance,
on=['store_id', 'drug_type',
'partial_distributor_id'], how='left',
validate='many_to_one')
# determine store type rank
features_rank['store_type_rank'] = \
features_rank[(features_rank['is_small'] == 0) | (
(features_rank['drug_type'] != 'generic') & (
features_rank['drug_type'] != 'ethical'))].groupby(
['store_id', 'drug_type'])['store_type_performance'].rank(
method='dense', ascending=False).astype(int, errors='ignore')
store_type_rank_ref = \
pd.pivot_table(features_rank, index=['store_id', 'drug_type'],
columns=['store_type_rank'],
values='partial_distributor_id')[[1, 2, 3]].add_prefix(
'store_drug_type_level_dist_').reset_index()
features_rank = pd.merge(features_rank,
store_type_rank_ref[['store_id', 'drug_type',
'store_drug_type_level_dist_1',
'store_drug_type_level_dist_2',
'store_drug_type_level_dist_3']],
how='left', on=['store_id', 'drug_type'],
validate='many_to_one')
# 999 denotes that bounce rate = 1 and total requests is greater than 5 for that distributor.
features_rank['rank'] = np.where(
(features_rank['rank'] == 1) & (features_rank['bounce_rate'] == 1) & (
features_rank['total_requests'] > 5),
999, features_rank['rank'])
features_rank['rank'] = np.where(
(features_rank['rank'] == 2) & (features_rank['bounce_rate'] == 1) & (
features_rank['total_requests'] > 5),
999, features_rank['rank'])
features_rank['rank'] = np.where(
(features_rank['rank'] == 3) & (features_rank['bounce_rate'] == 1) & (
features_rank['total_requests'] > 5),
999, features_rank['rank'])
output_ranks = \
pd.pivot_table(features_rank, index=['store_id', 'drug_id'], columns='rank',
values='partial_distributor_id')[[1, 2, 3]].add_prefix(
'final_dist_').add_suffix('.0').reset_index()
features_rank = pd.merge(features_rank, output_ranks,
on=['store_id', 'drug_id'], how='left',
validate='many_to_one')
# add volume fraction here
features_rank['volume_fraction'] = volume_fraction
######organize output here ####################
    # remove .0 suffix from columns
    features_rank.columns = features_rank.columns.str.replace(r'\.0$', '', regex=True)
    # remove partial_ prefix from columns
    features_rank.columns = features_rank.columns.str.replace(r'^partial_', '', regex=True)
# decide columns to be included here
features_rank = features_rank[['store_id', 'store_name', 'franchisee_id',
'distributor_id',
'distributor_name',
'distributor_type',
'is_small', 'drug_id',
'drug_name', 'drug_type',
'lead_time', 'margin',
'total_lost', 'total_requests',
'bounce_rate', 'ff',
'lost_recency', 'success_recency',
'performance', 'rank',
'final_dist_1',
'final_dist_2',
'final_dist_3',
'store_drug_type_level_dist_1',
'store_drug_type_level_dist_2',
'store_drug_type_level_dist_3',
'volume_fraction']]
    return features_rank

# ---- end of file: zeno_etl_libs/utils/distributor_ranking/topsis.py ----
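# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original topsis.py): the TOPSIS scoring
# used by apply_topsis / apply_topsis_franchisee, reproduced on made-up toy
# numbers. Column names mirror the real features, but the values and the
# weight vector below are assumptions chosen purely for demonstration.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd
from sklearn import preprocessing

if __name__ == "__main__":
    toy = pd.DataFrame({
        'lead_time': [24.0, 48.0, 12.0],
        'margin': [0.12, 0.18, 0.10],
        'bounce_rate': [0.10, 0.30, 0.05],
        'ff': [0.95, 0.70, 0.99],
        'lost_recency': [40.0, 5.0, 90.0],
        'success_recency': [2.0, 20.0, 1.0],
    })
    # weight ordering follows the module convention:
    # [lead time, margin, bounce rate, ff, lost recency, success recency]
    weights = [0.2, 0.2, 0.2, 0.2, 0.1, 0.1]

    x_normalized = pd.DataFrame(
        preprocessing.MinMaxScaler().fit_transform(toy), columns=toy.columns)
    x_weighted = np.multiply(x_normalized, weights)

    # ideal best/worse vectors use the same min/max choices as the module
    ideal_best = x_weighted.agg(
        {'lead_time': 'min', 'margin': 'max', 'bounce_rate': 'min',
         'ff': 'max', 'lost_recency': 'max',
         'success_recency': 'min'}).reindex(toy.columns)
    ideal_worse = x_weighted.agg(
        {'lead_time': 'max', 'margin': 'min', 'bounce_rate': 'max',
         'ff': 'min', 'lost_recency': 'min',
         'success_recency': 'max'}).reindex(toy.columns)

    # euclidean distance of each row to the ideal best / ideal worse vectors
    d_best = x_weighted.apply(
        lambda r: np.linalg.norm(r.values - ideal_best.values), axis=1)
    d_worse = x_weighted.apply(
        lambda r: np.linalg.norm(r.values - ideal_worse.values), axis=1)

    performance = d_worse / (d_worse + d_best) * 100
    rank = performance.rank(method='first', ascending=False).astype(int)
    print(pd.DataFrame({'performance': performance, 'rank': rank}))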
import pandas as pd
from zeno_etl_libs.utils.distributor_ranking.pull_data import pull_data, pull_data_franchisee
from zeno_etl_libs.utils.distributor_ranking.preprocess_features import preprocess_features_dc, preprocess_features_franchisee
from zeno_etl_libs.utils.distributor_ranking.calculate_features import calculate_features
from zeno_etl_libs.utils.distributor_ranking.topsis import apply_topsis, apply_topsis_franchisee
def ranking_calc_dc(time_interval, weights_as, weights_pr, as_low_volume_cutoff,
pr_low_volume_cutoff, volume_fraction, db, read_schema, logger):
'''output distributor ranking for AS and PR separately'''
logger.info('starting to import data')
# add 7 days to time interval since we do not want to include last week's data.
time_interval = time_interval + 7
df_features, df_distributors = pull_data(time_interval, db, read_schema)
logger.info('finished importing data')
######################### preprocessing starts #########################
logger.info('started preprocessing')
df_features = preprocess_features_dc(df_features, db, read_schema)
# add distributor name and distributor features here.
df_features = pd.merge(df_features, df_distributors, on=['partial_distributor_id'],
how='left', validate='many_to_one')
logger.info('finished preprocessing')
########################## preprocessing ends ##########################
####################### features calculation starts #######################
features = calculate_features(df_features, group_cols=['partial_dc_id','partial_distributor_id','drug_id'])
    ##### add necessary columns in features #####
# add drug type column here
features = pd.merge(features, df_features[['drug_id', 'drug_type']].drop_duplicates(), on=['drug_id'],
how='left',
validate='many_to_one')
# add dist type column here
features = pd.merge(features, df_features[
['partial_distributor_id', 'partial_distributor_name', 'partial_distributor_type']].drop_duplicates(),
on=['partial_distributor_id'], how='left',
validate='many_to_one')
# add dc name here.
features = pd.merge(features, df_features[['partial_dc_id', 'dc_name']].dropna().drop_duplicates(),
on=['partial_dc_id'], validate='many_to_one', how='left')
# add drug name here
features = pd.merge(features, df_features[['drug_id', 'drug_name']].drop_duplicates(),
on=['drug_id'], validate='many_to_one', how='left')
#### apply topsis ####
# weights format is [lead time, margin, bounce rate, ff, lost recency, success recency]
x_train = features[['lead_time', 'margin', 'bounce_rate',
'ff', 'lost_recency', 'success_recency']]
features_as = apply_topsis(features=features,
x_train=x_train, weights=weights_as, cutoff_percentage=as_low_volume_cutoff,
volume_fraction=volume_fraction)
logger.info('applied topsis for as')
features_pr = apply_topsis(features=features,
x_train=x_train, weights=weights_pr, cutoff_percentage=pr_low_volume_cutoff,
volume_fraction=volume_fraction)
logger.info('applied topsis for pr')
features_as.loc[:, 'request_type'] = 'AS/MS'
features_pr.loc[:, 'request_type'] = 'PR'
features_rank = pd.concat([features_as, features_pr])
return features_rank
def ranking_calc_franchisee(time_interval, weights_as, weights_pr,
low_volume_cutoff, volume_fraction, db,
read_schema, logger):
'''output distributor ranking for AS and PR separately'''
logger.info('starting to import data')
# add 7 days to time interval since we do not want to include last week's data.
time_interval = time_interval + 7
df_features, df_distributors = pull_data_franchisee(time_interval, db, read_schema)
logger.info('finished importing data')
######################### preprocessing starts #########################
logger.info('started preprocessing')
df_features = preprocess_features_franchisee(df_features, db, read_schema)
# add distributor name and distributor features here.
df_features = pd.merge(df_features, df_distributors, on=['partial_distributor_id'],
how='left', validate='many_to_one')
logger.info('finished preprocessing')
########################## preprocessing ends ##########################
####################### features calculation starts #######################
features = calculate_features(df_features, group_cols=['store_id','partial_distributor_id', 'drug_id'])
    ##### add necessary columns in features #####
# add drug type column here
features = pd.merge(features,
df_features[['drug_id', 'drug_type']].drop_duplicates(),
on=['drug_id'],
how='left',
validate='many_to_one')
# add dist type column here
features = pd.merge(features, df_features[
['partial_distributor_id', 'partial_distributor_name',
'partial_distributor_type']].drop_duplicates(),
on=['partial_distributor_id'], how='left',
validate='many_to_one')
# add store name and franchisee_id here.
features = pd.merge(features, df_features[
['store_id', 'store_name', 'franchisee_id']].dropna().drop_duplicates(),
on=['store_id'], validate='many_to_one', how='left')
# add drug name here
features = pd.merge(features,
df_features[['drug_id', 'drug_name']].drop_duplicates(),
on=['drug_id'], validate='many_to_one', how='left')
#### apply topsis ####
# weights format is [lead time, margin, bounce rate, ff, lost recency, success recency]
x_train = features[['lead_time', 'margin', 'bounce_rate',
'ff', 'lost_recency', 'success_recency']]
features_rank_as = apply_topsis_franchisee(features=features,
x_train=x_train,
weights=weights_as,
cutoff_percentage=low_volume_cutoff,
volume_fraction=volume_fraction)
logger.info('applied topsis for franchisee as')
features_rank_pr = apply_topsis_franchisee(features=features,
x_train=x_train,
weights=weights_pr,
cutoff_percentage=low_volume_cutoff,
volume_fraction=volume_fraction)
logger.info('applied topsis for franchisee pr')
features_rank_as.loc[:, 'request_type'] = 'AS/MS'
features_rank_pr.loc[:, 'request_type'] = 'PR'
features_rank = pd.concat([features_rank_as, features_rank_pr])
    return features_rank

# ---- end of file: zeno_etl_libs/utils/distributor_ranking/distributor_ranking_calc.py ----
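# ---------------------------------------------------------------------------
# Illustrative sketch (toy data, not from the original source): ranking_calc_dc
# and ranking_calc_franchisee attach drug / distributor / store attributes with
# pd.merge(..., validate='many_to_one'). The snippet below shows what that
# validation guards against - duplicate keys on the "one" side fail loudly
# instead of silently fanning out feature rows.
# ---------------------------------------------------------------------------
import pandas as pd

if __name__ == "__main__":
    features = pd.DataFrame({'drug_id': [1, 1, 2], 'ff': [0.9, 0.8, 0.7]})
    drug_attrs = pd.DataFrame({'drug_id': [1, 2],
                               'drug_type': ['generic', 'ethical']})

    # clean lookup table: merge succeeds and row count is unchanged
    ok = pd.merge(features, drug_attrs, on='drug_id', how='left',
                  validate='many_to_one')
    print(ok)

    # lookup table with a duplicated key: validation raises MergeError
    dup_attrs = pd.DataFrame({'drug_id': [1, 1, 2],
                              'drug_type': ['generic', 'ethical', 'ethical']})
    try:
        pd.merge(features, dup_attrs, on='drug_id', how='left',
                 validate='many_to_one')
    except pd.errors.MergeError as err:
        print('validation caught duplicate keys:', err)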
import pandas as pd
import numpy as np
def preprocess_features_dc(df_features, db, read_schema):
'''df_features is the raw data variable '''
# remove those entries where no order is given to dc or the order value doesn't exist
df_features = df_features[(df_features['original_order'] > 0) & (
~df_features['original_order'].isna())]
df_features = df_features.drop_duplicates()
    # due to stock rotation, invoice_item_id can repeat, so deduplicate on it.
    # drop_duplicates treats every NaN invoice_item_id as a duplicate of the others
    # (it would keep only one not-yet-invoiced row), so split the NaN and non-NaN
    # rows, deduplicate only the non-NaN slice, and concat the NaN rows back
df_features_1 = df_features[~df_features[
'invoice_item_id'].isna()]
df_features_2 = df_features[df_features['invoice_item_id'].isna()]
df_features_1 = df_features_1.drop_duplicates(subset=['invoice_item_id'])
# concat back na values after you separate them
df_features = pd.concat([df_features_1, df_features_2])
#remove cases where selling rate is 0 otherwise margin becomes infinity
df_features = df_features[df_features['selling_rate']!=0]
# if distributor id isn't assigned in short book then remove it.
df_features = df_features[(~df_features['short_book_distributor_id'].isna())
| (~df_features['partial_distributor_id'].isna())]
# for those cases where invoice doesn't exist, take distributor as short book distributor
df_features['partial_distributor_id'] = df_features['partial_distributor_id'].fillna(
df_features['short_book_distributor_id'])
# if no dc information is present then remove those cases.
df_features = df_features[((~df_features['partial_dc_id'].isna()) | (
~df_features['forward_dc_id'].isna()))]
# for those cases where invoice doesn't exist, take invoice dc as obtained from sdm table
df_features['partial_dc_id'] = df_features['partial_dc_id'].fillna(
df_features['forward_dc_id'])
df_features['partial_created_at'] = pd.to_datetime(
df_features['partial_created_at'], errors='coerce')
df_features['partial_invoiced_at'] = pd.to_datetime(
df_features['partial_invoiced_at'], errors='coerce')
df_features['original_created_at'] = pd.to_datetime(
df_features['original_created_at'], errors='coerce')
df_features['original_created_at_2'] = pd.to_datetime(
df_features['original_created_at_2'], errors='coerce')
# append number of invoices against each sb id.
invoice_count = df_features.groupby(['short_book_1_id']).agg(
invoice_count=('invoice_id', 'count')).reset_index()
df_features = pd.merge(df_features, invoice_count, on=[
'short_book_1_id'], how='left')
# fill those cases where no invoice is present with zero.
df_features['invoice_count'] = df_features['invoice_count'].fillna(0)
# to avoid cases where wrong product is received, we compare with invoice items drugs id as well.
df_features['is_lost'] = np.where(
(df_features['invoice_items_drug_id'] != df_features['drug_id']) | (df_features['partial_invoiced_at'].isna()), 1, 0)
# for new drugs where drug id hasn't been assigned yet, ranking won't be generated until the drug id is assigned.
df_features = df_features[~df_features['drug_id'].isna()]
# remove entries for which drug type hasn't been assigned
df_features = df_features[~df_features['drug_type'].isna()]
# remove discontinued or banned products
df_features = df_features[
~((df_features['drug_type'] == 'discontinued-products') | (df_features['drug_type'] == 'banned'))]
# sanity check
assert df_features[df_features['invoice_count'] ==
0].shape[0] == df_features[df_features['invoice_id'].isna()].shape[0]
# filter out distributor-drugs entries
print('pulling drug-distributor filter data')
q_drug_distributor = """select DISTINCT "distributor-id" as "partial_distributor_id",
"drug-id" as drug_id
from "{read_schema}"."distributor-drugs" dd """
drug_distributor_list_filter = db.get_df(q_drug_distributor.format(read_schema=read_schema))
df_features = pd.merge(df_features, drug_distributor_list_filter,
on=['partial_distributor_id', 'drug_id'],
how='inner', validate='many_to_one')
return df_features
def preprocess_features_franchisee(df_features, db, read_schema):
'''df_features is the raw data variable '''
# remove those entries where no order is given to dc or the order value doesn't exist
df_features = df_features[(df_features['original_order'] > 0) & (
~df_features['original_order'].isna())]
df_features = df_features.drop_duplicates()
    # due to stock rotation, invoice_item_id can repeat, so deduplicate on it.
    # drop_duplicates treats every NaN invoice_item_id as a duplicate of the others
    # (it would keep only one not-yet-invoiced row), so split the NaN and non-NaN
    # rows, deduplicate only the non-NaN slice, and concat the NaN rows back
df_features_1 = df_features[~df_features[
'invoice_item_id'].isna()]
df_features_2 = df_features[df_features['invoice_item_id'].isna()]
df_features_1 = df_features_1.drop_duplicates(subset=['invoice_item_id'])
# concat back na values after you separate them
df_features = pd.concat([df_features_1, df_features_2])
#remove cases where selling rate is 0 otherwise margin becomes infinity
df_features = df_features[df_features['selling_rate']!=0]
# if distributor id isn't assigned in short book then remove it.
df_features = df_features[(~df_features['short_book_distributor_id'].isna())
| (~df_features['partial_distributor_id'].isna())]
# for those cases where invoice doesn't exist, take distributor as short book distributor
df_features['partial_distributor_id'] = df_features['partial_distributor_id'].fillna(
df_features['short_book_distributor_id'])
df_features['partial_created_at'] = pd.to_datetime(
df_features['partial_created_at'], errors='coerce')
df_features['partial_invoiced_at'] = pd.to_datetime(
df_features['partial_invoiced_at'], errors='coerce')
df_features['original_created_at'] = pd.to_datetime(
df_features['original_created_at'], errors='coerce')
df_features['original_created_at_2'] = pd.to_datetime(
df_features['original_created_at_2'], errors='coerce')
# append number of invoices against each sb id.
invoice_count = df_features.groupby(['short_book_1_id']).agg(
invoice_count=('invoice_id', 'count')).reset_index()
df_features = pd.merge(df_features, invoice_count, on=[
'short_book_1_id'], how='left')
# fill those cases where no invoice is present with zero.
df_features['invoice_count'] = df_features['invoice_count'].fillna(0)
# to avoid cases where wrong product is received, we compare with invoice items drugs id as well.
df_features['is_lost'] = np.where(
(df_features['invoice_items_drug_id'] != df_features['drug_id']) | (df_features['partial_invoiced_at'].isna()), 1, 0)
# for new drugs where drug id hasn't been assigned yet, ranking won't be generated until the drug id is assigned.
df_features = df_features[~df_features['drug_id'].isna()]
# remove entries for which drug type hasn't been assigned
df_features = df_features[~df_features['drug_type'].isna()]
# remove discontinued or banned products
df_features = df_features[
~((df_features['drug_type'] == 'discontinued-products') | (df_features['drug_type'] == 'banned'))]
# sanity check
assert df_features[df_features['invoice_count'] ==
0].shape[0] == df_features[df_features['invoice_id'].isna()].shape[0]
# filter out distributor-drugs entries
print('pulling drug-distributor filter data')
q_drug_distributor = """select DISTINCT "distributor-id" as "partial_distributor_id",
"drug-id" as drug_id
from "{read_schema}"."distributor-drugs" dd """
drug_distributor_list_filter = db.get_df(q_drug_distributor.format(read_schema=read_schema))
df_features = pd.merge(df_features, drug_distributor_list_filter,
on=['partial_distributor_id', 'drug_id'],
how='inner', validate='many_to_one')
    return df_features

# ---- end of file: zeno_etl_libs/utils/distributor_ranking/preprocess_features.py ----
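# ---------------------------------------------------------------------------
# Illustrative sketch (toy data, not from the original source): why the
# preprocess_features_* functions split rows on invoice_item_id before
# deduplicating. drop_duplicates(subset=['invoice_item_id']) treats every NaN
# as equal, so a plain call would keep only one of the not-yet-invoiced rows;
# deduplicating the non-NaN slice and concatenating the NaN slice back
# preserves them all.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd

if __name__ == "__main__":
    df = pd.DataFrame({'invoice_item_id': [101, 101, np.nan, np.nan],
                       'drug_id': [1, 1, 2, 3]})

    naive = df.drop_duplicates(subset=['invoice_item_id'])
    print(len(naive))   # 2 -> one of the NaN (not-yet-invoiced) rows was lost

    non_na = df[~df['invoice_item_id'].isna()].drop_duplicates(
        subset=['invoice_item_id'])
    na = df[df['invoice_item_id'].isna()]
    safe = pd.concat([non_na, na])
    print(len(safe))    # 3 -> both NaN rows kept, the true duplicate dropped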
import pandas as pd
def postprocess_ranking_dc(features_rank, volume_fraction):
tech_input = features_rank.copy()
# drop cases for tech input where all 3 distributor assigned are NULL.
# Since they automatically need to go to dc drug type level.
tech_input = tech_input[~((tech_input['final_dist_1'].isna()) & (
tech_input['final_dist_2'].isna()) & (
tech_input['final_dist_3'].isna()))]
tech_input['final_dist_1'] = tech_input['final_dist_1'].fillna(
tech_input['final_dist_2'])
tech_input['final_dist_1'] = tech_input['final_dist_1'].fillna(
tech_input['final_dist_3'])
tech_input.loc[tech_input['final_dist_1'] ==
tech_input['final_dist_2'], 'final_dist_2'] = float('NaN')
tech_input.loc[tech_input['final_dist_1'] ==
tech_input['final_dist_3'], 'final_dist_3'] = float('NaN')
tech_input['final_dist_2'] = tech_input['final_dist_2'].fillna(
tech_input['final_dist_3'])
tech_input.loc[tech_input['final_dist_2'] ==
tech_input['final_dist_3'], 'final_dist_3'] = float('NaN')
# append dc_drug_type entries as separate rows in tech input
dc_drug_type_entries = features_rank[
['dc_id', 'drug_type', 'request_type', 'dc_drug_type_level_dist_1',
'dc_drug_type_level_dist_2',
'dc_drug_type_level_dist_3']].drop_duplicates().rename(
columns={'dc_drug_type_level_dist_1': 'final_dist_1',
'dc_drug_type_level_dist_2': 'final_dist_2',
'dc_drug_type_level_dist_3': 'final_dist_3'
})
dc_drug_type_entries['drug_id'] = float('NaN')
dc_drug_type_entries['volume_fraction'] = volume_fraction
dc_drug_type_entries = dc_drug_type_entries[
['dc_id', 'drug_id', 'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
tech_input = pd.concat([tech_input, dc_drug_type_entries])
# append enterprise_drug_type entries as separate rows in tech input
enterprise_drug_type_entries = features_rank[
['drug_type', 'request_type', 'enterprise_drug_type_level_dist_1',
'enterprise_drug_type_level_dist_2',
'enterprise_drug_type_level_dist_3']].drop_duplicates().rename(
columns={'enterprise_drug_type_level_dist_1': 'final_dist_1',
'enterprise_drug_type_level_dist_2': 'final_dist_2',
'enterprise_drug_type_level_dist_3': 'final_dist_3'})
enterprise_drug_type_entries['dc_id'] = float('NaN')
enterprise_drug_type_entries['drug_id'] = float('NaN')
enterprise_drug_type_entries['volume_fraction'] = volume_fraction
enterprise_drug_type_entries = enterprise_drug_type_entries[
['dc_id', 'drug_id', 'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
tech_input = pd.concat([tech_input, enterprise_drug_type_entries])
tech_input["store_id"] = float('NaN')
tech_input["franchisee_id"] = 1 # ZIPPIN PHARMA
tech_input = tech_input[['dc_id', 'store_id', 'franchisee_id', 'drug_id',
'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
tech_input = tech_input.drop_duplicates()
return tech_input
def postprocess_ranking_franchisee(features_rank, volume_fraction):
tech_input = features_rank.copy()
# drop cases for tech input where all 3 distributor assigned are NULL.
# Since they automatically need to go to store drug type level.
tech_input = tech_input[~((tech_input['final_dist_1'].isna()) & (
tech_input['final_dist_2'].isna()) & (tech_input['final_dist_3'].isna()))]
tech_input['final_dist_1'] = tech_input['final_dist_1'].fillna(
tech_input['final_dist_2'])
tech_input['final_dist_1'] = tech_input['final_dist_1'].fillna(
tech_input['final_dist_3'])
tech_input.loc[tech_input['final_dist_1'] ==
tech_input['final_dist_2'], 'final_dist_2'] = float('NaN')
tech_input.loc[tech_input['final_dist_1'] ==
tech_input['final_dist_3'], 'final_dist_3'] = float('NaN')
tech_input['final_dist_2'] = tech_input['final_dist_2'].fillna(tech_input['final_dist_3'])
tech_input.loc[tech_input['final_dist_2'] ==
tech_input['final_dist_3'], 'final_dist_3'] = float('NaN')
tech_input = tech_input[['store_id', 'franchisee_id', 'drug_id', 'drug_type',
'request_type', 'volume_fraction','final_dist_1', 'final_dist_2', 'final_dist_3']]
# append store_drug_type entries as separate rows in tech input
store_drug_type_entries = features_rank[
['store_id', 'franchisee_id', 'drug_type', 'request_type',
'store_drug_type_level_dist_1', 'store_drug_type_level_dist_2',
'store_drug_type_level_dist_3']].drop_duplicates().rename(
columns={'store_drug_type_level_dist_1': 'final_dist_1',
'store_drug_type_level_dist_2': 'final_dist_2',
'store_drug_type_level_dist_3': 'final_dist_3'
})
store_drug_type_entries['drug_id'] = float('NaN')
store_drug_type_entries['volume_fraction'] = volume_fraction
store_drug_type_entries = store_drug_type_entries[
['store_id', 'franchisee_id', 'drug_id', 'drug_type', 'request_type',
'volume_fraction', 'final_dist_1', 'final_dist_2', 'final_dist_3']]
tech_input = pd.concat([tech_input, store_drug_type_entries], sort=False)
tech_input['dc_id'] = float('NaN')
tech_input = tech_input[['dc_id', 'store_id', 'franchisee_id', 'drug_id',
'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
tech_input = tech_input.drop_duplicates()
    return tech_input

# ---- end of file: zeno_etl_libs/utils/distributor_ranking/postprocess_ranking.py ----
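# ---------------------------------------------------------------------------
# Illustrative sketch (toy data, not from the original source): the
# fillna-and-null-out cascade used in postprocess_ranking_* to compact
# final_dist_1/2/3, so that rank 1 is always populated and no distributor
# appears twice for the same row.
# ---------------------------------------------------------------------------
import pandas as pd

if __name__ == "__main__":
    t = pd.DataFrame({'final_dist_1': [None, 10.0, 20.0],
                      'final_dist_2': [11.0, 10.0, None],
                      'final_dist_3': [12.0, 13.0, 20.0]})

    # pull lower ranks up into empty slots, then blank out duplicates
    t['final_dist_1'] = t['final_dist_1'].fillna(t['final_dist_2'])
    t['final_dist_1'] = t['final_dist_1'].fillna(t['final_dist_3'])
    t.loc[t['final_dist_1'] == t['final_dist_2'], 'final_dist_2'] = float('NaN')
    t.loc[t['final_dist_1'] == t['final_dist_3'], 'final_dist_3'] = float('NaN')
    t['final_dist_2'] = t['final_dist_2'].fillna(t['final_dist_3'])
    t.loc[t['final_dist_2'] == t['final_dist_3'], 'final_dist_3'] = float('NaN')

    print(t)  # each row ends with distinct distributors packed left-to-right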
from functools import reduce
import numpy as np
import pandas as pd
def calculate_features(df_features, group_cols):
"""
DC-LEVEL: group_cols=['partial_dc_id','partial_distributor_id', 'drug_id']
FRANCHISEE-LEVEL: group_cols=['store_id','partial_distributor_id', 'drug_id']
"""
'''outputs the calculated features when supplied with raw data'''
dfx = df_features[df_features['invoice_count'] != 0]
####################### feature calculation starts ########################
####lead time calculations ########
df_lead_time = df_features.copy()
cond_0 = (df_lead_time['invoice_count'] == 0)
df_lead_time['lead_time'] = float('NaN')
    # where no re-order timestamp exists and an invoice exists,
    # lead time is invoiced-at minus created-at
df_lead_time['lead_time'] = np.where((df_lead_time['original_created_at_2'].isna()) & (~cond_0),
(df_lead_time['partial_invoiced_at'] -
df_lead_time['original_created_at']).astype('timedelta64[h]'),
df_lead_time['lead_time'])
# for cases where invoiced_at - reordered_at < 8, lead time is unreliable.
df_lead_time['lead_time'] = np.where(
(~df_lead_time['original_created_at_2'].isna() |
(((df_lead_time['partial_invoiced_at'] -
df_lead_time['original_created_at_2']).astype('timedelta64[h]')) > 8))
& (~cond_0),
(df_lead_time['partial_invoiced_at'] -
df_lead_time['original_created_at_2']).astype('timedelta64[h]'),
df_lead_time['lead_time'])
df_lead_time['lead_time'] = np.where(
(~df_lead_time['original_created_at_2'].isna() |
(((df_lead_time['partial_invoiced_at'] -
df_lead_time['original_created_at_2']).astype('timedelta64[h]')) < 8))
& (~cond_0),
(df_lead_time['partial_invoiced_at'] -
df_lead_time['original_created_at']).astype('timedelta64[h]'),
df_lead_time['lead_time'])
# invoice count 0, take lead time as max value
# This is done because we are eventually scaling things between 0 to 1.
df_lead_time['lead_time'] = np.where(cond_0,
df_lead_time['lead_time'].max(),
df_lead_time['lead_time'])
    # if lead times under 8 hours still remain after the handling above, they are
    # unreliable; replace them with the mean of lead times above 8 hours
df_lead_time.loc[df_lead_time['lead_time'] < 8, 'lead_time'] = df_lead_time[df_lead_time['lead_time'] > 8][
'lead_time'].mean()
# lead time for a distributor per drug id is the average of lead time per order.
df_lead_time = df_lead_time.groupby(group_cols).agg(
lead_time=('lead_time', 'mean')).reset_index()
# sanity check
assert df_lead_time.shape[0] == \
df_features[group_cols].drop_duplicates().shape[0]
print('finished lead time calculations')
####### margin calculation starts #######
df_margin = dfx.copy()
df_margin['margin'] = (df_margin['selling_rate'] -
df_margin['distributor_rate']) / df_margin['selling_rate']
df_margin = df_margin.groupby(group_cols).agg(margin=('margin', 'mean')).reset_index()
# sanity check
assert df_margin.shape[0] == dfx[group_cols].drop_duplicates().shape[0]
print('finished margin calculations')
####### bounce rate calculation #######
df_br = df_features.groupby(group_cols).agg(
total_lost=('is_lost', 'sum'),
total_requests=('is_lost', 'count')).reset_index()
df_br['bounce_rate'] = (df_br['total_lost']) / df_br['total_requests']
# sanity check
assert df_br.shape[0] == df_features[group_cols].drop_duplicates().shape[0]
print('finished bounce rate calculations')
####### ff calculation #######
df_sorted = dfx.groupby(['short_book_1_id'], as_index=False).apply(
lambda x: x.sort_values(by=['partial_invoiced_at']))
# for multiple invoices, calculate cumulative fulfilled quantities
df_sorted = df_sorted.groupby(['short_book_1_id']).apply(
lambda x: x['partial_quantity'].cumsum()).reset_index().rename(
columns={'partial_quantity': 'cum_partial_quantity'})
df_sorted = df_sorted.set_index('level_1')
df_fulfillment = pd.merge(df_sorted, dfx, left_index=True,
right_index=True, how='left', suffixes=('', '_y'))
# assert df_fulfillment['short_book_1_id'].equals(
# df_fulfillment['short_book_1_id_y'])
df_fulfillment = df_fulfillment[
['short_book_1_id'] + group_cols + ['original_order', 'partial_quantity',
'cum_partial_quantity']]
# cum required quantity is quantity left after subtracting cum quantity from all previous invoices.
df_fulfillment['cum_required_quantity'] = df_fulfillment['original_order'] - \
df_fulfillment['cum_partial_quantity']
    # the actual required quantity when an order is placed is the quantity
    # left unfulfilled by the previous invoice, hence the shift by 1
df_fulfillment['actual_required'] = df_fulfillment.groupby(
['short_book_1_id']).shift(1)['cum_required_quantity']
# fill single invoices with the original order
df_fulfillment['actual_required'] = df_fulfillment['actual_required'].fillna(
df_fulfillment['original_order'])
# put actual required = 0 when ordered exceeds required.
df_fulfillment.loc[df_fulfillment['actual_required']
< 0, 'actual_required'] = 0
df_fulfillment['redundant_order_flag'] = np.where(
df_fulfillment['actual_required'] == 0, 1, 0)
df_fulfillment = df_fulfillment[['short_book_1_id'] + group_cols +
['original_order', 'partial_quantity', 'actual_required', 'redundant_order_flag']]
df_fulfillment['ff'] = df_fulfillment['partial_quantity'] / \
df_fulfillment['actual_required']
    # where nothing was required but quantity was still supplied, cap ff at 1;
    # these redundant orders are excluded from the ff aggregation below anyway
    df_fulfillment.loc[(df_fulfillment['actual_required'] == 0) & (
        df_fulfillment['partial_quantity'] > 0), 'ff'] = 1
df_fulfillment.loc[(df_fulfillment['ff'] > 1), 'ff'] = 1
# removed redundant orders here.
df_ff = df_fulfillment[df_fulfillment['redundant_order_flag'] != 1].groupby(group_cols).agg(ff=('ff', 'mean')).reset_index()
print('finished ff calculations')
####### recency lost calculations #######
# number of days ago it was marked lost.
df_recency_lost = df_features[df_features['is_lost'] == 1].groupby(group_cols).agg(
max_lost_date=('original_created_at', 'max')).reset_index()
    df_recency_lost['lost_recency'] = (
        pd.Timestamp.today() - pd.to_datetime(df_recency_lost['max_lost_date'])).dt.days
df_recency_lost = df_recency_lost[group_cols + ['lost_recency']]
####### recency success calculations #######
# number of days ago it was marked success
df_recency_success = df_features[df_features['is_lost'] == 0].groupby(group_cols).agg(
max_success_date=('original_created_at', 'max')).reset_index()
    df_recency_success['success_recency'] = (
        pd.Timestamp.today() - pd.to_datetime(df_recency_success['max_success_date'])).dt.days
df_recency_success = df_recency_success[group_cols + ['success_recency']]
print('finished recency calculations')
######################## feature calculation ends #########################
################## compiling all the features #############################
meg_list = [df_lead_time, df_margin, df_br,
df_ff, df_recency_lost, df_recency_success]
features = reduce(
lambda left, right: pd.merge(left, right,
on=group_cols,
how='outer'), meg_list)
# lead_time: Replace lead time NA (i.e. bounce rate 1) with max lead time.
features['lead_time'] = features['lead_time'].fillna(
features['lead_time'].max())
# margin
features['margin'] = features['margin'].fillna(features['margin'].mean())
# ff
features.loc[(features['ff'].isna()) & (
features['bounce_rate'] == 1), 'ff'] = 0
features['ff'] = features['ff'].fillna(features['ff'].mean())
# for bounce rate = 0.
features['lost_recency'] = features['lost_recency'].fillna(
features['lost_recency'].max())
# for bounce rate = 1
features['success_recency'] = features['success_recency'].fillna(
features['success_recency'].max())
print('finished compiling features')
    return features

# ---- end of file: zeno_etl_libs/utils/distributor_ranking/calculate_features.py ----
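# ---------------------------------------------------------------------------
# Illustrative sketch (toy data, not from the original source): the fill-rate
# (ff) logic in calculate_features for a short book served by two invoices.
# The cumulative fulfilled quantity is shifted by one invoice so that each
# order is compared against the quantity actually still required at the time
# it was placed.
# ---------------------------------------------------------------------------
import pandas as pd

if __name__ == "__main__":
    # two invoices for the same short book, already ordered by invoiced-at
    dfx = pd.DataFrame({'short_book_1_id': [1, 1],
                        'original_order': [10, 10],
                        'partial_quantity': [6, 4]})

    dfx['cum_partial_quantity'] = dfx.groupby(
        'short_book_1_id')['partial_quantity'].cumsum()
    dfx['cum_required_quantity'] = dfx['original_order'] - dfx['cum_partial_quantity']
    dfx['actual_required'] = dfx.groupby(
        'short_book_1_id')['cum_required_quantity'].shift(1)
    dfx['actual_required'] = dfx['actual_required'].fillna(dfx['original_order'])
    dfx['ff'] = (dfx['partial_quantity'] / dfx['actual_required']).clip(upper=1)

    print(dfx[['partial_quantity', 'actual_required', 'ff']])
    # invoice 1: 6/10 = 0.6, invoice 2: 4/4 = 1.0 -> mean ff = 0.8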
Q_FEATURES = """
select
sb.id as "short-book-1-id" ,
sb."ordered-distributor-id" as "short-book-distributor-id",
sb."drug-id",
coalesce(sb.quantity, 0) as "original-order",
sb."required-quantity" as "final-unfulfilled",
sb."created-at" as "original-created-at",
sb."re-ordered-at" as "original-created-at-2",
sbi."quantity" as "partial-quantity",
i.id as "invoice-id",
i."dc-id" as "partial-dc-id",
i."distributor-id" as "partial-distributor-id",
i."created-at" as "partial-created-at",
i."approved-at" as "partial-invoiced-at",
ii.id as "invoice-item-id",
ii."drug-id" as "invoice-items-drug-id",
inv.id as "inventory-id",
inv."invoice-item-id" as "inv-invoice-item-id",
inv."purchase-rate" as "distributor-rate",
inv."selling-rate",
d."drug-name",
d.type as "drug_type",
sdm."forward-dc-id",
s.name as "dc-name"
from
"{read_schema}"."short-book-1" sb
left join "{read_schema}"."short-book-invoices" sbi on
sbi."short-book-id" = sb.id
left join "{read_schema}".invoices i on
sbi."invoice-id" = i.id
left join "{read_schema}"."short-book-invoice-items" sbii on
sb.id = sbii."short-book-id"
left join "{read_schema}"."invoice-items" ii on
ii.id = sbii."invoice-item-id"
left join "{read_schema}"."invoice-items-1" ii1 on
ii1."invoice-item-reference" = ii.id
left join "{read_schema}"."inventory-1" inv on
inv."invoice-item-id" = ii1.id
left join "{read_schema}".drugs d on
sb."drug-id" = d.id
left join "{read_schema}".distributors dis on
dis.id = sb."ordered-distributor-id"
left join "{read_schema}"."store-dc-mapping" sdm on
sb."store-id" = sdm."store-id"
and dis.type = sdm."drug-type"
left join "{read_schema}".stores s on
i."dc-id" = s.id
where
DATEDIFF(day, date(sb."created-at"), current_date) <= {0}
and DATEDIFF(day, date(sb."created-at"), current_date) >= 7
and sb."quantity" > 0
and sb."ordered-distributor-id" != 76
and sb."ordered-distributor-id" != 5000
and sb."ordered-distributor-id" != 8105
and i."distributor-id" != 8105
and sb.status != 'deleted'
"""
Q_FEATURES_FRANCHISEE = """
select
sb.id as "short-book-1-id" ,
sb."ordered-distributor-id" as "short-book-distributor-id",
sb."store-id",
ss."franchisee-id",
sb."drug-id",
coalesce(sb.quantity, 0) as "original-order",
sb."required-quantity" as "final-unfulfilled",
sb."created-at" as "original-created-at",
sb."re-ordered-at" as "original-created-at-2",
sbi."quantity" as "partial-quantity",
i.id as "invoice-id",
i."distributor-id" as "partial-distributor-id",
i."created-at" as "partial-created-at",
i."approved-at" as "partial-invoiced-at",
ii.id as "invoice-item-id",
ii."drug-id" as "invoice-items-drug-id",
inv.id as "inventory-id",
inv."invoice-item-id" as "inv-invoice-item-id",
inv."purchase-rate" as "distributor-rate",
inv."selling-rate",
d."drug-name",
d.type as "drug_type",
ss."name" as "store-name"
from
"{read_schema}"."short-book-1" sb
left join "{read_schema}"."short-book-invoices" sbi on
sbi."short-book-id" = sb.id
left join "{read_schema}".invoices i on
sbi."invoice-id" = i.id
left join "{read_schema}"."short-book-invoice-items" sbii on
sb.id = sbii."short-book-id"
left join "{read_schema}"."invoice-items" ii on
ii.id = sbii."invoice-item-id"
left join "{read_schema}"."invoice-items-1" ii1 on
ii1."invoice-item-reference" = ii.id
left join "{read_schema}"."inventory-1" inv on
inv."invoice-item-id" = ii1.id
left join "{read_schema}".drugs d on
sb."drug-id" = d.id
left join "{read_schema}".distributors dis on
dis.id = sb."ordered-distributor-id"
left join "{read_schema}".stores s on
i."dc-id" = s.id
left join "{read_schema}".stores ss on
sb."store-id" = ss.id
where
DATEDIFF(day, date(sb."created-at"), current_date) <= {0}
and DATEDIFF(day, date(sb."created-at"), current_date) >= 7
and sb."quantity" > 0
and sb."ordered-distributor-id" != 76
and sb."ordered-distributor-id" != 5000
and sb."ordered-distributor-id" != 8105
and i."distributor-id" != 8105
and sb.status != 'deleted'
and ss."franchisee-id" != 1
"""
Q_DISTRIBUTORS = """
select id as "partial-distributor-id",
name as "partial-distributor-name",
type as "partial-distributor-type"
from "{read_schema}".distributors
"""
def pull_data(time_interval, db, read_schema):
''' time interval is the buffer cutoff after which data isn't taken. 7 days in our case'''
df_features = db.get_df(Q_FEATURES.format(time_interval,
read_schema=read_schema))
df_features.columns = [c.replace('-', '_') for c in df_features.columns]
df_distributors = db.get_df(Q_DISTRIBUTORS.format(read_schema=read_schema))
df_distributors.columns = [c.replace('-', '_') for c in df_distributors.columns]
# ensure data types
df_features["distributor_rate"] = df_features["distributor_rate"].astype(float)
df_features["selling_rate"] = df_features["selling_rate"].astype(float)
return df_features, df_distributors
def pull_data_franchisee(time_interval, db, read_schema):
''' time interval is the buffer cutoff after which data isn't taken. 7 days in our case'''
df_features = db.get_df(Q_FEATURES_FRANCHISEE.format(time_interval,
read_schema=read_schema))
df_features.columns = [c.replace('-', '_') for c in df_features.columns]
df_distributors = db.get_df(Q_DISTRIBUTORS.format(read_schema=read_schema))
df_distributors.columns = [c.replace('-', '_') for c in df_distributors.columns]
# ensure data types
df_features["distributor_rate"] = df_features["distributor_rate"].astype(float)
df_features["selling_rate"] = df_features["selling_rate"].astype(float)
    return df_features, df_distributors

# ---- end of file: zeno_etl_libs/utils/distributor_ranking/pull_data.py ----
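# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original pull_data.py): the two small
# conventions the pull functions rely on - filling the SQL template with a
# positional time interval plus a named read_schema (the schema name below is
# only a placeholder), and normalising hyphenated SQL aliases to snake_case
# before casting rate columns to float.
# ---------------------------------------------------------------------------
import pandas as pd

if __name__ == "__main__":
    template = ('select "drug-id" from "{read_schema}".drugs '
                'where DATEDIFF(day, date("created-at"), current_date) <= {0}')
    print(template.format(90, read_schema='my_schema'))  # 'my_schema' is a placeholder

    df = pd.DataFrame({'short-book-1-id': [1], 'distributor-rate': ['12.5']})
    df.columns = [c.replace('-', '_') for c in df.columns]
    df['distributor_rate'] = df['distributor_rate'].astype(float)
    print(df.dtypes)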
from datetime import timedelta
class GetData:
"""To fetch required data from SQL and PostGre"""
def __init__(self, store_ids, reset_date, days_to_replenish, days_delta,
db, schema, logger):
"""
Arguments:
store_ids: (list) store id list
reset_date: (datetime.date) format
days_to_replenish: (int) days to skip for inventory replenishment
from date of reset
"""
self.store_ids = str(store_ids).replace('[', '(').replace(']', ')')
self.reset_date = reset_date.strftime('%Y-%m-%d')
self.date_before_90days = (reset_date - timedelta(90)).strftime('%Y-%m-%d')
self.start_date = (reset_date + timedelta(days_to_replenish)).strftime('%Y-%m-%d')
self.end_date = (reset_date + timedelta(days_to_replenish+days_delta-1)).strftime('%Y-%m-%d')
# considering sales period of 28 days (start & end date included in sql)
self.db = db
self.schema = schema
self.logger = logger
def ipc_ss(self, store_id, sql_cut_off_condition):
"""Fetch safety stock table for current IPC store and reset date"""
self.logger.info(f"Fetching ipc_ss data for store_id: {store_id}")
q_ss = """
select *
from "{schema}"."ipc-safety-stock"
where "store-id" = {0} and "reset-date" = '{1}'
{2}
""".format(store_id, self.reset_date, sql_cut_off_condition,
schema=self.schema)
df_ss = self.db.get_df(q_ss)
df_ss.columns = [c.replace('-', '_') for c in df_ss.columns]
df_ss["store_type"] = "ipc"
return df_ss
def non_ipc_ss(self, store_id, sql_cut_off_condition):
"""Fetch safety stock table for current Non-IPC store and reset date"""
self.logger.info(f"Fetching non_ipc_ss data for store_id: {store_id}")
q_ss = """
select *
from "{schema}"."non-ipc-safety-stock"
where "store-id" = {0} and "reset-date" = '{1}'
{2}
""".format(store_id, self.reset_date, sql_cut_off_condition,
schema=self.schema)
df_ss = self.db.get_df(q_ss)
df_ss.columns = [c.replace('-', '_') for c in df_ss.columns]
df_ss["store_type"] = "non_ipc"
return df_ss
def ipc2_ss(self, store_id, sql_cut_off_condition):
"""Fetch safety stock table for IPC2.0 store and reset date"""
self.logger.info(f"Fetching ipc2_ss data for store_id: {store_id}")
q_ss = """
select *
from "{schema}"."ipc2-safety-stock"
where "store-id" = {0} and "reset-date" = '{1}'
{2}
""".format(store_id, self.reset_date, sql_cut_off_condition,
schema=self.schema)
df_ss = self.db.get_df(q_ss)
df_ss.columns = [c.replace('-', '_') for c in df_ss.columns]
df_ss["store_type"] = "ipc2"
return df_ss
def curr_inv(self):
"""Fetch current inventory for all stores"""
self.logger.info("Fetching inventory data")
q_inv = """
SELECT "drug-id" as drug_id,
"store-id" as store_id,
AVG(ptr) AS average_ptr,
SUM("locked-quantity"+quantity+"locked-for-audit"+"locked-for-transfer"
+"locked-for-check"+"locked-for-return") AS current_inventory
FROM "{schema}"."inventory-1"
WHERE "store-id" in {0}
GROUP BY "store-id", "drug-id"
""".format(self.store_ids, schema=self.schema)
df_inv_comb = self.db.get_df(q_inv)
return df_inv_comb
def sales_3m(self):
"""Fetch last 3 months sales data for finding weather NPI or not."""
self.logger.info("Fetching 3 months sales data")
q_3m_sales = """
select
"drug-id", "store-id",
sum("net-quantity") as "net-sales-3m"
from "{schema}".sales
where "store-id" in {0} and
date("created-at") between '{1}' and '{2}'
group by "store-id", "drug-id"
""".format(self.store_ids, self.date_before_90days, self.reset_date,
schema=self.schema)
df_3m_sales_comb = self.db.get_df(q_3m_sales)
df_3m_sales_comb.columns = [c.replace('-', '_') for c in df_3m_sales_comb.columns]
return df_3m_sales_comb
def sales_28day(self):
"""Fetch 28 days sales data after date of reset"""
self.logger.info("Fetching 28 days sales data")
q_sales = """
select
"drug-id", "store-id",
sum("net-quantity") as "net-sales"
from "{schema}".sales
where "store-id" in {0} and
date("created-at") between '{1}' and '{2}'
group by "store-id", "drug-id"
""".format(self.store_ids, self.start_date, self.end_date,
schema=self.schema)
df_sales_comb = self.db.get_df(q_sales)
df_sales_comb.columns = [c.replace('-', '_') for c in df_sales_comb.columns]
return df_sales_comb
def pr_loss_28day(self):
"""Fetch 28 days PR losses after date of reset"""
self.logger.info("Fetching 28 days pr loss data")
q_pr = """
select "drug-id", "store-id",
sum("loss-quantity") as "pr-loss"
from "{schema}"."cfr-patient-request"
where "shortbook-date" between '{1}' and '{2}'
and "store-id" in {0}
group by "store-id", "drug-id"
""".format(self.store_ids, self.start_date, self.end_date,
schema=self.schema)
df_pr_loss_comb = self.db.get_df(q_pr)
df_pr_loss_comb.columns = [c.replace('-', '_') for c in df_pr_loss_comb.columns]
df_pr_loss_comb["pr_loss"] = df_pr_loss_comb["pr_loss"].astype(float)
        return df_pr_loss_comb

# ---- end of file: zeno_etl_libs/utils/fcst_performance/get_data.py ----
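# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original get_data.py): how GetData derives
# its measurement windows from the reset date. The dates and parameters below
# are arbitrary example values.
# ---------------------------------------------------------------------------
from datetime import date, timedelta

if __name__ == "__main__":
    reset_date = date(2022, 6, 1)
    days_to_replenish, days_delta = 4, 28

    date_before_90days = reset_date - timedelta(90)         # NPI look-back start
    start_date = reset_date + timedelta(days_to_replenish)  # sales window start
    end_date = reset_date + timedelta(days_to_replenish + days_delta - 1)  # inclusive end

    print(date_before_90days, start_date, end_date)  # 2022-03-03 2022-06-05 2022-07-02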
from datetime import timedelta
def get_store_ids(reset_date, exclude_stores, db, schema):
"""
    Get IPC, Non-IPC and IPC2.0 store-ids which were reset on the specified reset date
    Parameters:
        reset_date: (datetime.date) format
        exclude_stores: (list) store ids to exclude
    Returns:
        store_ids: (list) of ipc, non-ipc and ipc2 store ids
        store_type_map: (list) of corresponding store types ("ipc", "non_ipc", "ipc2")
"""
reset_date = reset_date.strftime('%Y-%m-%d')
if not exclude_stores:
exclude_stores = "(0)"
else:
exclude_stores = tuple(exclude_stores)
# Get list of all store_ids
q_stores = f"""
select "id", name, "opened-at" as opened_at
from "{schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {exclude_stores}
"""
stores_list = list(db.get_df(q_stores)["id"])
stores_list_sql = str(stores_list).replace('[', '(').replace(']', ')')
# Get list of IPC stores which was reset on specified reset date
q_ipc = """
select distinct "store-id"
from "{schema}"."ipc-safety-stock"
where "store-id" in {0} and "reset-date" = '{1}'
""".format(stores_list_sql, reset_date, schema=schema)
ipc_stores = list(db.get_df(q_ipc)["store-id"])
# Get list of Non-IPC stores which was reset on specified reset date
q_non_ipc = """
select distinct "store-id"
from "{schema}"."non-ipc-safety-stock"
where "store-id" in {0} and "reset-date" = '{1}'
""".format(stores_list_sql, reset_date, schema=schema)
non_ipc_stores = list(db.get_df(q_non_ipc)["store-id"])
    # Get list of IPC2.0 stores which were reset on the specified reset date
q_ipc2 = """
select distinct "store-id"
from "{schema}"."ipc2-safety-stock"
where "store-id" in {0} and "reset-date" = '{1}'
""".format(stores_list_sql, reset_date, schema=schema)
ipc2_stores = list(db.get_df(q_ipc2)["store-id"])
store_ids = ipc_stores + non_ipc_stores + ipc2_stores
store_type_map = ["ipc"] * len(ipc_stores) \
+ ["non_ipc"] * len(non_ipc_stores) \
+ ["ipc2"] * len(ipc2_stores)
return store_ids, store_type_map
def handle_multiple_resets(reset_date, store_id, store_type, db, schema, logger):
"""
    Check if multiple resets occurred on the specified reset date
    Parameters:
        reset_date: (datetime.date) format
        store_id: (int) format
        store_type: (str) one of "ipc", "non_ipc" or "ipc2"
Returns:
sql_cut_off_condition: (str) sql condition to use in query for taking
only the latest reset that occurred.
"""
sql_reset_date = reset_date.strftime('%Y-%m-%d')
if store_type == "ipc":
sql_store_type = "ipc"
elif store_type == "non_ipc":
sql_store_type = "non-ipc"
else:
sql_store_type = "ipc2"
q_drug = """
select "drug-id"
from "{schema}"."{0}-safety-stock"
where "store-id" = {1} and "reset-date" = '{2}'
limit 1
""".format(sql_store_type, store_id, sql_reset_date, schema=schema)
rand_drug_id = db.get_df(q_drug)["drug-id"][0]
q_upload_time = """
select *
from "{schema}"."{0}-safety-stock"
where "store-id" = {1} and "reset-date" = '{2}' and "drug-id" = {3}
order by "updated-at" desc
""".format(sql_store_type, store_id, sql_reset_date, rand_drug_id,
schema=schema)
df_upload_time = db.get_df(q_upload_time)["updated-at"]
reset_count = df_upload_time.shape[0]
if reset_count > 1:
logger.info(f"Multiple resets detected for store_id: {store_id}")
cut_off_datetime = df_upload_time[0] - timedelta(minutes=1)
sql_cut_off_condition = """ and "updated-at" > '{}' """.format(
str(cut_off_datetime))
else:
sql_cut_off_condition = ""
    return sql_cut_off_condition

# ---- end of file: zeno_etl_libs/utils/fcst_performance/helper_functions.py ----
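# ---------------------------------------------------------------------------
# Illustrative sketch (toy timestamps, not from the original source): the
# cut-off condition built by handle_multiple_resets when a store was reset
# more than once on the same date - only rows uploaded within a minute before
# the latest upload survive the added SQL filter.
# ---------------------------------------------------------------------------
import pandas as pd
from datetime import timedelta

if __name__ == "__main__":
    # "updated-at" values ordered latest first, as returned by the query
    upload_times = pd.Series(pd.to_datetime(['2022-06-01 18:05:00',
                                             '2022-06-01 11:30:00']))
    if upload_times.shape[0] > 1:
        cut_off_datetime = upload_times[0] - timedelta(minutes=1)
        sql_cut_off_condition = """ and "updated-at" > '{}' """.format(
            str(cut_off_datetime))
    else:
        sql_cut_off_condition = ""
    print(sql_cut_off_condition)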
from datetime import date
from zeno_etl_libs.utils.fcst_performance import metric_calc
import pandas as pd
def cal_fields_store_drug_level(df_ss, df_inv, df_sales, df_pr_loss, df_3m_sales):
"""
Calculate all the fields for store-drug level forecast performance assessment
Parameters:
df_ss: (pd.DataFrame) safety stock data IPC or Non-IPC
df_inv: (pd.DataFrame) current inventory data
df_sales: (pd.DataFrame) 28 days sales data
df_pr_loss: (pd.DataFrame) 28 days pr loss data
df_3m_sales: (pd.DataFrame) 3 month sales before reset (for NPI)
Returns:
        df_sdl: (pd.DataFrame) of store-drug level performance metrics
"""
# Merge Inventory and NPI dataframe
df_inv_npi = pd.merge(df_inv, df_3m_sales, on="drug_id", how="left")
df_inv_npi.net_sales_3m.fillna(0, inplace=True)
df_inv_npi['is_npi'] = (df_inv_npi['net_sales_3m'] == 0)
# Merge sales and PR loss dataframe
df_sales_pr = pd.merge(df_sales, df_pr_loss, on="drug_id", how="left")
df_sales_pr.pr_loss.fillna(0, inplace=True)
# Merge inventory, NPI, sales and PR loss dataframes
df_merged = pd.merge(df_inv_npi, df_sales_pr, on="drug_id", how="left")
df_merged.net_sales.fillna(0, inplace=True)
df_merged.pr_loss.fillna(0, inplace=True)
df_merged = df_merged[["drug_id", "current_inventory", "is_npi",
"net_sales", "pr_loss"]]
df_merged.rename(columns={"net_sales": "net_sales_28days",
"pr_loss": "pr_loss_28days"}, inplace=True)
# Merge all collected data with SS table
df_all_combined = pd.merge(df_ss, df_merged, on="drug_id", how="left")
df_all_combined = df_all_combined[df_all_combined['drug_name'].notna()]
df_all_combined.current_inventory.fillna(0, inplace=True)
df_all_combined.net_sales_28days.fillna(0, inplace=True)
df_all_combined.pr_loss_28days.fillna(0, inplace=True)
df_all_combined.is_npi.fillna(True, inplace=True)
# Creating dataframe of required format
df_all_combined.rename(columns={"curr_inventory": "inventory_at_reset",
"std": "fcst_std", "type": "drug_type",
"current_inventory": "inventory_at_measurement",
"avg_ptr": "fptr"},
inplace=True)
df_all_combined["is_npi"] = df_all_combined["is_npi"].apply(
lambda x: 'Y' if x == True else 'N')
df_sdl = df_all_combined[["store_id", "store_type", "drug_id", "drug_name",
"drug_type", "drug_grade", "reset_date", "bucket",
"is_npi", "model", "percentile", "fcst", "fcst_std",
"safety_stock", "reorder_point", "order_upto_point",
"inventory_at_reset", "fptr", "inventory_at_measurement",
"net_sales_28days", "pr_loss_28days"]].copy()
df_sdl["demand_28days"] = df_sdl["net_sales_28days"] + df_sdl["pr_loss_28days"]
df_sdl["fcst_error"] = df_sdl["fcst"] - df_sdl["demand_28days"]
for index, row in df_sdl.iterrows():
forecast = row["fcst"]
actual = row["demand_28days"]
df_sdl.loc[index, "perc_error"] = metric_calc.pe(forecast, actual)
df_sdl["measurement_date"] = date.today()
    return df_sdl

# ---- end of file: zeno_etl_libs/utils/fcst_performance/data_operations.py ----
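# ---------------------------------------------------------------------------
# Illustrative sketch (toy data, not from the original source): the error
# columns added in cal_fields_store_drug_level. The real metric_calc.pe is not
# shown in this document, so percent_error below is only an assumed stand-in
# for it and may differ from the actual implementation.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd

def percent_error(forecast, actual):
    # assumed definition; the real metric_calc.pe may handle actual == 0 differently
    if actual == 0:
        return np.nan
    return (forecast - actual) / actual * 100

if __name__ == "__main__":
    df = pd.DataFrame({'fcst': [10, 5],
                       'net_sales_28days': [8, 4],
                       'pr_loss_28days': [1, 0]})
    df['demand_28days'] = df['net_sales_28days'] + df['pr_loss_28days']
    df['fcst_error'] = df['fcst'] - df['demand_28days']
    df['perc_error'] = df.apply(
        lambda r: percent_error(r['fcst'], r['demand_28days']), axis=1)
    print(df)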
import pandas as pd
import numpy as np
from zeno_etl_libs.utils.distributor_ranking2.pull_data1 import \
pull_data_dc, pull_data_franchisee
from zeno_etl_libs.utils.distributor_ranking2.calculate_ranks1 import \
calc_ranks_dc, get_final_ranks_dc, calc_ranks_franchisee, \
get_final_ranks_franchisee
from zeno_etl_libs.utils.distributor_ranking2.preprocess_features import \
preprocess_features_dc, preprocess_features_franchisee
from zeno_etl_libs.utils.distributor_ranking2.calculate_features1 import \
calculate_features
from zeno_etl_libs.utils.distributor_ranking2.post_process_ranking1 import \
post_process_ranking_dc, post_process_ranking_franchisee
def ranking_calc_dc(reset_date, time_interval_dc, as_ms_weights_dc_drug_lvl,
as_ms_weights_dc_type_lvl, pr_weights_dc_drug_lvl,
pr_weights_dc_type_lvl, logger, db, schema):
# =============================== PULL DATA ===============================
logger.info("Pulling data for DC")
# add 7 days to time interval since we do not want to include last week's data.
time_interval = time_interval_dc + 7
df_sb, df_rates, df_store_dc_maps, df_drugs, df_distributors, \
df_dc_distributors_mapping, df_distributor_drugs = pull_data_dc(
reset_date, time_interval, db, schema)
# ========================== FEATURE CALCULATION ==========================
logger.info("Calculating features")
features = calculate_features(
df_sb, df_rates, df_store_dc_maps, reset_date, time_interval_dc, logger,
group_cols=['dc_id', 'distributor_id', 'drug_id'])
# only keep active dc-distributors (based on dc-distributor table)
features = features.merge(df_dc_distributors_mapping,
on=["dc_id", "distributor_id"], how="inner")
# add drug type column
features = features.merge(df_drugs[["drug_id", "drug_type"]], on="drug_id",
how="left")
# remove discontinued and banned drugs
features = features[~((features['drug_type'] == 'discontinued-products') |
(features['drug_type'] == 'banned'))]
# add distributor details
features = features.merge(
df_distributors[["distributor_id", "drug_type", "dist_type_portfolio_size",
"distributor_credit_period"]],
on=["drug_type", "distributor_id"], how="left")
features["dist_type_portfolio_size"] = features[
"dist_type_portfolio_size"].fillna(0).astype(np.int64)
features["distributor_credit_period"] = features[
"distributor_credit_period"].fillna(0).astype(np.int64)
# ========================= CALCULATE RANKS AS/MS =========================
logger.info("Ranking starts AS/MS")
rank_drug_lvl, rank_drug_type_lvl, disq_entries = calc_ranks_dc(
features, as_ms_weights_dc_drug_lvl, as_ms_weights_dc_type_lvl, logger)
final_ranks = get_final_ranks_dc(
rank_drug_lvl, rank_drug_type_lvl, disq_entries, features,
df_distributor_drugs, df_distributors, df_dc_distributors_mapping,
as_ms_weights_dc_drug_lvl, logger)
# ====================== POST PROCESS RANK DFs AS/MS ======================
logger.info("Post processing rank-DFs AS/MS")
final_ranks_as_ms, ranked_features_as_ms = post_process_ranking_dc(
df_drugs, df_store_dc_maps, df_distributors,
rank_drug_lvl, rank_drug_type_lvl, final_ranks,
as_ms_weights_dc_drug_lvl, as_ms_weights_dc_type_lvl)
final_ranks_as_ms["request_type"] = "AS/MS"
ranked_features_as_ms["request_type"] = "AS/MS"
# ========================== CALCULATE RANKS PR ===========================
logger.info("Ranking starts PR")
rank_drug_lvl, rank_drug_type_lvl, disq_entries = calc_ranks_dc(
features, pr_weights_dc_drug_lvl, pr_weights_dc_type_lvl, logger)
final_ranks = get_final_ranks_dc(
rank_drug_lvl, rank_drug_type_lvl, disq_entries, features,
df_distributor_drugs, df_distributors, df_dc_distributors_mapping,
pr_weights_dc_drug_lvl, logger)
# ======================== POST PROCESS RANK DFs PR =======================
logger.info("Post processing rank-DFs PR")
final_ranks_pr, ranked_features_pr = post_process_ranking_dc(
df_drugs, df_store_dc_maps, df_distributors,
rank_drug_lvl, rank_drug_type_lvl, final_ranks,
pr_weights_dc_drug_lvl, pr_weights_dc_type_lvl)
final_ranks_pr["request_type"] = "PR"
ranked_features_pr["request_type"] = "PR"
# =========================== JOIN DFs AS/MS & PR =========================
final_ranks = pd.concat([final_ranks_as_ms, final_ranks_pr], axis=0)
ranked_features = pd.concat([ranked_features_as_ms, ranked_features_pr], axis=0)
return ranked_features, final_ranks
def ranking_calc_franchisee(reset_date, time_interval_franchisee,
franchisee_stores, weights_franchisee_drug_lvl,
weights_franchisee_type_lvl, logger, db, schema):
# =============================== PULL DATA ===============================
logger.info("Pulling data for Franchisee")
# add 7 days to time interval since we do not want to include last week's data.
time_interval = time_interval_franchisee + 7
df_sb, df_rates, df_store_dc_maps, df_drugs, df_distributors, \
df_dc_distributors_mapping, df_distributor_drugs = pull_data_franchisee(
reset_date, franchisee_stores, time_interval, db, schema)
# ========================== FEATURE CALCULATION ==========================
logger.info("Calculating features")
features = calculate_features(
df_sb, df_rates, df_store_dc_maps, reset_date, time_interval_franchisee, logger,
group_cols=['store_id', 'distributor_id', 'drug_id'])
# add drug type column
features = features.merge(df_drugs[["drug_id", "drug_type"]], on="drug_id",
how="left")
# remove discontinued and banned drugs
features = features[~((features['drug_type'] == 'discontinued-products') |
(features['drug_type'] == 'banned'))]
# add distributor details
features = features.merge(
df_distributors[
["distributor_id", "drug_type", "dist_type_portfolio_size",
"distributor_credit_period"]],
on=["drug_type", "distributor_id"], how="left")
features["dist_type_portfolio_size"] = features[
"dist_type_portfolio_size"].fillna(0).astype(np.int64)
features["distributor_credit_period"] = features[
"distributor_credit_period"].fillna(0).astype(np.int64)
# ============================ CALCULATE RANKS ============================
logger.info("Ranking starts")
rank_drug_lvl, rank_drug_type_lvl = calc_ranks_franchisee(
features, weights_franchisee_drug_lvl, weights_franchisee_type_lvl,
logger)
final_ranks = get_final_ranks_franchisee(
rank_drug_lvl, rank_drug_type_lvl, features, logger)
# ========================= POST PROCESS RANK DFs =========================
logger.info("Post processing rank-DFs")
final_ranks, ranked_features = post_process_ranking_franchisee(
df_drugs, df_store_dc_maps, df_distributors, rank_drug_lvl,
rank_drug_type_lvl, final_ranks, weights_franchisee_drug_lvl,
weights_franchisee_type_lvl)
final_ranks["request_type"] = "ALL"
ranked_features["request_type"] = "ALL"
    return ranked_features, final_ranks

# ---- end of file: zeno_etl_libs/utils/distributor_ranking2/distributor_ranking_calc1.py ----
import pandas as pd
import numpy as np
def post_process_ranking_dc(df_drugs, df_store_dc_maps, df_distributors,
rank_drug_lvl, rank_drug_type_lvl, final_ranks,
weights_dc_drug_lvl, weights_dc_type_lvl):
# add drug_id dummy column in type lvl
rank_drug_type_lvl["drug_id"] = np.nan
# add weights column
rank_drug_type_lvl["weights"] = str(weights_dc_type_lvl)
rank_drug_lvl["weights"] = str(weights_dc_drug_lvl)
# adding details into drug_lvl_df
rank_drug_lvl = rank_drug_lvl.merge(
df_drugs[["drug_id", "drug_type", "drug_name"]],
on="drug_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(
df_store_dc_maps[["dc_id", "dc_name"]].drop_duplicates(),
on="dc_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(
df_distributors[["distributor_id", "distributor_name"]].drop_duplicates(),
on="distributor_id", how="left")
# adding details into drug_type_lvl_df
rank_drug_type_lvl = rank_drug_type_lvl.merge(
df_store_dc_maps[["dc_id", "dc_name"]].drop_duplicates(),
on="dc_id", how="left")
rank_drug_type_lvl = rank_drug_type_lvl.merge(
df_distributors[["distributor_id", "distributor_name"]].drop_duplicates(),
on="distributor_id", how="left")
# combine drug_lvl and drug_type_lvl df
ranked_features = pd.concat([rank_drug_lvl, rank_drug_type_lvl], axis=0)
# add details into final_ranks df
final_ranks = final_ranks.merge(
df_store_dc_maps[["dc_id", "dc_name"]].drop_duplicates(),
on="dc_id", how="left")
final_ranks = final_ranks.merge(
df_drugs[["drug_id", "drug_name"]], on="drug_id", how="left")
    # add placeholder franchisee columns because both dc & franchisee
    # features/ranks need to be written to the same table.
final_ranks["franchisee_id"] = 1 # zippin id
final_ranks["store_id"] = np.nan
final_ranks["store_name"] = ""
ranked_features["franchisee_id"] = 1 # zippin id
ranked_features["store_id"] = np.nan
ranked_features["store_name"] = ""
ranked_features["request_volume_store_dist"] = np.nan
ranked_features["rank_store_dist_credit_period"] = np.nan
ranked_features["rank_store_dist_volume"] = np.nan
return final_ranks, ranked_features
def post_process_ranking_franchisee(df_drugs, df_store_dc_maps, df_distributors,
rank_drug_lvl, rank_drug_type_lvl,
final_ranks, weights_franchisee_drug_lvl,
weights_franchisee_type_lvl):
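    """
    Enrich the franchisee store level rank dataframes with drug, store and
    distributor names, combine drug level and drug-type level ranks and add
    the dc placeholder columns needed to write DC and franchisee ranks to
    the same table.
    """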
# add drug_id dummy column in type lvl
rank_drug_type_lvl["drug_id"] = np.nan
# add weights column
rank_drug_type_lvl["weights"] = str(weights_franchisee_type_lvl)
rank_drug_lvl["weights"] = str(weights_franchisee_drug_lvl)
# adding details into drug_lvl_df
rank_drug_lvl = rank_drug_lvl.merge(
df_drugs[["drug_id", "drug_type", "drug_name"]],
on="drug_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(
df_store_dc_maps[["store_id", "store_name"]].drop_duplicates(),
on="store_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(
df_distributors[
["distributor_id", "distributor_name"]].drop_duplicates(),
on="distributor_id", how="left")
# adding details into drug_type_lvl_df
rank_drug_type_lvl = rank_drug_type_lvl.merge(
df_store_dc_maps[["store_id", "store_name"]].drop_duplicates(),
on="store_id", how="left")
rank_drug_type_lvl = rank_drug_type_lvl.merge(
df_distributors[["distributor_id", "distributor_name"]].drop_duplicates(),
on="distributor_id", how="left")
# combine drug_lvl and drug_type_lvl df
ranked_features = pd.concat([rank_drug_lvl, rank_drug_type_lvl], axis=0)
# add details into final_ranks df
final_ranks = final_ranks.merge(
df_store_dc_maps[["store_id", "store_name"]].drop_duplicates(),
on="store_id", how="left")
final_ranks = final_ranks.merge(
df_drugs[["drug_id", "drug_name"]],
on="drug_id", how="left")
    # add placeholder dc columns because both dc & franchisee
    # features/ranks need to be written to the same table.
final_ranks["dc_id"] = np.nan
final_ranks["dc_name"] = ""
final_ranks["correction_flags"] = ""
ranked_features["dc_id"] = np.nan
ranked_features["dc_name"] = ""
ranked_features["request_volume_dc_dist"] = np.nan
ranked_features["rank_dc_dist_credit_period"] = np.nan
ranked_features["rank_dc_dist_volume"] = np.nan
return final_ranks, ranked_features | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/distributor_ranking2/post_process_ranking1.py | post_process_ranking1.py |
import pandas as pd
from zeno_etl_libs.utils.distributor_ranking2.pull_data import \
pull_data_dc, pull_data_franchisee
from zeno_etl_libs.utils.distributor_ranking2.calculate_ranks import \
calc_ranks_dc, get_final_ranks_dc, calc_ranks_franchisee, \
get_final_ranks_franchisee
from zeno_etl_libs.utils.distributor_ranking2.preprocess_features import \
preprocess_features_dc, preprocess_features_franchisee
from zeno_etl_libs.utils.distributor_ranking2.calculate_features import \
calculate_features
from zeno_etl_libs.utils.distributor_ranking2.post_process_ranking import \
post_process_ranking_dc, post_process_ranking_franchisee
def ranking_calc_dc(reset_date, time_interval_dc, as_ms_weights_dc_drug_lvl,
as_ms_weights_dc_type_lvl, pr_weights_dc_drug_lvl,
pr_weights_dc_type_lvl, logger, db, schema):
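    """
    Ranking pipeline for DCs: pulls and preprocesses data, builds
    dc-distributor-drug level features, then ranks distributors twice, once
    with the AS/MS weights and once with the PR weights, and concatenates
    both sets of rank dataframes.
    """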
# =============================== PULL DATA ===============================
logger.info("Pulling data for DC")
# add 7 days to time interval since we do not want to include last week's data.
time_interval = time_interval_dc + 7
df_features, df_distributors, df_dc_distributors_mapping, \
df_distributor_drugs = pull_data_dc(
reset_date, time_interval, db, schema)
# ========================== DATA PRE-PROCESSING ==========================
logger.info("Preprocessing data")
df_features = preprocess_features_dc(df_features, df_dc_distributors_mapping,
df_distributor_drugs)
# add distributor name and distributor features here.
df_features = pd.merge(df_features, df_distributors,
on=['partial_distributor_id', 'drug_type'],
how='left', validate='many_to_one')
# ========================== FEATURE CALCULATION ==========================
logger.info("Calculating features")
features = calculate_features(df_features, reset_date, time_interval_dc,
logger, group_cols=['partial_dc_id',
'partial_distributor_id',
'drug_id'])
# add drug type column
features = pd.merge(features,
df_features[['drug_id', 'drug_type']].drop_duplicates(),
on=['drug_id'], how='left', validate='many_to_one')
# add dist info
features = pd.merge(features, df_features[
['partial_distributor_id', 'partial_distributor_name',
'partial_distributor_credit_period', 'drug_type',
'dist_type_portfolio_size']].drop_duplicates(),
on=['partial_distributor_id', 'drug_type'], how='left',
validate='many_to_one')
# add dc name
features = pd.merge(features, df_features[
['partial_dc_id', 'dc_name']].dropna().drop_duplicates(),
on=['partial_dc_id'], validate='many_to_one', how='left')
# add drug name
features = pd.merge(features,
df_features[['drug_id', 'drug_name']].drop_duplicates(),
on=['drug_id'], validate='many_to_one', how='left')
# ========================= CALCULATE RANKS AS/MS =========================
logger.info("Ranking starts AS/MS")
rank_drug_lvl, rank_drug_type_lvl, disq_entries = calc_ranks_dc(
features, as_ms_weights_dc_drug_lvl, as_ms_weights_dc_type_lvl, logger)
final_ranks = get_final_ranks_dc(
rank_drug_lvl, rank_drug_type_lvl, disq_entries, features,
df_distributor_drugs, df_distributors, df_dc_distributors_mapping,
as_ms_weights_dc_drug_lvl, logger)
# ====================== POST PROCESS RANK DFs AS/MS ======================
logger.info("Post processing rank-DFs AS/MS")
final_ranks_as_ms, ranked_features_as_ms = post_process_ranking_dc(
features, rank_drug_lvl, rank_drug_type_lvl, final_ranks,
as_ms_weights_dc_drug_lvl, as_ms_weights_dc_type_lvl)
final_ranks_as_ms["request_type"] = "AS/MS"
ranked_features_as_ms["request_type"] = "AS/MS"
# ========================== CALCULATE RANKS PR ===========================
logger.info("Ranking starts PR")
rank_drug_lvl, rank_drug_type_lvl, disq_entries = calc_ranks_dc(
features, pr_weights_dc_drug_lvl, pr_weights_dc_type_lvl, logger)
final_ranks = get_final_ranks_dc(
rank_drug_lvl, rank_drug_type_lvl, disq_entries, features,
df_distributor_drugs, df_distributors, df_dc_distributors_mapping,
pr_weights_dc_drug_lvl, logger)
# ======================== POST PROCESS RANK DFs PR =======================
logger.info("Post processing rank-DFs PR")
final_ranks_pr, ranked_features_pr = post_process_ranking_dc(
features, rank_drug_lvl, rank_drug_type_lvl, final_ranks,
pr_weights_dc_drug_lvl, pr_weights_dc_type_lvl)
final_ranks_pr["request_type"] = "PR"
ranked_features_pr["request_type"] = "PR"
# =========================== JOIN DFs AS/MS & PR =========================
final_ranks = pd.concat([final_ranks_as_ms, final_ranks_pr], axis=0)
ranked_features = pd.concat([ranked_features_as_ms, ranked_features_pr], axis=0)
return ranked_features, final_ranks
def ranking_calc_franchisee(reset_date, time_interval_franchisee,
franchisee_stores, weights_franchisee_drug_lvl,
weights_franchisee_type_lvl, logger, db, schema):
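    """
    Ranking pipeline for franchisee stores: pulls and preprocesses data,
    builds store-distributor-drug level features, ranks distributors at
    drug and drug-type level and post-processes the rank dataframes.
    """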
# =============================== PULL DATA ===============================
logger.info("Pulling data for franchisee")
# add 7 days to time interval since we do not want to include last week's data.
time_interval = time_interval_franchisee + 7
df_features, df_distributors, df_distributor_drugs = pull_data_franchisee(
reset_date, time_interval, franchisee_stores, db, schema)
# ========================== DATA PRE-PROCESSING ==========================
logger.info("Preprocessing data")
df_features = preprocess_features_franchisee(
df_features, df_distributor_drugs, db, schema)
# add distributor name and distributor features here.
df_features = pd.merge(df_features, df_distributors,
on=['partial_distributor_id', 'drug_type'],
how='left', validate='many_to_one')
# ========================== FEATURE CALCULATION ==========================
logger.info("Calculating features")
features = calculate_features(df_features, reset_date, time_interval_franchisee,
logger, group_cols=['store_id',
'partial_distributor_id',
'drug_id'])
# add drug type column
features = pd.merge(features,
df_features[['drug_id', 'drug_type']].drop_duplicates(),
on=['drug_id'], how='left', validate='many_to_one')
# add dist info
features = pd.merge(features, df_features[
['partial_distributor_id', 'partial_distributor_name',
'partial_distributor_credit_period', 'drug_type',
'dist_type_portfolio_size']].drop_duplicates(),
on=['partial_distributor_id', 'drug_type'], how='left',
validate='many_to_one')
# add store name and franchisee_id here.
features = pd.merge(
features, df_features[['store_id', 'store_name', 'franchisee_id']].dropna().drop_duplicates(),
on=['store_id'], validate='many_to_one', how='left')
# add drug name
features = pd.merge(features,
df_features[['drug_id', 'drug_name']].drop_duplicates(),
on=['drug_id'], validate='many_to_one', how='left')
# ============================ CALCULATE RANKS ============================
logger.info("Ranking starts")
rank_drug_lvl, rank_drug_type_lvl = calc_ranks_franchisee(
features, weights_franchisee_drug_lvl, weights_franchisee_type_lvl,
logger)
final_ranks = get_final_ranks_franchisee(
rank_drug_lvl, rank_drug_type_lvl, features, logger)
# ========================= POST PROCESS RANK DFs =========================
logger.info("Post processing rank-DFs")
final_ranks, ranked_features = post_process_ranking_franchisee(
features, rank_drug_lvl, rank_drug_type_lvl, final_ranks,
weights_franchisee_drug_lvl, weights_franchisee_type_lvl)
final_ranks["request_type"] = "ALL"
ranked_features["request_type"] = "ALL"
return ranked_features, final_ranks | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/distributor_ranking2/distributor_ranking_calc.py | distributor_ranking_calc.py |
import pandas as pd
import numpy as np
def process_tech_df(final_ranks_dc, final_ranks_franchisee, volume_fraction):
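    """
    Convert the final DC and franchisee rank dataframes into the two tables
    consumed by tech: distributor_ranking_rules (one row per ranking entity)
    and distributor_ranking_rule_values (one row per ranked distributor with
    its volume fraction). volume_fraction is a dash-separated string of three
    fractions, e.g. '0.5-0.3-0.2'.
    """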
tech_input = pd.concat([final_ranks_dc, final_ranks_franchisee], axis=0)
tech_input['volume_fraction'] = volume_fraction
tech_input.rename(
{"partial_dc_id": "dc_id", "distributor_rank_1": "final_dist_1",
"distributor_rank_2": "final_dist_2", "distributor_rank_3": "final_dist_3"},
axis=1, inplace=True)
# combine volume fraction split for cases where total distributors < 3
volume_fraction_split = tech_input['volume_fraction'].str.split(
pat='-', expand=True).rename(
columns={0: 'volume_fraction_1',
1: 'volume_fraction_2',
2: 'volume_fraction_3'})
tech_input['volume_fraction_1'] = volume_fraction_split[
'volume_fraction_1'].astype(float)
tech_input['volume_fraction_2'] = volume_fraction_split[
'volume_fraction_2'].astype(float)
tech_input['volume_fraction_3'] = volume_fraction_split[
'volume_fraction_3'].astype(float)
tech_input['volume_fraction_2'] = np.where(
tech_input['final_dist_3'].isna(),
tech_input['volume_fraction_2'] +
tech_input['volume_fraction_3'],
tech_input['volume_fraction_2'])
tech_input['volume_fraction_3'] = np.where(
tech_input['final_dist_3'].isna(), 0,
tech_input['volume_fraction_3'])
tech_input['volume_fraction_1'] = np.where(
tech_input['final_dist_2'].isna(),
tech_input['volume_fraction_1'] +
tech_input['volume_fraction_2'],
tech_input['volume_fraction_1'])
tech_input['volume_fraction_2'] = np.where(
tech_input['final_dist_2'].isna(), 0,
tech_input['volume_fraction_2'])
tech_input['volume_fraction'] = tech_input['volume_fraction_1'].astype(
'str') + '-' + tech_input['volume_fraction_2'].astype(
'str') + '-' + tech_input['volume_fraction_3'].astype('str')
tech_input = tech_input[
['dc_id', 'store_id', 'franchisee_id', 'drug_id',
'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
# adhoc changes by tech, table restructure
tech_input = tech_input.reset_index(
drop=True).reset_index().rename(columns={'index': 'id'})
tech_input[['volume_fraction_1', 'volume_fraction_2',
'volume_fraction_3']] = tech_input[
'volume_fraction'].str.split('-', 3, expand=True)
tech_input.loc[tech_input['request_type'] == 'AS/MS',
'request_type'] = 'manual-short/auto-short'
tech_input.loc[tech_input['request_type'] ==
'PR', 'request_type'] = 'patient-request'
tech_input.loc[tech_input['request_type'] ==
'ALL', 'request_type'] = 'all'
volume_fraction_melt = pd.melt(tech_input, id_vars=['id'],
value_vars=['volume_fraction_1',
'volume_fraction_2',
'volume_fraction_3']).sort_values(
by='id')
distributor_melt = pd.melt(tech_input, id_vars=['id'],
value_vars=['final_dist_1',
'final_dist_2',
'final_dist_3']).sort_values(
by='id').rename(columns={'value': 'distributor_id'})
distributor_ranking_rule_values = pd.merge(distributor_melt,
volume_fraction_melt,
left_index=True,
right_index=True,
suffixes=('', '_y'))
distributor_ranking_rule_values = distributor_ranking_rule_values[
['id', 'distributor_id', 'value']].rename(
columns={'id': 'distributor_ranking_rule_id'}).reset_index(
drop=True)
distributor_ranking_rule_values = distributor_ranking_rule_values.reset_index().rename(
columns={'index': 'id'})
# drop null values in distributor_id(for cases where historical distributors are < 3)
distributor_ranking_rule_values = distributor_ranking_rule_values[
~distributor_ranking_rule_values['distributor_id'].isna()]
# convert distributor_id in int format
distributor_ranking_rule_values['distributor_id'] = \
distributor_ranking_rule_values['distributor_id'].astype(int)
distributor_ranking_rules = tech_input[['id', 'drug_id', 'dc_id',
'franchisee_id', 'store_id',
'drug_type', 'request_type']]
return distributor_ranking_rules, distributor_ranking_rule_values | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/distributor_ranking2/tech_processing.py | tech_processing.py |
import pandas as pd
import numpy as np
def preprocess_features_dc(df_features, df_dc_distributors_mapping,
df_distributor_drugs):
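    """
    Clean the raw DC level short-book/invoice pull: drop invalid orders,
    duplicate invoice items and zero-MRP rows, backfill distributor/dc ids
    from the short book where no invoice exists, flag lost requests, and
    keep only distributor-portfolio drugs and active dc-distributor mappings.
    """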
# remove those entries where no order is given to dc or the order value doesn't exist
df_features = df_features[(df_features['original_order'] > 0) & (
~df_features['original_order'].isna())]
df_features = df_features.drop_duplicates()
    # due to stock rotation, the same invoice_item_id can repeat, so deduplicate
    # on it. drop_duplicates would collapse all NaN invoice_item_id rows into
    # one, so split NaN and non-NaN rows, dedupe only the non-NaN rows and
    # concat the NaN rows back afterwards.
df_features_1 = df_features[~df_features[
'invoice_item_id'].isna()]
df_features_2 = df_features[df_features['invoice_item_id'].isna()]
df_features_1 = df_features_1.drop_duplicates(subset=['invoice_item_id'])
# concat back na values after you separate them
df_features = pd.concat([df_features_1, df_features_2])
    # remove cases where mrp is 0, otherwise margin becomes infinite
df_features = df_features[df_features['mrp'] != 0]
# if distributor id isn't assigned in short book then remove it.
df_features = df_features[(~df_features['short_book_distributor_id'].isna())
| (~df_features['partial_distributor_id'].isna())]
# for those cases where invoice doesn't exist, take distributor as short book distributor
df_features['partial_distributor_id'] = df_features['partial_distributor_id'].fillna(
df_features['short_book_distributor_id'])
# if no dc information is present then remove those cases.
df_features = df_features[((~df_features['partial_dc_id'].isna()) | (
~df_features['forward_dc_id'].isna()))]
# for those cases where invoice doesn't exist, take invoice dc as obtained from sdm table
df_features['partial_dc_id'] = df_features['partial_dc_id'].fillna(
df_features['forward_dc_id'])
df_features['partial_created_at'] = pd.to_datetime(
df_features['partial_created_at'], errors='coerce')
df_features['partial_invoiced_at'] = pd.to_datetime(
df_features['partial_invoiced_at'], errors='coerce')
df_features['original_created_at'] = pd.to_datetime(
df_features['original_created_at'], errors='coerce')
df_features['original_created_at_2'] = pd.to_datetime(
df_features['original_created_at_2'], errors='coerce')
# append number of invoices against each sb id.
invoice_count = df_features.groupby(['short_book_1_id']).agg(
invoice_count=('invoice_id', 'count')).reset_index()
df_features = pd.merge(df_features, invoice_count, on=[
'short_book_1_id'], how='left')
# fill those cases where no invoice is present with zero.
df_features['invoice_count'] = df_features['invoice_count'].fillna(0)
    # to avoid cases where the wrong product is received, compare against the invoice item's drug id as well.
df_features['is_lost'] = np.where(
(df_features['invoice_items_drug_id'] != df_features['drug_id']) | (df_features['partial_invoiced_at'].isna()), 1, 0)
# for new drugs where drug id hasn't been assigned yet, ranking won't be generated until the drug id is assigned.
df_features = df_features[~df_features['drug_id'].isna()]
# remove entries for which drug type hasn't been assigned
df_features = df_features[~df_features['drug_type'].isna()]
# remove discontinued or banned products
df_features = df_features[
~((df_features['drug_type'] == 'discontinued-products') | (df_features['drug_type'] == 'banned'))]
# sanity check
assert df_features[df_features['invoice_count'] ==
0].shape[0] == df_features[df_features['invoice_id'].isna()].shape[0]
# filter out distributor-drugs not part of distributor portfolio
df_features = pd.merge(df_features, df_distributor_drugs,
on=['partial_distributor_id', 'drug_id'],
how='inner', validate='many_to_one')
# filter out distributors not part of active dc-distributor mapping
df_features = pd.merge(df_features, df_dc_distributors_mapping,
on=['partial_dc_id', 'partial_distributor_id'],
how='inner', validate='many_to_one')
return df_features
def preprocess_features_franchisee(df_features, df_distributor_drugs,
db, read_schema):
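    """
    Same cleaning as preprocess_features_dc, applied at franchisee store
    level; the dc-distributor mapping filter is not applied here.
    """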
# remove those entries where no order is given to dc or the order value doesn't exist
df_features = df_features[(df_features['original_order'] > 0) & (
~df_features['original_order'].isna())]
df_features = df_features.drop_duplicates()
    # due to stock rotation, the same invoice_item_id can repeat, so deduplicate
    # on it. drop_duplicates would collapse all NaN invoice_item_id rows into
    # one, so split NaN and non-NaN rows, dedupe only the non-NaN rows and
    # concat the NaN rows back afterwards.
df_features_1 = df_features[~df_features[
'invoice_item_id'].isna()]
df_features_2 = df_features[df_features['invoice_item_id'].isna()]
df_features_1 = df_features_1.drop_duplicates(subset=['invoice_item_id'])
# concat back na values after you separate them
df_features = pd.concat([df_features_1, df_features_2])
    # remove cases where mrp is 0, otherwise margin becomes infinite
df_features = df_features[df_features['mrp'] != 0]
# if distributor id isn't assigned in short book then remove it.
df_features = df_features[(~df_features['short_book_distributor_id'].isna())
| (~df_features['partial_distributor_id'].isna())]
# for those cases where invoice doesn't exist, take distributor as short book distributor
df_features['partial_distributor_id'] = df_features['partial_distributor_id'].fillna(
df_features['short_book_distributor_id'])
df_features['partial_created_at'] = pd.to_datetime(
df_features['partial_created_at'], errors='coerce')
df_features['partial_invoiced_at'] = pd.to_datetime(
df_features['partial_invoiced_at'], errors='coerce')
df_features['original_created_at'] = pd.to_datetime(
df_features['original_created_at'], errors='coerce')
df_features['original_created_at_2'] = pd.to_datetime(
df_features['original_created_at_2'], errors='coerce')
# append number of invoices against each sb id.
invoice_count = df_features.groupby(['short_book_1_id']).agg(
invoice_count=('invoice_id', 'count')).reset_index()
df_features = pd.merge(df_features, invoice_count, on=[
'short_book_1_id'], how='left')
# fill those cases where no invoice is present with zero.
df_features['invoice_count'] = df_features['invoice_count'].fillna(0)
    # to avoid cases where the wrong product is received, compare against the invoice item's drug id as well.
df_features['is_lost'] = np.where(
(df_features['invoice_items_drug_id'] != df_features['drug_id']) | (df_features['partial_invoiced_at'].isna()), 1, 0)
# for new drugs where drug id hasn't been assigned yet, ranking won't be generated until the drug id is assigned.
df_features = df_features[~df_features['drug_id'].isna()]
# remove entries for which drug type hasn't been assigned
df_features = df_features[~df_features['drug_type'].isna()]
# remove discontinued or banned products
df_features = df_features[
~((df_features['drug_type'] == 'discontinued-products') | (df_features['drug_type'] == 'banned'))]
# sanity check
assert df_features[df_features['invoice_count'] ==
0].shape[0] == df_features[df_features['invoice_id'].isna()].shape[0]
# filter out distributor-drugs not part of distributor portfolio
df_features = pd.merge(df_features, df_distributor_drugs,
on=['partial_distributor_id', 'drug_id'],
how='inner', validate='many_to_one')
return df_features | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/distributor_ranking2/preprocess_features.py | preprocess_features.py |
import pandas as pd
import numpy as np
import math
from zeno_etl_libs.utils.distributor_ranking2.correction_flag import add_corr_flag
def calc_ranks_dc(features, weights_dc_drug_lvl, weights_dc_type_lvl, logger):
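    """
    Rank distributors for each DC at drug level and drug-type level using a
    weighted rank of margin and fulfilment (plus portfolio size at type
    level), with credit period, request volume and portfolio size as
    tie-breakers. Also returns dc-drug-distributor entries disqualified for
    consistently poor fulfilment at rank 3 or worse.
    """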
# =========================== DRUG LEVEL RANKING ==========================
logger.info("DC-drug level ranking starts")
# select only relevant columns required for ranking
rank_drug_lvl = features[
['partial_dc_id', 'partial_distributor_id', 'drug_id', 'margin',
'wtd_ff', 'dist_type_portfolio_size', 'partial_distributor_credit_period',
'request_volume_dc_dist']]
    # round decimal-valued features to 3 decimal places
rank_drug_lvl["margin"] = np.round(rank_drug_lvl["margin"], 3)
rank_drug_lvl["wtd_ff"] = np.round(rank_drug_lvl["wtd_ff"], 3)
rank_drug_lvl["request_volume_dc_dist"] = np.round(
rank_drug_lvl["request_volume_dc_dist"], 3)
    # rank each feature
rank_drug_lvl["rank_margin"] = \
rank_drug_lvl.groupby(['partial_dc_id', 'drug_id'])['margin'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_ff"] = \
rank_drug_lvl.groupby(['partial_dc_id', 'drug_id'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_lvl.groupby(['partial_dc_id', 'drug_id'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_lvl["rank_dc_dist_credit_period"] = \
rank_drug_lvl.groupby(['partial_dc_id'])[
'partial_distributor_credit_period'].rank(method='dense',
ascending=False)
    # rank volume on the rounded column, consistent with the other features
    rank_drug_lvl['rank_dc_dist_volume'] = \
        rank_drug_lvl.groupby(['partial_dc_id'])[
            'request_volume_dc_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin & ff
rank_drug_lvl["wtd_rank"] = (rank_drug_lvl["rank_margin"] *
weights_dc_drug_lvl["margin"]) + \
(rank_drug_lvl["rank_ff"] *
weights_dc_drug_lvl["ff"])
rank_drug_lvl["wtd_rank"] = np.round(rank_drug_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["partial_dc_id", "drug_id"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_dc_dist_credit_period",
"rank_dc_dist_volume",
"rank_dist_type_portfolio_size"]
sort_asc_order = group_col_sort_asc_order + [True, True, True, True]
rank_drug_lvl = rank_drug_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_lvl['index'] = rank_drug_lvl.index
# final ranking based on preference order
rank_drug_lvl["final_rank"] = \
rank_drug_lvl.groupby(['partial_dc_id', 'drug_id'])['index'].rank(
method='first', ascending=True)
rank_drug_lvl.drop('index', axis=1, inplace=True)
# ========================== D.TYPE LEVEL RANKING =========================
logger.info("DC-drug-type level ranking starts")
# select only relevant columns required for ranking
rank_drug_type_lvl = features[
['partial_dc_id', 'partial_distributor_id', 'drug_id', 'drug_type',
'margin', 'wtd_ff', 'dist_type_portfolio_size',
'partial_distributor_credit_period', 'request_volume_dc_dist']]
# group by dc-distributor-drug_type level and calculate features
rank_drug_type_lvl = rank_drug_type_lvl.groupby(
["partial_dc_id", "partial_distributor_id", "drug_type"],
as_index=False).agg({"margin": np.average, "wtd_ff": np.average,
"dist_type_portfolio_size": "first",
"partial_distributor_credit_period": "first",
"request_volume_dc_dist": "first"})
    # round features to 3 decimal places
rank_drug_type_lvl["margin"] = np.round(rank_drug_type_lvl["margin"], 3)
rank_drug_type_lvl["wtd_ff"] = np.round(rank_drug_type_lvl["wtd_ff"], 3)
rank_drug_type_lvl["request_volume_dc_dist"] = np.round(
rank_drug_type_lvl["request_volume_dc_dist"], 3)
    # rank each feature
rank_drug_type_lvl["rank_margin"] = \
rank_drug_type_lvl.groupby(['partial_dc_id', 'drug_type'])['margin'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_ff"] = \
rank_drug_type_lvl.groupby(['partial_dc_id', 'drug_type'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_type_lvl.groupby(['partial_dc_id', 'drug_type'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_type_lvl["rank_dc_dist_credit_period"] = \
rank_drug_type_lvl.groupby(['partial_dc_id'])[
'partial_distributor_credit_period'].rank(method='dense',
ascending=False)
rank_drug_type_lvl['rank_dc_dist_volume'] = \
rank_drug_type_lvl.groupby(['partial_dc_id'])[
'request_volume_dc_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin, ff & portfolio size
rank_drug_type_lvl["wtd_rank"] = (rank_drug_type_lvl["rank_margin"] *
weights_dc_type_lvl["margin"]) + \
(rank_drug_type_lvl["rank_ff"] *
weights_dc_type_lvl["ff"]) + \
(rank_drug_type_lvl["rank_dist_type_portfolio_size"] *
weights_dc_type_lvl["portfolio_size"])
rank_drug_type_lvl["wtd_rank"] = np.round(rank_drug_type_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["partial_dc_id", "drug_type"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_dc_dist_credit_period",
"rank_dc_dist_volume"]
sort_asc_order = group_col_sort_asc_order + [True, True, True]
rank_drug_type_lvl = rank_drug_type_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_type_lvl['index'] = rank_drug_type_lvl.index
# final ranking based on preference order
rank_drug_type_lvl["final_rank"] = \
rank_drug_type_lvl.groupby(['partial_dc_id', 'drug_type'])['index'].rank(
method='first', ascending=True)
rank_drug_type_lvl.drop('index', axis=1, inplace=True)
# ================== DISQUALIFY POOR DC-DRUG-DISTRIBUTORS =================
    # If a distributor that is poor in terms of wtd_ff and ff_requests lands at
    # rank 3 or worse, disqualify it. The rank 3 slot then becomes vacant for
    # the slot-filling logic to assign another distributor, which gets a chance
    # to fulfil orders. If the newly assigned distributor performs well it gets
    # ranked better in subsequent resets, else it is disqualified in the same
    # way later. This keeps the system constantly looking for better
    # distributors instead of locking into a cycle of ranking the same poor
    # distributor over and over again.
disq_entries = rank_drug_lvl.merge(
features[["partial_dc_id", "partial_distributor_id", "drug_id", "ff_requests"]],
on=["partial_dc_id", "partial_distributor_id", "drug_id"], how="left")
# disqualify condition
disq_entries["disqualify"] = np.where(
(disq_entries["final_rank"] >= 3) &
((disq_entries["ff_requests"] == 0) | (disq_entries["wtd_ff"] < 0.4)),
1, 0)
disq_entries = disq_entries.loc[(disq_entries["disqualify"] == 1)]
disq_entries = disq_entries[["partial_dc_id", "partial_distributor_id",
"drug_id", "disqualify"]]
return rank_drug_lvl, rank_drug_type_lvl, disq_entries
def get_final_ranks_dc(rank_drug_lvl, rank_drug_type_lvl, disq_entries,
features, df_distributor_drugs, df_distributors,
df_dc_distributors_mapping, weights_dc_drug_lvl, logger):
"""
get final ranking format and apply slot filling logic to rank slots
which are empty.
"""
final_ranks = rank_drug_lvl[["partial_dc_id", "drug_id"]].drop_duplicates()
final_ranks = final_ranks.merge(
features[["drug_id", "drug_type"]].drop_duplicates(), on="drug_id",
how="left")
# remove disqualified entries
rank_drug_lvl = rank_drug_lvl.merge(
disq_entries, on=["partial_dc_id", "partial_distributor_id", "drug_id"],
how="left")
rank_drug_lvl = rank_drug_lvl.loc[rank_drug_lvl["disqualify"] != 1]
logger.info("Creating final df format")
# make final ranking df
for rank in [1, 2, 3]:
df_rank = rank_drug_lvl.loc[rank_drug_lvl["final_rank"] == rank]
df_rank = df_rank[
["partial_dc_id", "drug_id", "partial_distributor_id"]]
df_rank.rename({"partial_distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks = final_ranks.merge(df_rank,
on=["partial_dc_id", "drug_id"],
how="left")
final_ranks[f"distributor_rank_{rank}"] = final_ranks[
f"distributor_rank_{rank}"].astype(float)
# ================== FILL MISSING RANK SLOTS DC-DRUG LVL ==================
# get all dc-drug with missing slots
logger.info("Get allowable dc-drug-distributors to fill slots")
missing_rank_dc_drugs = final_ranks.loc[
(final_ranks["distributor_rank_2"].isna()) | (final_ranks["distributor_rank_3"].isna())]
missing_rank_dc_drugs = missing_rank_dc_drugs[["partial_dc_id", "drug_id", "drug_type"]]
# list all missing drugs
list_missing_rank_drugs = list(missing_rank_dc_drugs["drug_id"].unique())
# get all distributors with missing drugs in their portfolio
select_distributor_drugs = df_distributor_drugs.loc[
df_distributor_drugs["drug_id"].isin(list_missing_rank_drugs)]
# assign it to all dc
available_mappings = missing_rank_dc_drugs.merge(select_distributor_drugs,
on="drug_id", how="left")
# merge distributor details
available_mappings = available_mappings.merge(
df_distributors[["partial_distributor_id", "partial_distributor_credit_period"]].drop_duplicates(),
on="partial_distributor_id", how="left")
# calculate features on drug_type level for dc-distributors (margin & ff)
distributor_type_lvl_features = features.groupby(
["partial_dc_id", "partial_distributor_id", "drug_type"],
as_index=False).agg({"margin": np.average, "wtd_ff": np.average,
"request_volume_dc_dist": "first"})
available_mappings = available_mappings.merge(
distributor_type_lvl_features, on=["partial_dc_id",
"partial_distributor_id",
"drug_type"], how="left")
    # fill na and round to 3 decimal places
available_mappings["margin"] = available_mappings["margin"].fillna(0)
available_mappings["wtd_ff"] = available_mappings["wtd_ff"].fillna(0)
available_mappings["request_volume_dc_dist"] = available_mappings[
"request_volume_dc_dist"].fillna(0)
available_mappings["margin"] = np.round(available_mappings["margin"], 3)
available_mappings["wtd_ff"] = np.round(available_mappings["wtd_ff"], 3)
available_mappings["request_volume_dc_dist"] = np.round(
available_mappings["request_volume_dc_dist"], 3)
# remove inactive dc-distributors
available_mappings = available_mappings.merge(
df_dc_distributors_mapping, on=["partial_dc_id", "partial_distributor_id"],
how="inner")
# remove disqualified entries
available_mappings = available_mappings.merge(
disq_entries, on=["partial_dc_id", "partial_distributor_id", "drug_id"],
how="left")
available_mappings = available_mappings.loc[available_mappings["disqualify"] != 1]
# ranking distributors based on dc-drug level logic
logger.info("Ranking allowable dc-drug-distributors")
available_mapping_ranked = available_mappings.copy()
available_mapping_ranked["rank_margin"] = \
available_mapping_ranked.groupby(['partial_dc_id', 'drug_id'])[
'margin'].rank(method='dense', ascending=False)
available_mapping_ranked["rank_ff"] = \
available_mapping_ranked.groupby(['partial_dc_id', 'drug_id'])[
'wtd_ff'].rank(method='dense', ascending=False)
available_mapping_ranked["rank_dc_dist_credit_period"] = \
available_mapping_ranked.groupby(['partial_dc_id'])[
'partial_distributor_credit_period'].rank(method='dense',
ascending=False)
available_mapping_ranked['rank_dc_dist_volume'] = \
available_mapping_ranked.groupby(['partial_dc_id'])[
'request_volume_dc_dist'].rank(method='dense', ascending=False)
# calculate wtd.ranks
available_mapping_ranked["wtd_rank"] = (available_mapping_ranked["rank_margin"] *
weights_dc_drug_lvl["margin"]) + \
(available_mapping_ranked["rank_ff"] *
weights_dc_drug_lvl["ff"])
available_mapping_ranked["wtd_rank"] = np.round(
available_mapping_ranked["wtd_rank"], 1)
# set sorting order
group_cols = ["partial_dc_id", "drug_id"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_dc_dist_credit_period",
"rank_dc_dist_volume"]
sort_asc_order = group_col_sort_asc_order + [True, True, True]
available_mapping_ranked = available_mapping_ranked.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
available_mapping_ranked['index'] = available_mapping_ranked.index
# get final ranks
available_mapping_ranked["final_rank"] = \
available_mapping_ranked.groupby(['partial_dc_id', 'drug_id'])[
'index'].rank(method='first', ascending=True)
available_mapping_ranked.drop('index', axis=1, inplace=True)
pre_corr = final_ranks.copy() # to compare pre-post correction
# adding auxiliary ranking to empty slot dc-drugs
logger.info("Filling empty rank slots with ranked distributors")
for rank in [1, 2, 3]:
df_rank = available_mapping_ranked.loc[
available_mapping_ranked["final_rank"] == rank]
df_rank = df_rank[
["partial_dc_id", "drug_id", "partial_distributor_id"]]
df_rank.rename(
{"partial_distributor_id": f"aux_distributor_rank_{rank}"}, axis=1,
inplace=True)
final_ranks = final_ranks.merge(df_rank,
on=["partial_dc_id", "drug_id"],
how="left")
final_ranks[f"aux_distributor_rank_{rank}"] = final_ranks[
f"aux_distributor_rank_{rank}"].astype(float)
for index, row in final_ranks.iterrows():
# if rank 2 empty and aux_rank present
if math.isnan(row["distributor_rank_2"]) & \
(not math.isnan(row["aux_distributor_rank_1"])):
if row["aux_distributor_rank_1"] != row["distributor_rank_1"]:
final_ranks.loc[index, "distributor_rank_2"] = row[
"aux_distributor_rank_1"]
elif not math.isnan(row["aux_distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_2"] = row[
"aux_distributor_rank_2"]
for index, row in final_ranks.iterrows():
# if rank 1 & 2 filled, rank 3 empty and aux_ranks present
if (not math.isnan(row["distributor_rank_1"])) & \
(not math.isnan(row["distributor_rank_2"])) & \
(math.isnan(row["distributor_rank_3"])):
if (not math.isnan(row["aux_distributor_rank_1"])) & \
(row["aux_distributor_rank_1"] != row["distributor_rank_1"]) & \
(row["aux_distributor_rank_1"] != row["distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_3"] = row[
"aux_distributor_rank_1"]
elif (not math.isnan(row["aux_distributor_rank_2"])) & \
(row["aux_distributor_rank_2"] != row["distributor_rank_1"]) & \
(row["aux_distributor_rank_2"] != row["distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_3"] = row[
"aux_distributor_rank_2"]
elif (not math.isnan(row["aux_distributor_rank_3"])) & \
(row["aux_distributor_rank_3"] != row["distributor_rank_1"]) & \
(row["aux_distributor_rank_3"] != row["distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_3"] = row[
"aux_distributor_rank_3"]
final_ranks = final_ranks.drop(
["aux_distributor_rank_1", "aux_distributor_rank_2",
"aux_distributor_rank_3"], axis=1)
post_corr = final_ranks.copy() # to compare pre-post correction
# add correction flags where rank2 & rank3 slot filling took place
logger.info("Adding correction flags for filled rank slots")
final_ranks = add_corr_flag(final_ranks, pre_corr, post_corr,
col_to_compare="distributor_rank_2",
corr_flag="R2F",
group_cols=["partial_dc_id", "drug_id"])
final_ranks = add_corr_flag(final_ranks, pre_corr, post_corr,
col_to_compare="distributor_rank_3",
corr_flag="R3F",
group_cols=["partial_dc_id", "drug_id"])
# ================== COMBINE DC-DRUG LVL & DC-TYPE LVL ===================
# add dc-drug-type level ranking
logger.info("Adding dc-drug-type level ranking to final df")
final_ranks_type_lvl = rank_drug_type_lvl[
["partial_dc_id", "drug_type"]].drop_duplicates()
# create dc-type level final ranking format
for rank in [1, 2, 3]:
df_rank = rank_drug_type_lvl.loc[
rank_drug_type_lvl["final_rank"] == rank]
df_rank = df_rank[
["partial_dc_id", "drug_type", "partial_distributor_id"]]
df_rank.rename({"partial_distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks_type_lvl = final_ranks_type_lvl.merge(df_rank,
on=["partial_dc_id",
"drug_type"],
how="left")
final_ranks_type_lvl[f"distributor_rank_{rank}"] = final_ranks_type_lvl[
f"distributor_rank_{rank}"].astype(float)
# combine dc-drug lvl and dc-drug-type lvl
final_ranks = pd.concat([final_ranks, final_ranks_type_lvl], axis=0)
final_ranks["correction_flags"] = final_ranks["correction_flags"].fillna("")
return final_ranks
def calc_ranks_franchisee(features, weights_franchisee_drug_lvl,
weights_franchisee_type_lvl, logger):
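    """
    Rank distributors for each franchisee store at drug level and drug-type
    level, mirroring the DC level logic but without the disqualification
    step.
    """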
# =========================== DRUG LEVEL RANKING ==========================
logger.info("Franchisee-store-drug level ranking starts")
# select only relevant columns required for ranking
rank_drug_lvl = features[
['store_id', 'partial_distributor_id', 'drug_id', 'margin',
'wtd_ff', 'dist_type_portfolio_size', 'partial_distributor_credit_period',
'request_volume_store_dist']]
    # round decimal-valued features to 3 decimal places
rank_drug_lvl["margin"] = np.round(rank_drug_lvl["margin"], 3)
rank_drug_lvl["wtd_ff"] = np.round(rank_drug_lvl["wtd_ff"], 3)
rank_drug_lvl["request_volume_store_dist"] = np.round(
rank_drug_lvl["request_volume_store_dist"], 3)
    # rank each feature
rank_drug_lvl["rank_margin"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])['margin'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_ff"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_lvl["rank_store_dist_credit_period"] = \
rank_drug_lvl.groupby(['store_id'])[
'partial_distributor_credit_period'].rank(method='dense',
ascending=False)
    # rank volume on the rounded column, consistent with the other features
    rank_drug_lvl['rank_store_dist_volume'] = \
        rank_drug_lvl.groupby(['store_id'])[
            'request_volume_store_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin & ff
rank_drug_lvl["wtd_rank"] = (rank_drug_lvl["rank_margin"] *
weights_franchisee_drug_lvl["margin"]) + \
(rank_drug_lvl["rank_ff"] *
weights_franchisee_drug_lvl["ff"])
rank_drug_lvl["wtd_rank"] = np.round(rank_drug_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["store_id", "drug_id"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_store_dist_credit_period",
"rank_store_dist_volume",
"rank_dist_type_portfolio_size"]
sort_asc_order = group_col_sort_asc_order + [True, True, True, True]
rank_drug_lvl = rank_drug_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_lvl['index'] = rank_drug_lvl.index
# final ranking based on preference order
rank_drug_lvl["final_rank"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])['index'].rank(
method='first', ascending=True)
rank_drug_lvl.drop('index', axis=1, inplace=True)
# ========================== D.TYPE LEVEL RANKING =========================
logger.info("Franchisee-drug-type level ranking starts")
# select only relevant columns required for ranking
rank_drug_type_lvl = features[
['store_id', 'partial_distributor_id', 'drug_id', 'drug_type',
'margin', 'wtd_ff', 'dist_type_portfolio_size',
'partial_distributor_credit_period', 'request_volume_store_dist']]
    # group by store-distributor-drug_type level and calculate features
rank_drug_type_lvl = rank_drug_type_lvl.groupby(
["store_id", "partial_distributor_id", "drug_type"],
as_index=False).agg({"margin": np.average, "wtd_ff": np.average,
"dist_type_portfolio_size": "first",
"partial_distributor_credit_period": "first",
"request_volume_store_dist": "first"})
    # round features to 3 decimal places
rank_drug_type_lvl["margin"] = np.round(rank_drug_type_lvl["margin"], 3)
rank_drug_type_lvl["wtd_ff"] = np.round(rank_drug_type_lvl["wtd_ff"], 3)
rank_drug_type_lvl["request_volume_store_dist"] = np.round(
rank_drug_type_lvl["request_volume_store_dist"], 3)
    # rank each feature
rank_drug_type_lvl["rank_margin"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])['margin'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_ff"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_type_lvl["rank_store_dist_credit_period"] = \
rank_drug_type_lvl.groupby(['store_id'])[
'partial_distributor_credit_period'].rank(method='dense',
ascending=False)
rank_drug_type_lvl['rank_store_dist_volume'] = \
rank_drug_type_lvl.groupby(['store_id'])[
'request_volume_store_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin, ff & portfolio size
rank_drug_type_lvl["wtd_rank"] = (rank_drug_type_lvl["rank_margin"] *
weights_franchisee_type_lvl["margin"]) + \
(rank_drug_type_lvl["rank_ff"] *
weights_franchisee_type_lvl["ff"]) + \
(rank_drug_type_lvl["rank_dist_type_portfolio_size"] *
weights_franchisee_type_lvl["portfolio_size"])
rank_drug_type_lvl["wtd_rank"] = np.round(rank_drug_type_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["store_id", "drug_type"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_store_dist_credit_period",
"rank_store_dist_volume"]
sort_asc_order = group_col_sort_asc_order + [True, True, True]
rank_drug_type_lvl = rank_drug_type_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_type_lvl['index'] = rank_drug_type_lvl.index
# final ranking based on preference order
rank_drug_type_lvl["final_rank"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])['index'].rank(
method='first', ascending=True)
rank_drug_type_lvl.drop('index', axis=1, inplace=True)
return rank_drug_lvl, rank_drug_type_lvl
def get_final_ranks_franchisee(rank_drug_lvl, rank_drug_type_lvl, features,
logger):
"""
get final ranking format. no slot filling logic for franchisee stores.
"""
final_ranks = rank_drug_lvl[["store_id", "drug_id"]].drop_duplicates()
final_ranks = final_ranks.merge(
features[["drug_id", "drug_type"]].drop_duplicates(), on="drug_id",
how="left")
logger.info("Creating final df format")
# make final ranking df
for rank in [1, 2, 3]:
df_rank = rank_drug_lvl.loc[rank_drug_lvl["final_rank"] == rank]
df_rank = df_rank[
["store_id", "drug_id", "partial_distributor_id"]]
df_rank.rename({"partial_distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks = final_ranks.merge(df_rank,
on=["store_id", "drug_id"],
how="left")
final_ranks[f"distributor_rank_{rank}"] = final_ranks[
f"distributor_rank_{rank}"].astype(float)
# add franchisee-store-drug-type level ranking
logger.info("Adding franchisee-store-drug-typ level ranking to final df")
final_ranks_type_lvl = rank_drug_type_lvl[
["store_id", "drug_type"]].drop_duplicates()
# create store-type level final ranking format
for rank in [1, 2, 3]:
df_rank = rank_drug_type_lvl.loc[
rank_drug_type_lvl["final_rank"] == rank]
df_rank = df_rank[
["store_id", "drug_type", "partial_distributor_id"]]
df_rank.rename({"partial_distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks_type_lvl = final_ranks_type_lvl.merge(df_rank,
on=["store_id",
"drug_type"],
how="left")
final_ranks_type_lvl[f"distributor_rank_{rank}"] = final_ranks_type_lvl[
f"distributor_rank_{rank}"].astype(float)
# combine store-drug lvl and store-drug-type lvl
final_ranks = pd.concat([final_ranks, final_ranks_type_lvl], axis=0)
return final_ranks | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/distributor_ranking2/calculate_ranks.py | calculate_ranks.py |
from tqdm import tqdm
import numpy as np
import pandas as pd
import datetime as dt
def calculate_features(df_sb, df_rates, df_store_dc_maps, reset_date,
time_interval, logger, group_cols):
"""
DC-LEVEL: group_cols=['dc_id','distributor_id', 'drug_id']
FRANCHISEE-LEVEL: group_cols=['store_id','distributor_id', 'drug_id']
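    Returns one row per group with wtd_ff (fulfilment weighted across up to
    three recent periods), margin (from recent purchase rates) and the
    distributor's share of request volume at the dc/store level.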
"""
# ====================== WTD.FULFILLMENT CALCULATION ======================
# BASE FF
logger.info("Calculating sb-level ff")
sb_ids = df_sb["sb_id"].unique().tolist()
df_ff_base = pd.DataFrame()
pd.options.mode.chained_assignment = None
for sb_id in tqdm(sb_ids):
df_temp = df_sb.loc[df_sb["sb_id"] == sb_id]
df_temp["required_quantity_shift"] = df_temp["required_quantity"].shift(1)
df_temp["required_quantity_shift"] = np.where(
df_temp["required_quantity_shift"].isna(),
df_temp["sb_quantity"], df_temp["required_quantity_shift"])
try:
slice_index = df_temp.loc[df_temp["required_quantity_shift"] == 0].index[0]
# if completely fulfilled, ignore further fulfillment
df_temp = df_temp.loc[:slice_index - 1]
except IndexError:
continue
df_temp["ff_perc"] = 1 - (df_temp["required_quantity"] / df_temp[
"required_quantity_shift"])
df_temp = df_temp.groupby(
["sb_id", "sb_created_on", "store_id", "drug_id", "sb_quantity",
"ordered_dist_id"], as_index=False).agg({"ff_perc": "max"})
df_ff_base = pd.concat([df_ff_base, df_temp], axis=0)
pd.options.mode.chained_assignment = 'warn'
# remove WH, LP, WORKCELL
df_ff_base = df_ff_base.loc[
~df_ff_base["ordered_dist_id"].isin([8105, 76, 5000])]
df_ff_base.rename({"ordered_dist_id": "distributor_id"}, axis=1, inplace=True)
    # clip inconsistent (negative) FF values to 0
df_ff_base["ff_perc"] = np.where(df_ff_base["ff_perc"] < 0, 0, df_ff_base["ff_perc"])
# add store-dc map
df_ff_base = df_ff_base.merge(df_store_dc_maps[["store_id", "dc_id"]],
on="store_id", how='left')
# ensure data-type
df_ff_base["sb_created_on"] = pd.to_datetime(df_ff_base["sb_created_on"])
# WTD FF
logger.info("Calculating wtd.ff")
# get length of 3 period split
period_length = round(time_interval / 3)
# p1 : t-1 (latest period)
# p2 : t-2 period
# p3 : t-3 period
p1_end = pd.Timestamp(reset_date - dt.timedelta(6))
p1_start = p1_end - dt.timedelta(period_length)
p2_end = p1_start
p2_start = p2_end - dt.timedelta(period_length)
p3_end = p2_start
p3_start = p3_end - dt.timedelta(period_length + 1)
df_ff_1 = ff_calc(df_ff_base, group_cols, p_start=p1_start, p_end=p1_end,
period_flag="p1")
df_ff_2 = ff_calc(df_ff_base, group_cols, p_start=p2_start, p_end=p2_end,
period_flag="p2")
df_ff_3 = ff_calc(df_ff_base, group_cols, p_start=p3_start, p_end=p3_end,
period_flag="p3")
df_ff_comb = pd.concat([df_ff_1, df_ff_2, df_ff_3], axis=0)
    # count for each group whether data is present for 3, 2 or 1 of the periods
df_ff_period_cnt = df_ff_comb.groupby(group_cols, as_index=False).agg(
{"period_flag": "count"})
df_ff_period_cnt.rename({"period_flag": "period_count"}, axis=1,
inplace=True)
# Cases with 3 periods present
# weighted by 0.5, 0.3, 0.2 for p1, p2, p3 respectively
df_3p = df_ff_period_cnt.loc[df_ff_period_cnt["period_count"] == 3][
group_cols]
df_ff_comb_3p = df_ff_comb.merge(df_3p, on=group_cols, how="inner")
    # derive the period number from period_flag (p1/p2/p3) so the weights map
    # to the correct period regardless of row order
    df_ff_comb_3p['period'] = df_ff_comb_3p['period_flag'].map(
        {'p1': 1, 'p2': 2, 'p3': 3})
df_ff_comb_3p['weights'] = np.where(df_ff_comb_3p['period'] == 1, 0.5, 0)
df_ff_comb_3p['weights'] = np.where(df_ff_comb_3p['period'] == 2, 0.3,
df_ff_comb_3p['weights'])
df_ff_comb_3p['weights'] = np.where(df_ff_comb_3p['period'] == 3, 0.2,
df_ff_comb_3p['weights'])
df_ff_comb_3p["wtd_ff"] = df_ff_comb_3p["ff_perc"] * df_ff_comb_3p["weights"]
df_ff_comb_3p = df_ff_comb_3p.groupby(group_cols, as_index=False).agg(
{"wtd_ff": "sum"})
# Cases with 2 periods present
# weighted by 0.6, 0.4 for latest, early respectively
df_2p = df_ff_period_cnt.loc[df_ff_period_cnt["period_count"] == 2][
group_cols]
df_ff_comb_2p = df_ff_comb.merge(df_2p, on=group_cols, how="inner")
    # derive the period rank per group from period_flag so that the latest
    # available period gets weight 0.6 and the earlier one gets 0.4
    df_ff_comb_2p['period'] = df_ff_comb_2p['period_flag'].map(
        {'p1': 1, 'p2': 2, 'p3': 3})
    df_ff_comb_2p['period'] = df_ff_comb_2p.groupby(group_cols)[
        'period'].rank(method='dense').astype(int)
df_ff_comb_2p['weights'] = np.where(df_ff_comb_2p['period'] == 1, 0.6, 0)
df_ff_comb_2p['weights'] = np.where(df_ff_comb_2p['period'] == 2, 0.4,
df_ff_comb_2p['weights'])
df_ff_comb_2p["wtd_ff"] = df_ff_comb_2p["ff_perc"] * df_ff_comb_2p["weights"]
df_ff_comb_2p = df_ff_comb_2p.groupby(group_cols, as_index=False).agg(
{"wtd_ff": "sum"})
# Cases with 1 period present
# weighted by 1 for whatever period present
df_1p = df_ff_period_cnt.loc[df_ff_period_cnt["period_count"] == 1][group_cols]
df_ff_comb_1p = df_ff_comb.merge(df_1p, on=group_cols, how="inner")
df_ff_comb_1p = df_ff_comb_1p[group_cols + ["ff_perc"]]
df_ff_comb_1p.rename({"ff_perc": "wtd_ff"}, axis=1, inplace=True)
# combine all
df_ff_comb = pd.concat([df_ff_comb_3p, df_ff_comb_2p, df_ff_comb_1p], axis=0)
# ========================== MARGIN CALCULATION ==========================
logger.info("Calculating margin")
df_margin = df_rates.loc[(df_rates["avg_mrp"] > 0) & (df_rates["avg_purchase_rate"] > 0)]
df_margin = df_margin.loc[(df_margin["avg_mrp"] > df_margin["avg_purchase_rate"])]
df_margin["margin"] = (df_margin["avg_mrp"] - df_margin["avg_purchase_rate"]) / df_margin["avg_mrp"]
df_margin = df_margin[["distributor_id", "drug_id", "margin"]]
# ======================== DIST VOLUME CALCULATION ========================
if group_cols[0] == "dc_id":
base_lvl = 'dc'
else:
base_lvl = 'store'
logger.info(f"Calculating {base_lvl}-distributor volume")
df_ff_base["ff_requests"] = np.where(df_ff_base["ff_perc"] > 0, 1, 0)
df_vol = df_ff_base.groupby(group_cols, as_index=False).agg(
{"ff_requests": "sum"})
if base_lvl == 'dc':
# calculate request volume dc
request_volume_dc = df_vol.groupby("dc_id", as_index=False).agg(
total_requests_dc=("ff_requests", "sum"))
request_volume_dc_dist = df_vol.groupby(
["dc_id", "distributor_id"], as_index=False).agg(
total_requests_dc_dist=("ff_requests", "sum"))
df_vol = df_vol.merge(request_volume_dc_dist,
on=["dc_id", "distributor_id"],
how="left")
df_vol = df_vol.merge(request_volume_dc, on="dc_id", how="left")
df_vol["request_volume_dc_dist"] = df_vol["total_requests_dc_dist"] / \
df_vol["total_requests_dc"]
df_vol.drop(["total_requests_dc_dist", "total_requests_dc"], axis=1,
inplace=True)
else:
# calculate request volume store (franchisee)
request_volume_store = df_vol.groupby("store_id", as_index=False).agg(
total_requests_store=("ff_requests", "sum"))
request_volume_store_dist = df_vol.groupby(
["store_id", "distributor_id"], as_index=False).agg(
total_requests_store_dist=("ff_requests", "sum"))
df_vol = df_vol.merge(request_volume_store_dist,
on=["store_id", "distributor_id"],
how="left")
df_vol = df_vol.merge(request_volume_store, on="store_id", how="left")
df_vol["request_volume_store_dist"] = df_vol["total_requests_store_dist"] / \
df_vol["total_requests_store"]
df_vol.drop(["total_requests_store_dist", "total_requests_store"],
axis=1, inplace=True)
# =========================== COMPILE FEATURES ===========================
logger.info("Compiling all features")
features = df_ff_comb.merge(df_margin, on=["distributor_id", "drug_id"],
how="left")
features = features.merge(df_vol, on=group_cols, how="left")
    # rounding off to 3 decimal places
features["wtd_ff"] = np.round(features["wtd_ff"], 3)
features["margin"] = np.round(features["margin"], 3)
features[f"request_volume_{base_lvl}_dist"] = np.round(
features[f"request_volume_{base_lvl}_dist"], 3)
# assume 0 margin for null cases
features["margin"] = features["margin"].fillna(0)
return features
def ff_calc(df_ff_base, group_cols, p_start=None, p_end=None, period_flag="None"):
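    """
    Average fulfilment percentage per group for short books created between
    p_start and p_end, tagged with the given period flag.
    """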
# split base data by period
df_ff = df_ff_base.loc[(df_ff_base["sb_created_on"] > p_start) &
(df_ff_base["sb_created_on"] < p_end)]
df_ff = df_ff.groupby(group_cols, as_index=False).agg({"ff_perc": np.average})
df_ff["period_flag"] = period_flag
return df_ff | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/distributor_ranking2/calculate_features1.py | calculate_features1.py |
Q_SB = """
select sb.id as "sb-id", sb."store-id" , sb."drug-id" ,
sb.quantity as "sb-quantity", sbol."ordered-dist-id" ,
sbol."required-quantity" , sbol.status , sbol."ff-status" ,
date(sb."created-at") as "sb-created-on",
sbol."created-at" as "sbol-created-at"
from
"{schema}"."short-book-1" sb
left join
"{schema}"."short-book-order-logs" sbol
on sbol."short-book-id" = sb.id
where
sb."distributor-id" not in (8105, 5000, 76)
and DATEDIFF(day, date(sb."created-at"), '{reset_date}') <= {time_interval}
and DATEDIFF(day, date(sb."created-at"), '{reset_date}') >= 7
and sb."store-id" in (2,4,7,16,54,82,231,234,244,278,297,23,28,39,216,218,235,229,280,8,13,21,26,31,45,188,208,221,222,230,241,260,264,20,36,61,134,160,184,195,215,224,226,245,252,273,281)
and sbol.status not in ('presaved', 'saved')
{franchisee_stores_execute_query}
order by
sb.id, sbol."created-at"
"""
# (2,4,7,16,54,82,231,234,244,278,297,23,28,39,216,218,235,229,280,8,13,21,26,31,45,188,208,221,222,230,241,260,264,20,36,61,134,160,184,195,215,224,226,245,252,273,281)
Q_RATES = """
select subQ."distributor-id", subQ."drug-id", avg(subQ.mrp) as avg_mrp,
avg(subQ."purchase-rate") as avg_purchase_rate
from
(
select i."distributor-id" , ii."drug-id" , i."created-at" , inv.mrp ,
inv."purchase-rate", row_number() over (partition by
i."distributor-id", ii."drug-id"
order by i."created-at" desc) as recency_rank
from
"{schema}".invoices i
left join
"{schema}"."invoice-items-1" ii
on ii."invoice-id" = i.id
left join
"{schema}"."inventory-1" inv
on inv."invoice-item-id" = ii.id
where
DATEDIFF(day, date(i."created-at"), '{reset_date}') <= {time_interval}
and DATEDIFF(day, date(i."created-at"), '{reset_date}') >= 7
and i."distributor-id" not in (8105, 76, 5000)
) as subQ
where
subQ.recency_rank <= 5
group by subQ."distributor-id", subQ."drug-id"
"""
Q_STORE_DC_MAPS = """
select subQ.*, s2."name" as "dc-name"
from
(
select sdm."store-id" , s."name" as "store-name",
"forward-dc-id" as "dc-id"
from
"{schema}"."store-dc-mapping" sdm
left join
"{schema}".stores s on sdm."store-id" = s.id
where "forward-dc-id" not in (199)
group by "store-id" , s."name", "forward-dc-id"
) as subQ
left join "{schema}".stores s2
on subQ."dc-id" = s2.id
"""
Q_DRUGS = """
select id as "drug-id", "drug-name" , "type" as "drug-type"
from "{schema}".drugs d
"""
Q_DISTRIBUTORS = """
select db.id as "distributor-id", db.name as "distributor-name",
db."credit-period" as "distributor-credit-period",
d."type" as "drug-type", count(distinct dd."drug-id") as "dist-type-portfolio-size"
from
"{schema}".distributors db
left join
"{schema}"."distributor-drugs" dd on db.id = dd."distributor-id"
left join
"{schema}".drugs d on dd."drug-id" = d.id
group by db.id, "distributor-name", "distributor-credit-period", "drug-type"
"""
Q_DC_DISTRIBUTOR_MAPPING = """
select "dc-id", "distributor-id"
from "{schema}"."dc-distributor-mapping" ddm
where "is-active" = 1
group by "dc-id" , "distributor-id"
"""
Q_DISTRIBUTOR_DRUGS = """
select "distributor-id", "drug-id"
from "{schema}"."distributor-drugs" dd
group by "distributor-id" , "drug-id"
"""
Q_FRANCHISEE_STORES = """
select distinct "id"
from "{schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and "franchisee-id" != 1
"""
def pull_data_dc(reset_date, time_interval, db, schema):
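    """
    Pull the raw inputs for DC level ranking: short-book order logs, recent
    purchase rates, store-DC mapping, drug master, distributor master with
    type level portfolio size, active dc-distributor mapping and the
    distributor drug portfolio.
    """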
df_sb = db.get_df(Q_SB.format(
reset_date=reset_date, time_interval=time_interval, schema=schema,
franchisee_stores_execute_query=""))
df_sb.columns = [c.replace('-', '_') for c in df_sb.columns]
df_rates = db.get_df(Q_RATES.format(
reset_date=reset_date, time_interval=time_interval, schema=schema))
df_rates.columns = [c.replace('-', '_') for c in df_rates.columns]
df_store_dc_maps = db.get_df(Q_STORE_DC_MAPS.format(schema=schema))
df_store_dc_maps.columns = [c.replace('-', '_') for c in
df_store_dc_maps.columns]
df_drugs = db.get_df(Q_DRUGS.format(schema=schema))
df_drugs.columns = [c.replace('-', '_') for c in df_drugs.columns]
df_distributors = db.get_df(Q_DISTRIBUTORS.format(schema=schema))
df_distributors.columns = [c.replace('-', '_') for c in
df_distributors.columns]
df_distributors = df_distributors.dropna()
df_distributors = df_distributors.loc[df_distributors["drug_type"] != '']
df_dc_distributors_mapping = db.get_df(
Q_DC_DISTRIBUTOR_MAPPING.format(schema=schema))
df_dc_distributors_mapping.columns = [c.replace('-', '_') for c in
df_dc_distributors_mapping.columns]
df_distributor_drugs = db.get_df(Q_DISTRIBUTOR_DRUGS.format(schema=schema))
df_distributor_drugs.columns = [c.replace('-', '_') for c in
df_distributor_drugs.columns]
df_distributor_drugs.drop_duplicates(inplace=True)
# ensure data types
df_rates["avg_mrp"] = df_rates["avg_mrp"].astype(float)
df_rates["avg_purchase_rate"] = df_rates["avg_purchase_rate"].astype(float)
return df_sb, df_rates, df_store_dc_maps, df_drugs, df_distributors, \
df_dc_distributors_mapping, df_distributor_drugs
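

# Illustrative usage of pull_data_dc (hypothetical values; `db` is any client
# exposing get_df(query) -> pandas.DataFrame, as used throughout this module):
#   df_sb, df_rates, df_store_dc_maps, df_drugs, df_distributors, \
#       df_dc_distributors_mapping, df_distributor_drugs = pull_data_dc(
#           reset_date="2022-09-01", time_interval=90, db=db, schema=schema)
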
def pull_data_franchisee(reset_date, franchisee_stores, time_interval,
db, schema):
df_franchisee_stores = db.get_df(Q_FRANCHISEE_STORES.format(schema=schema))
all_franchisee_stores = df_franchisee_stores["id"].to_list()
if franchisee_stores == [0]:
franchisee_stores_execute_query = f"""
and sb."store-id" in {str(all_franchisee_stores).replace('[', '(').replace(']', ')')}
"""
else:
# only take valid franchisee stores
franchisee_stores = list(
set(franchisee_stores).intersection(all_franchisee_stores))
franchisee_stores_execute_query = f"""
and sb."store-id" in {str(franchisee_stores).replace('[', '(').replace(']', ')')}
"""
df_sb = db.get_df(Q_SB.format(
reset_date=reset_date, time_interval=time_interval, schema=schema,
franchisee_stores_execute_query=franchisee_stores_execute_query))
df_sb.columns = [c.replace('-', '_') for c in df_sb.columns]
df_rates = db.get_df(Q_RATES.format(
reset_date=reset_date, time_interval=time_interval, schema=schema))
df_rates.columns = [c.replace('-', '_') for c in df_rates.columns]
df_store_dc_maps = db.get_df(Q_STORE_DC_MAPS.format(schema=schema))
df_store_dc_maps.columns = [c.replace('-', '_') for c in
df_store_dc_maps.columns]
df_drugs = db.get_df(Q_DRUGS.format(schema=schema))
df_drugs.columns = [c.replace('-', '_') for c in df_drugs.columns]
df_distributors = db.get_df(Q_DISTRIBUTORS.format(schema=schema))
df_distributors.columns = [c.replace('-', '_') for c in
df_distributors.columns]
df_distributors = df_distributors.dropna()
df_distributors = df_distributors.loc[df_distributors["drug_type"] != '']
df_dc_distributors_mapping = db.get_df(
Q_DC_DISTRIBUTOR_MAPPING.format(schema=schema))
df_dc_distributors_mapping.columns = [c.replace('-', '_') for c in
df_dc_distributors_mapping.columns]
df_distributor_drugs = db.get_df(Q_DISTRIBUTOR_DRUGS.format(schema=schema))
df_distributor_drugs.columns = [c.replace('-', '_') for c in
df_distributor_drugs.columns]
df_distributor_drugs.drop_duplicates(inplace=True)
# ensure data types
df_rates["avg_mrp"] = df_rates["avg_mrp"].astype(float)
df_rates["avg_purchase_rate"] = df_rates["avg_purchase_rate"].astype(float)
return df_sb, df_rates, df_store_dc_maps, df_drugs, df_distributors, \
           df_dc_distributors_mapping, df_distributor_drugs


# ---- end of zeno_etl_libs/utils/distributor_ranking2/pull_data1.py ----

import pandas as pd
import numpy as np
def post_process_ranking_dc(features, rank_drug_lvl, rank_drug_type_lvl,
final_ranks, weights_dc_drug_lvl, weights_dc_type_lvl):
# add drug_id dummy column in type lvl
rank_drug_type_lvl["drug_id"] = np.nan
# add weights column
rank_drug_type_lvl["weights"] = str(weights_dc_type_lvl)
rank_drug_lvl["weights"] = str(weights_dc_drug_lvl)
# additional details to be added
drugs_info = features[["drug_id", "drug_type", "drug_name"]].drop_duplicates()
dc_info = features[["partial_dc_id", "dc_name"]].drop_duplicates()
distributor_info = features[["partial_distributor_id", "partial_distributor_name"]].drop_duplicates()
# adding details into drug_lvl_df
rank_drug_lvl = rank_drug_lvl.merge(drugs_info, on="drug_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(dc_info, on="partial_dc_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(
distributor_info, on="partial_distributor_id", how="left")
# adding details into drug_type_lvl_df
rank_drug_type_lvl = rank_drug_type_lvl.merge(dc_info,
on="partial_dc_id", how="left")
rank_drug_type_lvl = rank_drug_type_lvl.merge(
distributor_info, on="partial_distributor_id", how="left")
# combine drug_lvl and drug_type_lvl df
ranked_features = pd.concat([rank_drug_lvl, rank_drug_type_lvl], axis=0)
# add details into final_ranks df
final_ranks = final_ranks.merge(dc_info, on="partial_dc_id", how="left")
final_ranks = final_ranks.merge(drugs_info[["drug_id", "drug_name"]], on="drug_id", how="left")
# add columns for franchisee rank addition because
    # both dc & franchisee features/ranks need to be written to the same table.
final_ranks["franchisee_id"] = 1 # zippin id
final_ranks["store_id"] = np.nan
final_ranks["store_name"] = ""
ranked_features["franchisee_id"] = 1 # zippin id
ranked_features["store_id"] = np.nan
ranked_features["store_name"] = ""
ranked_features["request_volume_store_dist"] = np.nan
ranked_features["rank_store_dist_credit_period"] = np.nan
ranked_features["rank_store_dist_volume"] = np.nan
return final_ranks, ranked_features
def post_process_ranking_franchisee(features, rank_drug_lvl, rank_drug_type_lvl,
final_ranks, weights_franchisee_drug_lvl,
weights_franchisee_type_lvl):
# add drug_id dummy column in type lvl
rank_drug_type_lvl["drug_id"] = np.nan
# add weights column
rank_drug_type_lvl["weights"] = str(weights_franchisee_type_lvl)
rank_drug_lvl["weights"] = str(weights_franchisee_drug_lvl)
# additional details to be added
drugs_info = features[["drug_id", "drug_type", "drug_name"]].drop_duplicates()
store_info = features[["store_id", "store_name", "franchisee_id"]].drop_duplicates()
distributor_info = features[["partial_distributor_id",
"partial_distributor_name"]].drop_duplicates()
# adding details into drug_lvl_df
rank_drug_lvl = rank_drug_lvl.merge(drugs_info, on="drug_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(store_info, on="store_id", how="left")
rank_drug_lvl = rank_drug_lvl.merge(
distributor_info, on="partial_distributor_id", how="left")
# adding details into drug_type_lvl_df
rank_drug_type_lvl = rank_drug_type_lvl.merge(store_info,
on="store_id",
how="left")
rank_drug_type_lvl = rank_drug_type_lvl.merge(
distributor_info, on="partial_distributor_id", how="left")
# combine drug_lvl and drug_type_lvl df
ranked_features = pd.concat([rank_drug_lvl, rank_drug_type_lvl], axis=0)
# add details into final_ranks df
final_ranks = final_ranks.merge(store_info, on="store_id", how="left")
final_ranks = final_ranks.merge(drugs_info[["drug_id", "drug_name"]],
on="drug_id", how="left")
# add columns for dc rank addition because
    # both dc & franchisee features/ranks need to be written to the same table.
final_ranks["partial_dc_id"] = np.nan
final_ranks["dc_name"] = ""
final_ranks["correction_flags"] = ""
ranked_features["partial_dc_id"] = np.nan
ranked_features["dc_name"] = ""
ranked_features["request_volume_dc_dist"] = np.nan
ranked_features["rank_dc_dist_credit_period"] = np.nan
ranked_features["rank_dc_dist_volume"] = np.nan
    return final_ranks, ranked_features


# ---- end of zeno_etl_libs/utils/distributor_ranking2/post_process_ranking.py ----

from functools import reduce
import numpy as np
import pandas as pd
import datetime as dt
def calculate_features(df_features, reset_date, time_interval, logger, group_cols):
"""
DC-LEVEL: group_cols=['partial_dc_id','partial_distributor_id', 'drug_id']
FRANCHISEE-LEVEL: group_cols=['store_id','partial_distributor_id', 'drug_id']
"""
dfx = df_features[df_features['invoice_count'] != 0]
# ========================== MARGIN CALCULATION ==========================
# (follows same logic as in distributor ranking 1.0)
logger.info("Calculating margin")
df_margin = dfx.copy()
df_margin['margin'] = (df_margin['mrp'] -
df_margin['distributor_rate']) / df_margin[
'mrp']
df_margin = df_margin.groupby(group_cols).agg(
margin=('margin', 'mean')).reset_index()
# sanity check
assert df_margin.shape[0] == dfx[group_cols].drop_duplicates().shape[0]
# ====================== WTD.FULFILLMENT CALCULATION ======================
logger.info("Calculating wtd.ff")
# get length of 3 period split
period_length = round(time_interval / 3)
# p1 : t-1 (latest period)
# p2 : t-2 period
# p3 : t-3 period
p1_end = pd.Timestamp(reset_date - dt.timedelta(6))
p1_start = p1_end - dt.timedelta(period_length)
p2_end = p1_start
p2_start = p2_end - dt.timedelta(period_length)
p3_end = p2_start
p3_start = p3_end - dt.timedelta(period_length + 1)
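    # worked example (assuming time_interval=90 and reset_date=2022-09-01):
    # period_length=30, p1 ~ 2022-07-27 to 2022-08-26, p2 ~ 2022-06-27 to
    # 2022-07-27, p3 ~ 2022-05-27 to 2022-06-27; the 6 days just before the
    # reset date are excluded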
df_ff_1 = ff_calc(dfx, group_cols, p_start=p1_start, p_end=p1_end, period_flag="p1")
df_ff_2 = ff_calc(dfx, group_cols, p_start=p2_start, p_end=p2_end, period_flag="p2")
df_ff_3 = ff_calc(dfx, group_cols, p_start=p3_start, p_end=p3_end, period_flag="p3")
df_ff_comb = pd.concat([df_ff_1, df_ff_2, df_ff_3], axis=0)
# count cases where all 3 or 2 or 1 periods data present
df_ff_period_cnt = df_ff_comb.groupby(group_cols, as_index=False).agg(
{"period_flag": "count"})
df_ff_period_cnt.rename({"period_flag": "period_count"}, axis=1,
inplace=True)
# Cases with 3 periods present
# weighted by 0.5, 0.3, 0.2 for p1, p2, p3 respectively
df_3p = df_ff_period_cnt.loc[df_ff_period_cnt["period_count"] == 3][
group_cols]
df_ff_comb_3p = df_ff_comb.merge(df_3p, on=group_cols, how="inner")
    # assign weights from the period flag itself so each row gets the weight
    # of its own period, irrespective of row order after the concat above
    df_ff_comb_3p['weights'] = df_ff_comb_3p['period_flag'].map(
        {"p1": 0.5, "p2": 0.3, "p3": 0.2})
df_ff_comb_3p["wtd_ff"] = df_ff_comb_3p["ff"] * df_ff_comb_3p["weights"]
df_ff_comb_3p = df_ff_comb_3p.groupby(group_cols, as_index=False).agg(
{"wtd_ff": "sum"})
# Cases with 2 periods present
# weighted by 0.6, 0.4 for latest, early respectively
df_2p = df_ff_period_cnt.loc[df_ff_period_cnt["period_count"] == 2][
group_cols]
df_ff_comb_2p = df_ff_comb.merge(df_2p, on=group_cols, how="inner")
    # identify the more recent of the two periods present in each group
    # (p1 is the latest) so it gets 0.6 and the earlier one gets 0.4,
    # irrespective of row order after the concat above
    df_ff_comb_2p['period_num'] = df_ff_comb_2p['period_flag'].map(
        {"p1": 1, "p2": 2, "p3": 3})
    latest = df_ff_comb_2p.groupby(group_cols)['period_num'].transform('min')
    df_ff_comb_2p['weights'] = np.where(
        df_ff_comb_2p['period_num'] == latest, 0.6, 0.4)
df_ff_comb_2p["wtd_ff"] = df_ff_comb_2p["ff"] * df_ff_comb_2p["weights"]
df_ff_comb_2p = df_ff_comb_2p.groupby(group_cols, as_index=False).agg(
{"wtd_ff": "sum"})
# Cases with 1 period present
# weighted by 1 for whatever period present
df_1p = df_ff_period_cnt.loc[df_ff_period_cnt["period_count"] == 1][
group_cols]
df_ff_comb_1p = df_ff_comb.merge(df_1p, on=group_cols, how="inner")
df_ff_comb_1p = df_ff_comb_1p[group_cols + ["ff"]]
df_ff_comb_1p.rename({"ff": "wtd_ff"}, axis=1, inplace=True)
# combine all
df_ff_comb = pd.concat([df_ff_comb_3p, df_ff_comb_2p, df_ff_comb_1p],
axis=0)
# ======================== DIST VOLUME CALCULATION ========================
if group_cols[0] == "partial_dc_id":
base_lvl = 'dc'
else:
base_lvl = 'store'
logger.info(f"Calculating {base_lvl}-distributor volume")
df_vol = df_features.groupby(group_cols).agg(
total_lost=('is_lost', 'sum'),
total_requests=('is_lost', 'count')).reset_index()
df_vol["ff_requests"] = df_vol["total_requests"] - df_vol["total_lost"]
df_vol["ff_requests"] = np.where(df_vol["ff_requests"] < 0, 0,
df_vol["ff_requests"])
df_vol.drop(["total_lost", "total_requests"], axis=1, inplace=True)
if base_lvl == 'dc':
# calculate request volume dc
request_volume_dc = df_vol.groupby("partial_dc_id", as_index=False).agg(
total_requests_dc=("ff_requests", "sum"))
request_volume_dc_dist = df_vol.groupby(
["partial_dc_id", "partial_distributor_id"], as_index=False).agg(
total_requests_dc_dist=("ff_requests", "sum"))
df_vol = df_vol.merge(request_volume_dc_dist,
on=["partial_dc_id", "partial_distributor_id"],
how="left")
df_vol = df_vol.merge(request_volume_dc, on="partial_dc_id", how="left")
df_vol["request_volume_dc_dist"] = df_vol["total_requests_dc_dist"] / \
df_vol["total_requests_dc"]
df_vol.drop(["total_requests_dc_dist", "total_requests_dc"], axis=1,
inplace=True)
else:
# calculate request volume store (franchisee)
request_volume_store = df_vol.groupby("store_id", as_index=False).agg(
total_requests_store=("ff_requests", "sum"))
request_volume_store_dist = df_vol.groupby(
["store_id", "partial_distributor_id"], as_index=False).agg(
total_requests_store_dist=("ff_requests", "sum"))
df_vol = df_vol.merge(request_volume_store_dist,
on=["store_id", "partial_distributor_id"],
how="left")
df_vol = df_vol.merge(request_volume_store, on="store_id", how="left")
df_vol["request_volume_store_dist"] = df_vol["total_requests_store_dist"] / \
df_vol["total_requests_store"]
df_vol.drop(["total_requests_store_dist", "total_requests_store"], axis=1,
inplace=True)
# =========================== COMPILE FEATURES ===========================
logger.info("Compiling all features")
meg_list = [df_margin, df_ff_comb, df_vol]
features = reduce(
lambda left, right: pd.merge(left, right,
on=group_cols,
how='outer'), meg_list)
# rounding off to 3 significant digits
features["margin"] = np.round(features["margin"], 3)
features["wtd_ff"] = np.round(features["wtd_ff"], 3)
features[f"request_volume_{base_lvl}_dist"] = np.round(
features[f"request_volume_{base_lvl}_dist"], 3)
return features
def ff_calc(dfx, group_cols, p_start=None, p_end=None, period_flag="None"):
"""
Base FF calculation same as in distributor ranking 1.0
"""
# split base data by period
dfx = dfx.loc[
(dfx["original_created_at"] > p_start) &
(dfx["original_created_at"] < p_end)]
df_sorted = dfx.groupby(['short_book_1_id'], as_index=False).apply(
lambda x: x.sort_values(by=['partial_invoiced_at']))
# for multiple invoices, calculate cumulative fulfilled quantities
df_sorted = df_sorted.groupby(['short_book_1_id']).apply(
lambda x: x['partial_quantity'].cumsum()).reset_index().rename(
columns={'partial_quantity': 'cum_partial_quantity'})
df_sorted = df_sorted.set_index('level_1')
df_fulfillment = pd.merge(df_sorted, dfx, left_index=True,
right_index=True, how='left', suffixes=('', '_y'))
assert df_fulfillment['short_book_1_id'].equals(
df_fulfillment['short_book_1_id_y'])
df_fulfillment = df_fulfillment[
['short_book_1_id'] + group_cols + ['original_order', 'partial_quantity',
'cum_partial_quantity']]
# cum required quantity is quantity left after subtracting cum quantity from all previous invoices.
df_fulfillment['cum_required_quantity'] = df_fulfillment['original_order'] - \
df_fulfillment['cum_partial_quantity']
    # the real required quantity while placing an order is the quantity left
    # unfulfilled by the previous invoice, hence the shift by 1
df_fulfillment['actual_required'] = df_fulfillment.groupby(
['short_book_1_id']).shift(1)['cum_required_quantity']
# fill single invoices with the original order
df_fulfillment['actual_required'] = df_fulfillment['actual_required'].fillna(
df_fulfillment['original_order'])
# put actual required = 0 when ordered exceeds required.
df_fulfillment.loc[df_fulfillment['actual_required']
< 0, 'actual_required'] = 0
df_fulfillment['redundant_order_flag'] = np.where(
df_fulfillment['actual_required'] == 0, 1, 0)
df_fulfillment = df_fulfillment[['short_book_1_id'] + group_cols +
['original_order', 'partial_quantity',
'actual_required', 'redundant_order_flag']]
df_fulfillment['ff'] = df_fulfillment['partial_quantity'] / \
df_fulfillment['actual_required']
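    # worked example: an order of 10 covered by two invoices of 4 and 3 units
    # gives actual_required of 10 and 6, hence ff = 4/10 = 0.4 and 3/6 = 0.5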
    # where nothing was actually required but quantity was still invoiced,
    # treat the line as fully fulfilled (ff = 1); such redundant orders are
    # dropped below anyway
df_fulfillment.loc[(df_fulfillment['actual_required'] == 0) & (
df_fulfillment['partial_quantity'] > 0), 'ff'] = 1
df_fulfillment.loc[(df_fulfillment['ff'] > 1), 'ff'] = 1
# removed redundant orders here.
df_ff = df_fulfillment[df_fulfillment['redundant_order_flag'] != 1].groupby(
group_cols).agg(ff=('ff', 'mean')).reset_index()
# add period_flag
df_ff["period_flag"] = period_flag
    return df_ff


# ---- end of zeno_etl_libs/utils/distributor_ranking2/calculate_features.py ----

import pandas as pd
import numpy as np
import math
from zeno_etl_libs.utils.distributor_ranking2.correction_flag import add_corr_flag
def calc_ranks_dc(features, weights_dc_drug_lvl, weights_dc_type_lvl, logger):
# =========================== DRUG LEVEL RANKING ==========================
logger.info("DC-drug level ranking starts")
# select only relevant columns required for ranking
rank_drug_lvl = features[
['dc_id', 'distributor_id', 'drug_id', 'margin',
'wtd_ff', 'dist_type_portfolio_size', 'distributor_credit_period',
'request_volume_dc_dist']]
# set significant digits for features with decimal points
rank_drug_lvl["margin"] = np.round(rank_drug_lvl["margin"], 3)
rank_drug_lvl["wtd_ff"] = np.round(rank_drug_lvl["wtd_ff"], 3)
rank_drug_lvl["request_volume_dc_dist"] = np.round(
rank_drug_lvl["request_volume_dc_dist"], 3)
# rank each features
rank_drug_lvl["rank_margin"] = \
rank_drug_lvl.groupby(['dc_id', 'drug_id'])['margin'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_ff"] = \
rank_drug_lvl.groupby(['dc_id', 'drug_id'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_lvl.groupby(['dc_id', 'drug_id'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_lvl["rank_dc_dist_credit_period"] = \
rank_drug_lvl.groupby(['dc_id'])[
'distributor_credit_period'].rank(method='dense',
ascending=False)
rank_drug_lvl['rank_dc_dist_volume'] = features.groupby(['dc_id'])[
'request_volume_dc_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin & ff
rank_drug_lvl["wtd_rank"] = (rank_drug_lvl["rank_margin"] *
weights_dc_drug_lvl["margin"]) + \
(rank_drug_lvl["rank_ff"] *
weights_dc_drug_lvl["ff"])
rank_drug_lvl["wtd_rank"] = np.round(rank_drug_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["dc_id", "drug_id"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_dc_dist_credit_period",
"rank_dc_dist_volume",
"rank_dist_type_portfolio_size"]
sort_asc_order = group_col_sort_asc_order + [True, True, True, True]
rank_drug_lvl = rank_drug_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_lvl['index'] = rank_drug_lvl.index
# final ranking based on preference order
rank_drug_lvl["final_rank"] = \
rank_drug_lvl.groupby(['dc_id', 'drug_id'])['index'].rank(
method='first', ascending=True)
rank_drug_lvl.drop('index', axis=1, inplace=True)
# ========================== D.TYPE LEVEL RANKING =========================
logger.info("DC-drug-type level ranking starts")
# select only relevant columns required for ranking
rank_drug_type_lvl = features[
['dc_id', 'distributor_id', 'drug_id', 'drug_type',
'margin', 'wtd_ff', 'dist_type_portfolio_size',
'distributor_credit_period', 'request_volume_dc_dist']]
# group by dc-distributor-drug_type level and calculate features
rank_drug_type_lvl = rank_drug_type_lvl.groupby(
["dc_id", "distributor_id", "drug_type"],
as_index=False).agg({"margin": np.average, "wtd_ff": np.average,
"dist_type_portfolio_size": "first",
"distributor_credit_period": "first",
"request_volume_dc_dist": "first"})
# round features to 3 significant digits
rank_drug_type_lvl["margin"] = np.round(rank_drug_type_lvl["margin"], 3)
rank_drug_type_lvl["wtd_ff"] = np.round(rank_drug_type_lvl["wtd_ff"], 3)
rank_drug_type_lvl["request_volume_dc_dist"] = np.round(
rank_drug_type_lvl["request_volume_dc_dist"], 3)
# rank each features
rank_drug_type_lvl["rank_margin"] = \
rank_drug_type_lvl.groupby(['dc_id', 'drug_type'])['margin'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_ff"] = \
rank_drug_type_lvl.groupby(['dc_id', 'drug_type'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_type_lvl.groupby(['dc_id', 'drug_type'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_type_lvl["rank_dc_dist_credit_period"] = \
rank_drug_type_lvl.groupby(['dc_id'])[
'distributor_credit_period'].rank(method='dense',
ascending=False)
rank_drug_type_lvl['rank_dc_dist_volume'] = \
rank_drug_type_lvl.groupby(['dc_id'])[
'request_volume_dc_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin, ff & portfolio size
rank_drug_type_lvl["wtd_rank"] = (rank_drug_type_lvl["rank_margin"] *
weights_dc_type_lvl["margin"]) + \
(rank_drug_type_lvl["rank_ff"] *
weights_dc_type_lvl["ff"]) + \
(rank_drug_type_lvl["rank_dist_type_portfolio_size"] *
weights_dc_type_lvl["portfolio_size"])
rank_drug_type_lvl["wtd_rank"] = np.round(rank_drug_type_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["dc_id", "drug_type"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_dc_dist_credit_period",
"rank_dc_dist_volume"]
sort_asc_order = group_col_sort_asc_order + [True, True, True]
rank_drug_type_lvl = rank_drug_type_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_type_lvl['index'] = rank_drug_type_lvl.index
# final ranking based on preference order
rank_drug_type_lvl["final_rank"] = \
rank_drug_type_lvl.groupby(['dc_id', 'drug_type'])['index'].rank(
method='first', ascending=True)
rank_drug_type_lvl.drop('index', axis=1, inplace=True)
# ================== DISQUALIFY POOR DC-DRUG-DISTRIBUTORS =================
    # If a distributor with poor wtd.ff lands at rank 3 or worse, disqualify it.
    # The vacated rank 3 slot then becomes available for the slot-filling logic
    # to assign another distributor, which gets a chance to fulfill orders. If
    # the newly assigned distributor performs well, it will rank better in
    # subsequent resets; otherwise it gets disqualified the same way in later
    # resets. This keeps the cycle constantly looking for better distributors,
    # instead of locking onto the same poor distributor reset after reset.
disq_entries = rank_drug_lvl.copy()
# disqualify condition
disq_entries["disqualify"] = np.where(
(disq_entries["final_rank"] >= 3) & (disq_entries["wtd_ff"] < 0.4),
1, 0)
disq_entries = disq_entries.loc[(disq_entries["disqualify"] == 1)]
disq_entries = disq_entries[["dc_id", "distributor_id",
"drug_id", "disqualify"]]
return rank_drug_lvl, rank_drug_type_lvl, disq_entries
def get_final_ranks_dc(rank_drug_lvl, rank_drug_type_lvl, disq_entries,
features, df_distributor_drugs, df_distributors,
df_dc_distributors_mapping, weights_dc_drug_lvl, logger):
"""
    Get final ranking format and apply slot-filling logic to fill rank slots
    that are empty.
"""
final_ranks = rank_drug_lvl[["dc_id", "drug_id"]].drop_duplicates()
final_ranks = final_ranks.merge(
features[["drug_id", "drug_type"]].drop_duplicates(), on="drug_id",
how="left")
# remove disqualified entries
rank_drug_lvl = rank_drug_lvl.merge(
disq_entries, on=["dc_id", "distributor_id", "drug_id"],
how="left")
rank_drug_lvl = rank_drug_lvl.loc[rank_drug_lvl["disqualify"] != 1]
logger.info("Creating final df format")
# make final ranking df
for rank in [1, 2, 3]:
df_rank = rank_drug_lvl.loc[rank_drug_lvl["final_rank"] == rank]
df_rank = df_rank[
["dc_id", "drug_id", "distributor_id"]]
df_rank.rename({"distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks = final_ranks.merge(df_rank,
on=["dc_id", "drug_id"],
how="left")
final_ranks[f"distributor_rank_{rank}"] = final_ranks[
f"distributor_rank_{rank}"].astype(float)
# ================== FILL MISSING RANK SLOTS DC-DRUG LVL ==================
# get all dc-drug with missing slots
logger.info("Get allowable dc-drug-distributors to fill slots")
missing_rank_dc_drugs = final_ranks.loc[
(final_ranks["distributor_rank_2"].isna()) | (final_ranks["distributor_rank_3"].isna())]
missing_rank_dc_drugs = missing_rank_dc_drugs[["dc_id", "drug_id", "drug_type"]]
# list all missing drugs
list_missing_rank_drugs = list(missing_rank_dc_drugs["drug_id"].unique())
# get all distributors with missing drugs in their portfolio
select_distributor_drugs = df_distributor_drugs.loc[
df_distributor_drugs["drug_id"].isin(list_missing_rank_drugs)]
# assign it to all dc
available_mappings = missing_rank_dc_drugs.merge(select_distributor_drugs,
on="drug_id", how="left")
# merge distributor details
available_mappings = available_mappings.merge(
df_distributors[["distributor_id", "distributor_credit_period"]].drop_duplicates(),
on="distributor_id", how="left")
# calculate features on drug_type level for dc-distributors (margin & ff)
distributor_type_lvl_features = features.groupby(
["dc_id", "distributor_id", "drug_type"],
as_index=False).agg({"margin": np.average, "wtd_ff": np.average,
"request_volume_dc_dist": "first"})
available_mappings = available_mappings.merge(
distributor_type_lvl_features, on=["dc_id",
"distributor_id",
"drug_type"], how="left")
# fill na and set significant digits
available_mappings["margin"] = available_mappings["margin"].fillna(0)
available_mappings["wtd_ff"] = available_mappings["wtd_ff"].fillna(0)
available_mappings["request_volume_dc_dist"] = available_mappings[
"request_volume_dc_dist"].fillna(0)
available_mappings["margin"] = np.round(available_mappings["margin"], 3)
available_mappings["wtd_ff"] = np.round(available_mappings["wtd_ff"], 3)
available_mappings["request_volume_dc_dist"] = np.round(
available_mappings["request_volume_dc_dist"], 3)
# remove inactive dc-distributors
available_mappings = available_mappings.merge(
df_dc_distributors_mapping, on=["dc_id", "distributor_id"],
how="inner")
# remove disqualified entries
available_mappings = available_mappings.merge(
disq_entries, on=["dc_id", "distributor_id", "drug_id"],
how="left")
available_mappings = available_mappings.loc[available_mappings["disqualify"] != 1]
# ranking distributors based on dc-drug level logic
logger.info("Ranking allowable dc-drug-distributors")
available_mapping_ranked = available_mappings.copy()
available_mapping_ranked["rank_margin"] = \
available_mapping_ranked.groupby(['dc_id', 'drug_id'])[
'margin'].rank(method='dense', ascending=False)
available_mapping_ranked["rank_ff"] = \
available_mapping_ranked.groupby(['dc_id', 'drug_id'])[
'wtd_ff'].rank(method='dense', ascending=False)
available_mapping_ranked["rank_dc_dist_credit_period"] = \
available_mapping_ranked.groupby(['dc_id'])[
'distributor_credit_period'].rank(method='dense',
ascending=False)
available_mapping_ranked['rank_dc_dist_volume'] = \
available_mapping_ranked.groupby(['dc_id'])[
'request_volume_dc_dist'].rank(method='dense', ascending=False)
# calculate wtd.ranks
available_mapping_ranked["wtd_rank"] = (available_mapping_ranked["rank_margin"] *
weights_dc_drug_lvl["margin"]) + \
(available_mapping_ranked["rank_ff"] *
weights_dc_drug_lvl["ff"])
available_mapping_ranked["wtd_rank"] = np.round(
available_mapping_ranked["wtd_rank"], 1)
# set sorting order
group_cols = ["dc_id", "drug_id"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_dc_dist_credit_period",
"rank_dc_dist_volume"]
sort_asc_order = group_col_sort_asc_order + [True, True, True]
available_mapping_ranked = available_mapping_ranked.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
available_mapping_ranked['index'] = available_mapping_ranked.index
# get final ranks
available_mapping_ranked["final_rank"] = \
available_mapping_ranked.groupby(['dc_id', 'drug_id'])[
'index'].rank(method='first', ascending=True)
available_mapping_ranked.drop('index', axis=1, inplace=True)
pre_corr = final_ranks.copy() # to compare pre-post correction
# adding auxiliary ranking to empty slot dc-drugs
logger.info("Filling empty rank slots with ranked distributors")
for rank in [1, 2, 3]:
df_rank = available_mapping_ranked.loc[
available_mapping_ranked["final_rank"] == rank]
df_rank = df_rank[
["dc_id", "drug_id", "distributor_id"]]
df_rank.rename(
{"distributor_id": f"aux_distributor_rank_{rank}"}, axis=1,
inplace=True)
final_ranks = final_ranks.merge(df_rank,
on=["dc_id", "drug_id"],
how="left")
final_ranks[f"aux_distributor_rank_{rank}"] = final_ranks[
f"aux_distributor_rank_{rank}"].astype(float)
for index, row in final_ranks.iterrows():
# if rank 2 empty and aux_rank present
if math.isnan(row["distributor_rank_2"]) & \
(not math.isnan(row["aux_distributor_rank_1"])):
if row["aux_distributor_rank_1"] != row["distributor_rank_1"]:
final_ranks.loc[index, "distributor_rank_2"] = row[
"aux_distributor_rank_1"]
elif not math.isnan(row["aux_distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_2"] = row[
"aux_distributor_rank_2"]
for index, row in final_ranks.iterrows():
# if rank 1 & 2 filled, rank 3 empty and aux_ranks present
if (not math.isnan(row["distributor_rank_1"])) & \
(not math.isnan(row["distributor_rank_2"])) & \
(math.isnan(row["distributor_rank_3"])):
if (not math.isnan(row["aux_distributor_rank_1"])) & \
(row["aux_distributor_rank_1"] != row["distributor_rank_1"]) & \
(row["aux_distributor_rank_1"] != row["distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_3"] = row[
"aux_distributor_rank_1"]
elif (not math.isnan(row["aux_distributor_rank_2"])) & \
(row["aux_distributor_rank_2"] != row["distributor_rank_1"]) & \
(row["aux_distributor_rank_2"] != row["distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_3"] = row[
"aux_distributor_rank_2"]
elif (not math.isnan(row["aux_distributor_rank_3"])) & \
(row["aux_distributor_rank_3"] != row["distributor_rank_1"]) & \
(row["aux_distributor_rank_3"] != row["distributor_rank_2"]):
final_ranks.loc[index, "distributor_rank_3"] = row[
"aux_distributor_rank_3"]
final_ranks = final_ranks.drop(
["aux_distributor_rank_1", "aux_distributor_rank_2",
"aux_distributor_rank_3"], axis=1)
post_corr = final_ranks.copy() # to compare pre-post correction
# add correction flags where rank2 & rank3 slot filling took place
logger.info("Adding correction flags for filled rank slots")
final_ranks = add_corr_flag(final_ranks, pre_corr, post_corr,
col_to_compare="distributor_rank_2",
corr_flag="R2F",
group_cols=["dc_id", "drug_id"])
final_ranks = add_corr_flag(final_ranks, pre_corr, post_corr,
col_to_compare="distributor_rank_3",
corr_flag="R3F",
group_cols=["dc_id", "drug_id"])
# ================== COMBINE DC-DRUG LVL & DC-TYPE LVL ===================
# add dc-drug-type level ranking
logger.info("Adding dc-drug-type level ranking to final df")
final_ranks_type_lvl = rank_drug_type_lvl[
["dc_id", "drug_type"]].drop_duplicates()
# create dc-type level final ranking format
for rank in [1, 2, 3]:
df_rank = rank_drug_type_lvl.loc[
rank_drug_type_lvl["final_rank"] == rank]
df_rank = df_rank[
["dc_id", "drug_type", "distributor_id"]]
df_rank.rename({"distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks_type_lvl = final_ranks_type_lvl.merge(df_rank,
on=["dc_id",
"drug_type"],
how="left")
final_ranks_type_lvl[f"distributor_rank_{rank}"] = final_ranks_type_lvl[
f"distributor_rank_{rank}"].astype(float)
# combine dc-drug lvl and dc-drug-type lvl
final_ranks = pd.concat([final_ranks, final_ranks_type_lvl], axis=0)
final_ranks["correction_flags"] = final_ranks["correction_flags"].fillna("")
return final_ranks
def calc_ranks_franchisee(features, weights_franchisee_drug_lvl,
weights_franchisee_type_lvl, logger):
# =========================== DRUG LEVEL RANKING ==========================
logger.info("Franchisee-store-drug level ranking starts")
# select only relevant columns required for ranking
rank_drug_lvl = features[
['store_id', 'distributor_id', 'drug_id', 'margin',
'wtd_ff', 'dist_type_portfolio_size', 'distributor_credit_period',
'request_volume_store_dist']]
# set significant digits for features with decimal points
rank_drug_lvl["margin"] = np.round(rank_drug_lvl["margin"], 3)
rank_drug_lvl["wtd_ff"] = np.round(rank_drug_lvl["wtd_ff"], 3)
rank_drug_lvl["request_volume_store_dist"] = np.round(
rank_drug_lvl["request_volume_store_dist"], 3)
# rank each features
rank_drug_lvl["rank_margin"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])['margin'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_ff"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_lvl["rank_store_dist_credit_period"] = \
rank_drug_lvl.groupby(['store_id'])[
'distributor_credit_period'].rank(method='dense',
ascending=False)
rank_drug_lvl['rank_store_dist_volume'] = features.groupby(['store_id'])[
'request_volume_store_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin & ff
rank_drug_lvl["wtd_rank"] = (rank_drug_lvl["rank_margin"] *
weights_franchisee_drug_lvl["margin"]) + \
(rank_drug_lvl["rank_ff"] *
weights_franchisee_drug_lvl["ff"])
rank_drug_lvl["wtd_rank"] = np.round(rank_drug_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["store_id", "drug_id"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_store_dist_credit_period",
"rank_store_dist_volume",
"rank_dist_type_portfolio_size"]
sort_asc_order = group_col_sort_asc_order + [True, True, True, True]
rank_drug_lvl = rank_drug_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_lvl['index'] = rank_drug_lvl.index
# final ranking based on preference order
rank_drug_lvl["final_rank"] = \
rank_drug_lvl.groupby(['store_id', 'drug_id'])['index'].rank(
method='first', ascending=True)
rank_drug_lvl.drop('index', axis=1, inplace=True)
# ========================== D.TYPE LEVEL RANKING =========================
logger.info("Franchisee-drug-type level ranking starts")
# select only relevant columns required for ranking
rank_drug_type_lvl = features[
['store_id', 'distributor_id', 'drug_id', 'drug_type',
'margin', 'wtd_ff', 'dist_type_portfolio_size',
'distributor_credit_period', 'request_volume_store_dist']]
# group by dc-distributor-drug_type level and calculate features
rank_drug_type_lvl = rank_drug_type_lvl.groupby(
["store_id", "distributor_id", "drug_type"],
as_index=False).agg({"margin": np.average, "wtd_ff": np.average,
"dist_type_portfolio_size": "first",
"distributor_credit_period": "first",
"request_volume_store_dist": "first"})
# round features to 3 significant digits
rank_drug_type_lvl["margin"] = np.round(rank_drug_type_lvl["margin"], 3)
rank_drug_type_lvl["wtd_ff"] = np.round(rank_drug_type_lvl["wtd_ff"], 3)
rank_drug_type_lvl["request_volume_store_dist"] = np.round(
rank_drug_type_lvl["request_volume_store_dist"], 3)
# rank each features
rank_drug_type_lvl["rank_margin"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])['margin'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_ff"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])['wtd_ff'].rank(
method='dense', ascending=False)
rank_drug_type_lvl["rank_dist_type_portfolio_size"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])[
'dist_type_portfolio_size'].rank(method='dense', ascending=False)
rank_drug_type_lvl["rank_store_dist_credit_period"] = \
rank_drug_type_lvl.groupby(['store_id'])[
'distributor_credit_period'].rank(method='dense',
ascending=False)
rank_drug_type_lvl['rank_store_dist_volume'] = \
rank_drug_type_lvl.groupby(['store_id'])[
'request_volume_store_dist'].rank(method='dense', ascending=False)
# primary ranking only based on margin, ff & portfolio size
rank_drug_type_lvl["wtd_rank"] = (rank_drug_type_lvl["rank_margin"] *
weights_franchisee_type_lvl["margin"]) + \
(rank_drug_type_lvl["rank_ff"] *
weights_franchisee_type_lvl["ff"]) + \
(rank_drug_type_lvl["rank_dist_type_portfolio_size"] *
weights_franchisee_type_lvl["portfolio_size"])
rank_drug_type_lvl["wtd_rank"] = np.round(rank_drug_type_lvl["wtd_rank"], 1)
# setting rules of ranking preference order in cases of ties
group_cols = ["store_id", "drug_type"]
group_col_sort_asc_order = [True, True]
sort_columns = group_cols + ["wtd_rank", "rank_store_dist_credit_period",
"rank_store_dist_volume"]
sort_asc_order = group_col_sort_asc_order + [True, True, True]
rank_drug_type_lvl = rank_drug_type_lvl.sort_values(
sort_columns, ascending=sort_asc_order).reset_index(drop=True)
rank_drug_type_lvl['index'] = rank_drug_type_lvl.index
# final ranking based on preference order
rank_drug_type_lvl["final_rank"] = \
rank_drug_type_lvl.groupby(['store_id', 'drug_type'])['index'].rank(
method='first', ascending=True)
rank_drug_type_lvl.drop('index', axis=1, inplace=True)
return rank_drug_lvl, rank_drug_type_lvl
def get_final_ranks_franchisee(rank_drug_lvl, rank_drug_type_lvl, features,
logger):
"""
    Get final ranking format. No slot-filling logic is applied for franchisee
    stores.
"""
final_ranks = rank_drug_lvl[["store_id", "drug_id"]].drop_duplicates()
final_ranks = final_ranks.merge(
features[["drug_id", "drug_type"]].drop_duplicates(), on="drug_id",
how="left")
logger.info("Creating final df format")
# make final ranking df
for rank in [1, 2, 3]:
df_rank = rank_drug_lvl.loc[rank_drug_lvl["final_rank"] == rank]
df_rank = df_rank[
["store_id", "drug_id", "distributor_id"]]
df_rank.rename({"distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks = final_ranks.merge(df_rank,
on=["store_id", "drug_id"],
how="left")
final_ranks[f"distributor_rank_{rank}"] = final_ranks[
f"distributor_rank_{rank}"].astype(float)
# add franchisee-store-drug-type level ranking
logger.info("Adding franchisee-store-drug-typ level ranking to final df")
final_ranks_type_lvl = rank_drug_type_lvl[
["store_id", "drug_type"]].drop_duplicates()
# create store-type level final ranking format
for rank in [1, 2, 3]:
df_rank = rank_drug_type_lvl.loc[
rank_drug_type_lvl["final_rank"] == rank]
df_rank = df_rank[
["store_id", "drug_type", "distributor_id"]]
df_rank.rename({"distributor_id": f"distributor_rank_{rank}"},
axis=1, inplace=True)
final_ranks_type_lvl = final_ranks_type_lvl.merge(df_rank,
on=["store_id",
"drug_type"],
how="left")
final_ranks_type_lvl[f"distributor_rank_{rank}"] = final_ranks_type_lvl[
f"distributor_rank_{rank}"].astype(float)
# combine store-drug lvl and store-drug-type lvl
final_ranks = pd.concat([final_ranks, final_ranks_type_lvl], axis=0)
    return final_ranks


# ---- end of zeno_etl_libs/utils/distributor_ranking2/calculate_ranks1.py ----

Q_FEATURES = """
select
sb.id as "short-book-1-id" ,
sb."ordered-distributor-id" as "short-book-distributor-id",
sb."drug-id",
coalesce(sb.quantity, 0) as "original-order",
sb."required-quantity" as "final-unfulfilled",
sb."created-at" as "original-created-at",
sb."re-ordered-at" as "original-created-at-2",
sbi."quantity" as "partial-quantity",
i.id as "invoice-id",
i."dc-id" as "partial-dc-id",
i."distributor-id" as "partial-distributor-id",
i."created-at" as "partial-created-at",
i."approved-at" as "partial-invoiced-at",
ii.id as "invoice-item-id",
ii."drug-id" as "invoice-items-drug-id",
inv.id as "inventory-id",
inv."invoice-item-id" as "inv-invoice-item-id",
inv."purchase-rate" as "distributor-rate",
inv."selling-rate",
inv."mrp",
d."drug-name",
d.type as "drug_type",
sdm."forward-dc-id",
s.name as "dc-name"
from
"{schema}"."short-book-1" sb
left join "{schema}"."short-book-invoices" sbi on
sbi."short-book-id" = sb.id
left join "{schema}".invoices i on
sbi."invoice-id" = i.id
left join "{schema}"."short-book-invoice-items" sbii on
sb.id = sbii."short-book-id"
left join "{schema}"."invoice-items" ii on
ii.id = sbii."invoice-item-id"
left join "{schema}"."invoice-items-1" ii1 on
ii1."invoice-item-reference" = ii.id
left join "{schema}"."inventory-1" inv on
inv."invoice-item-id" = ii1.id
left join "{schema}".drugs d on
sb."drug-id" = d.id
left join "{schema}".distributors dis on
dis.id = sb."ordered-distributor-id"
left join "{schema}"."store-dc-mapping" sdm on
sb."store-id" = sdm."store-id"
and dis.type = sdm."drug-type"
left join "{schema}".stores s on
i."dc-id" = s.id
where
DATEDIFF(day, date(sb."created-at"), '{reset_date}') <= {time_interval}
and DATEDIFF(day, date(sb."created-at"), '{reset_date}') >= 7
and sb."quantity" > 0
and sb."ordered-distributor-id" != 76
and sb."ordered-distributor-id" != 5000
and sb."ordered-distributor-id" != 8105
and i."distributor-id" != 8105
and sb.status != 'deleted'
and sb."store-id" in (2,4,7,16,54,82,231,234,244,278,297,23,28,39,216,218,235,229,280,8,13,21,26,31,45,188,208,221,222,230,241,260,264,20,36,61,134,160,184,195,215,224,226,245,252,273,281)
"""
# (2,4,7,16,54,82,231,234,244,278,297,23,28,39,216,218,235,229,280,8,13,21,26,31,45,188,208,221,222,230,241,260,264,20,36,61,134,160,184,195,215,224,226,245,252,273,281)
Q_FEATURES_FRANCHISEE = """
select
sb.id as "short-book-1-id" ,
sb."ordered-distributor-id" as "short-book-distributor-id",
sb."store-id",
ss."franchisee-id",
sb."drug-id",
coalesce(sb.quantity, 0) as "original-order",
sb."required-quantity" as "final-unfulfilled",
sb."created-at" as "original-created-at",
sb."re-ordered-at" as "original-created-at-2",
sbi."quantity" as "partial-quantity",
i.id as "invoice-id",
i."distributor-id" as "partial-distributor-id",
i."created-at" as "partial-created-at",
i."approved-at" as "partial-invoiced-at",
ii.id as "invoice-item-id",
ii."drug-id" as "invoice-items-drug-id",
inv.id as "inventory-id",
inv."invoice-item-id" as "inv-invoice-item-id",
inv."purchase-rate" as "distributor-rate",
inv."selling-rate",
inv."mrp",
d."drug-name",
d.type as "drug_type",
ss."name" as "store-name"
from
"{schema}"."short-book-1" sb
left join "{schema}"."short-book-invoices" sbi on
sbi."short-book-id" = sb.id
left join "{schema}".invoices i on
sbi."invoice-id" = i.id
left join "{schema}"."short-book-invoice-items" sbii on
sb.id = sbii."short-book-id"
left join "{schema}"."invoice-items" ii on
ii.id = sbii."invoice-item-id"
left join "{schema}"."invoice-items-1" ii1 on
ii1."invoice-item-reference" = ii.id
left join "{schema}"."inventory-1" inv on
inv."invoice-item-id" = ii1.id
left join "{schema}".drugs d on
sb."drug-id" = d.id
left join "{schema}".distributors dis on
dis.id = sb."ordered-distributor-id"
left join "{schema}".stores s on
i."dc-id" = s.id
left join "{schema}".stores ss on
sb."store-id" = ss.id
where
DATEDIFF(day, date(sb."created-at"), '{reset_date}') <= {time_interval}
and DATEDIFF(day, date(sb."created-at"), '{reset_date}') >= 7
and sb."quantity" > 0
and sb."ordered-distributor-id" != 76
and sb."ordered-distributor-id" != 5000
and sb."ordered-distributor-id" != 8105
and i."distributor-id" != 8105
and sb.status != 'deleted'
and ss."franchisee-id" != 1
{franchisee_stores_execute_query}
"""
Q_DISTRIBUTORS = """
select db.id as "partial-distributor-id",
db.name as "partial-distributor-name",
db."credit-period" as "partial-distributor-credit-period",
d."type" as "drug-type", count(distinct dd."drug-id") as "dist-type-portfolio-size"
from "{schema}".distributors db
left join "{schema}"."distributor-drugs" dd on db.id = dd."distributor-id"
left join "{schema}".drugs d on dd."drug-id" = d.id
group by "partial-distributor-id", "partial-distributor-name",
"partial-distributor-credit-period", "drug-type"
"""
Q_DC_DISTRIBUTOR_MAPPING = """
select "dc-id" as "partial-dc-id", "distributor-id" as "partial-distributor-id"
from "{schema}"."dc-distributor-mapping" ddm
where "is-active" = 1
group by "dc-id" , "distributor-id"
"""
Q_DISTRIBUTOR_DRUGS = """
select "distributor-id" as "partial-distributor-id" , "drug-id"
from "{schema}"."distributor-drugs" dd
group by "distributor-id" , "drug-id"
"""
def pull_data_dc(reset_date, time_interval, db, schema):
df_features = db.get_df(Q_FEATURES.format(
reset_date=reset_date, time_interval=time_interval, schema=schema))
df_features.columns = [c.replace('-', '_') for c in df_features.columns]
df_distributors = db.get_df(Q_DISTRIBUTORS.format(schema=schema))
df_distributors.columns = [c.replace('-', '_') for c in df_distributors.columns]
df_distributors = df_distributors.dropna()
df_distributors = df_distributors.loc[df_distributors["drug_type"] != '']
df_dc_distributors_mapping = db.get_df(Q_DC_DISTRIBUTOR_MAPPING.format(schema=schema))
df_dc_distributors_mapping.columns = [c.replace('-', '_') for c in
df_dc_distributors_mapping.columns]
df_distributor_drugs = db.get_df(Q_DISTRIBUTOR_DRUGS.format(schema=schema))
df_distributor_drugs.columns = [c.replace('-', '_') for c in
df_distributor_drugs.columns]
df_distributor_drugs.drop_duplicates(inplace=True)
# ensure data types
df_features["distributor_rate"] = df_features["distributor_rate"].astype(float)
df_features["selling_rate"] = df_features["selling_rate"].astype(float)
df_features["mrp"] = df_features["mrp"].astype(float)
return df_features, df_distributors, df_dc_distributors_mapping, df_distributor_drugs
def pull_data_franchisee(reset_date, time_interval, franchisee_stores,
db, schema):
if franchisee_stores == [0]:
franchisee_stores_execute_query = ""
else:
franchisee_stores_execute_query = f"""
and sb."store-id" in {str(franchisee_stores).replace('[', '(').replace(']',')')}
"""
df_features = db.get_df(Q_FEATURES_FRANCHISEE.format(
reset_date=reset_date, time_interval=time_interval,
franchisee_stores_execute_query=franchisee_stores_execute_query,
schema=schema))
df_features.columns = [c.replace('-', '_') for c in df_features.columns]
df_distributors = db.get_df(Q_DISTRIBUTORS.format(schema=schema))
df_distributors.columns = [c.replace('-', '_') for c in df_distributors.columns]
df_distributors = df_distributors.dropna()
df_distributor_drugs = db.get_df(Q_DISTRIBUTOR_DRUGS.format(schema=schema))
df_distributor_drugs.columns = [c.replace('-', '_') for c in
df_distributor_drugs.columns]
df_distributor_drugs.drop_duplicates(inplace=True)
# ensure data types
df_features["distributor_rate"] = df_features["distributor_rate"].astype(float)
df_features["selling_rate"] = df_features["selling_rate"].astype(float)
df_features["mrp"] = df_features["mrp"].astype(float)
    return df_features, df_distributors, df_distributor_drugs


# ---- end of zeno_etl_libs/utils/distributor_ranking2/pull_data.py ----

from itertools import product
platform_prod = 0
store_col = 'store_id'
drug_col = 'drug_id'
date_col = 'date'
target_col = 'actual_demand'
key_col = 'ts_id'
eol_cutoff = 13
mature_cutoff = 52
forecast_horizon = 4
flag_weather = 0
flag_seasonality_index = {
'ctb': 0,
'lgbm': 0,
'xgb': 0
}
flag_sample_weights = {
'ctb': 0,
'lgbm': 0,
'xgb': 0
}
num_shift_lag = 3
lags = [1]
add_lags_diff_flag = 1
lags_diff = [(1, 2)]
add_monthly_lags_flag = 1
# monthly_lags = [1, 2, 3, 6, 12]
monthly_lags = [1, 2, 3, 6]
rolling_time_feat = {
'lags': [5, 13, 26, 53],
'agg_func_dict': {'min', 'max', 'mean', 'median', 'std'}
}
ewma_lags = [4, 8]
# trend_lags = [13, 26, 53]
trend_lags = [13, 26]
perc_noise = [0.2, 0.5, 0.1]
# fc_cols = ['preds_xgb_rf_target','preds_cb_target','preds_lgb','AE', 'croston_fcst']
# models = ['croston', 'prophet', 'ETS', 'MA', 'AE_ts', 'lgbm']
run_ml_flag = 0
runs_ts_flag = 1
models = ['MA','prophet','croston','ETS','EWM']
local_testing = 0
trend = ['additive', None]
seasonal = ['additive', None]
damped = [True, False]
seasonal_periods = [52]
use_boxcox = [True, False]
ets_params = list(
product(trend, seasonal, damped, seasonal_periods, use_boxcox))
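# ets_params is the full grid searched by the ETS model: 2*2*2*1*2 = 16
# parameter combinations of the form ('additive', None, True, 52, False)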
similar_drug_type = ['generic', 'high-value-generic']
generic_share_in_first_3_months = 0.5
ss_upper_cap = 21
ss_lower_cap = 7
ss_harcoded = 2
store_age_limit = 90
drug_age_limit = 90
percentile_bucket_dict = {
'AW': 0.5, 'AX': 0.5, 'AY': 0.5, 'AZ': 0.5,
'BW': 0.5, 'BX': 0.5, 'BY': 0.6, 'BZ': 0.6,
'CW': 0.5, 'CX': 0.5, 'CY': 0.6, 'CZ': 0.6,
'DW': 0.5, 'DX': 0.5, 'DY': 0.6, 'DZ': 0.6}
# fc_cols = ['croston_fcst', 'ETS_fcst', 'ma_fcst','prophet_fcst', 'AE_ts_fcst']
# cols_rename = {
# 'preds_xgb_rf_target': 'fcst_1',
# 'preds_cb_target': 'fcst_2',
# 'preds_lgb':'fcst_3',
# 'AE':'fcst_4',
# 'croston_fcst':'fcst_5'
# }
# cols_rename = {
# 'croston_fcst': 'fcst_1',
# 'ETS_fcst': 'fcst_2',
# 'ma_fcst':'fcst_3',
# 'prophet_fcst':'fcst_4',
# 'AE_ts_fcst':'fcst_5'
# }


# ---- end of zeno_etl_libs/utils/goodaid_forecast/engine/config_goodaid.py ----

import numpy as np
np.random.seed(0)
import pandas as pd
# import time
# import re
# from datetime import date
# from dateutil.relativedelta import relativedelta
# from statsmodels.tsa.exponential_smoothing.ets import ETSModel
from zeno_etl_libs.utils.warehouse.forecast.errors import ape_calc, ae_calc
from prophet import Prophet
from statsmodels.tsa.holtwinters import ExponentialSmoothing
# from statsmodels.tsa.api import ExponentialSmoothing
# import sktime
from sktime.forecasting.ets import AutoETS
from zeno_etl_libs.utils.ipc2.helpers.helper_functions import sum_std,\
applyParallel, applyParallel_croston
# from boruta import BorutaPy
from zeno_etl_libs.utils.goodaid_forecast.engine.config_goodaid import (
date_col,
target_col,
models,
ets_params
)
import logging
logger = logging.getLogger("_logger")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
class Goodaid_tS_forecast:
def train_test_split(self, df, train_max_date, forecast_start):
df.rename(columns={date_col: 'ds', target_col: 'y'}, inplace=True)
df.sort_values(by=['ds'], inplace=True)
train = df[df['ds'] <= train_max_date]
test = df[df['ds'] >= forecast_start]
return train, test
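    # Croston TSB (Teunter-Syntetos-Babai) is a variant of Croston's method for
    # intermittent demand: it exponentially smooths the demand level (a) with
    # alpha and the probability of a demand occurrence (p) with beta, and
    # forecasts their product f = p * a over the extra_periods horizon.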
def Croston_TSB(self, ts, extra_periods=4, alpha=0.4, beta=0.4):
d = np.array(ts) # Transform the input into a numpy array
cols = len(d) # Historical period length
d = np.append(d, [
np.nan] * extra_periods) # Append np.nan into the demand array to cover future periods
# level (a), probability(p) and forecast (f)
a, p, f = np.full((3, cols + extra_periods), np.nan)
# Initialization
first_occurence = np.argmax(d[:cols] > 0)
a[0] = d[first_occurence]
p[0] = 1 / (1 + first_occurence)
f[0] = p[0] * a[0]
# Create all the t+1 forecasts
for t in range(0, cols):
if d[t] > 0:
a[t + 1] = alpha * d[t] + (1 - alpha) * a[t]
p[t + 1] = beta * (1) + (1 - beta) * p[t]
else:
a[t + 1] = a[t]
p[t + 1] = (1 - beta) * p[t]
f[t + 1] = p[t + 1] * a[t + 1]
# Future Forecast
a[cols + 1:cols + extra_periods] = a[cols]
p[cols + 1:cols + extra_periods] = p[cols]
f[cols + 1:cols + extra_periods] = f[cols]
df = pd.DataFrame.from_dict(
{"Demand": d, "Forecast": f, "Period": p, "Level": a,
"Error": d - f})
return np.round(df[-extra_periods:])
def ETS_forecast(self, train, test,ets_params):
try:
train = train.copy(deep=True)
test = test.copy(deep=True)
train.set_index(['ds'], inplace=True)
test.set_index(['ds'], inplace=True)
train.index.freq = train.index.inferred_freq
test.index.freq = test.index.inferred_freq
train_final = train.copy(deep=True)
out_of_sample = len(test)
horizon = len(test)
# Train for grid search
train.drop(train.tail(out_of_sample).index, inplace=True)
# dividing the series into train and validation set
drug_id = train['drug_id'].values[0]
input_series = train['y'].values
validation = train['y'].tail(out_of_sample).values
# creating dummy best fit param and fit values
best_fit_params = [None, None, False, None, False]
best_accuracy = np.inf
# running a loop for grid search
for params in ets_params:
trend, seasonal, damped, seasonal_periods, use_boxcox = params
try:
ape = []
ae = []
# model fitting
model = ExponentialSmoothing(
input_series, trend=trend, seasonal=seasonal, damped=damped,
seasonal_periods=seasonal_periods, use_boxcox=use_boxcox)
fit = model.fit(optimized=True)
# accuracy parameter can be - aic, bic, sse or mape
forecast = np.round(fit.forecast(horizon))
# print(forecast)
ape = [
ape_calc(actual, forecast)
for actual, forecast in zip(validation, forecast)]
ae = [
ae_calc(actual, forecast)
for actual, forecast in zip(validation, forecast)]
fit_mape = np.mean(ape)
# identifying the best fit params
if (fit_mape <= best_accuracy) & (fit_mape != -np.inf):
best_fit_params = params
best_accuracy = fit_mape
except Exception as error:
error_str = '''Drug {} Params {} Error: {}'''.format(
drug_id, str(params), error)
# logger.info(error_str)
pass
            # create the out-of-sample forecast using the best-fit params
trend, seasonal, damped, seasonal_periods, use_boxcox = best_fit_params
model = ExponentialSmoothing(
train_final['y'], trend=trend, seasonal=seasonal, damped=damped,
seasonal_periods=seasonal_periods, use_boxcox=use_boxcox)
fit = model.fit(optimized=True)
forecast = np.round(fit.forecast(horizon+1))
forecast = forecast[-horizon:]
except Exception as e:
logger.info("error in ETS fcst")
logger.info(str(e))
forecast = 0
return forecast
def SES_forecast(self, train, test):
try:
train = train.copy(deep=True)
test = test.copy(deep=True)
train.set_index(['ds'], inplace=True)
test.set_index(['ds'], inplace=True)
train.index.freq = train.index.inferred_freq
test.index.freq = test.index.inferred_freq
fit = ExponentialSmoothing(train['y']).fit(optimized=True)
# preds_ses = fit.forecast(len(test) + 1)
preds_ses = np.round(fit.forecast(len(test)+1))
preds_ses = preds_ses[-len(test):]
except Exception as e:
logger.info("error in SES fcst")
logger.info(str(e))
preds_ses = 0
return preds_ses
def ma_forecast(self, data):
"""
        Purpose: Compute MA forecast for the forecast horizon specified
Inputs: time series to create forecast
Output: series with forecasted values
"""
sma_df = data.copy(deep=True)
yhat = []
if len(data) >= 8:
for i in range(5):
sma_val = sma_df.rolling(7).mean().iloc[-1]
sma_df.loc[sma_df.index.max() + 1] = sma_val
yhat.append(sma_val)
else:
for i in range(5):
sma_val = sma_df.rolling(len(data)).mean().iloc[-1]
sma_df.loc[sma_df.index.max() + 1] = sma_val
yhat.append(sma_val)
yhat.append(sma_val)
logger.info(yhat)
return np.round(yhat[-4:])
def ewm_forecast(self, data):
sma_df = data.copy(deep=True)
yhat = []
if len(data) >= 8:
for i in range(5):
sma_val = sma_df.ewm(span=7,adjust=False).mean().iloc[-1]
sma_df.loc[sma_df.index.max() + 1] = sma_val
yhat.append(sma_val)
else:
for i in range(5):
sma_val = sma_df.ewm(span=len(data),adjust=False).mean().iloc[-1]
sma_df.loc[sma_df.index.max() + 1] = sma_val
yhat.append(sma_val)
yhat.append(sma_val)
logger.info(yhat)
return np.round(yhat[-4:])
def prophet_fcst(self, train, test, params=None):
# reg_list = []
try:
if params is None:
pro = Prophet()
else:
pro = Prophet(n_changepoints=params)
# for j in train.columns:
# if j not in col_list:
# pro.add_regressor(j)
# reg_list.append(j)
pro.fit(train[['ds', 'y']])
pred_f = pro.predict(test)
test = test[["ds", "y"]]
test = pd.merge(test, pred_f, on="ds", how="left")
except Exception as e:
logger.info("error in prophet fcst")
logger.info(str(e))
test['yhat'] = 0
return np.round(test)
def ts_forecast(self, df, train_max_date, forecast_start):
train, test = self.train_test_split(df, train_max_date=train_max_date,
forecast_start=forecast_start)
test = test.sort_values(by=['ds'])
if 'croston' in models:
preds_croston = self.Croston_TSB(train['y'])
test['preds_croston'] = preds_croston['Forecast'].values
if 'ETS' in models:
preds_ETS = self.ETS_forecast(train.copy(), test.copy(),ets_params)
try:
test['preds_ETS'] = preds_ETS.values
except:
test['preds_ETS'] = np.nan
if 'SES' in models:
preds_SES = self.SES_forecast(train.copy(), test.copy())
try:
test['preds_SES'] = preds_SES.values
except:
test['preds_SES'] = np.nan
if 'EWM' in models:
preds_ewm = self.ewm_forecast(train['y'])
test['preds_ewm'] = preds_ewm
if 'MA' in models:
preds_ma = self.ma_forecast(train['y'])
test['preds_ma'] = preds_ma
if 'prophet' in models:
preds_prophet = self.prophet_fcst(train.copy(), test.copy())
test['preds_prophet'] = preds_prophet['yhat'].values
return test
def apply_ts_forecast(self, df, train_max_date, forecast_start):
# global train_date
# train_date = train_max_date
# global forecast_start_date
# forecast_start_date = forecast_start
preds = applyParallel_croston(
df.groupby('ts_id'),
func=self.ts_forecast, train_max_date=train_max_date,
forecast_start=forecast_start
)
preds.rename(columns={'ds': date_col, 'y': target_col}, inplace=True)
ts_fcst_cols = [i for i in preds.columns if i.startswith('preds_')]
for col in ts_fcst_cols:
preds[col].fillna(0, inplace=True)
preds[col] = np.where(preds[col] < 0, 0, preds[col])
preds[col] = preds[col].replace(0, np.NaN)
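        # Descriptive note (added for clarity, not in the original source): zeros are
        # temporarily converted to NaN above so that a model which predicts nothing for a
        # week does not drag the cross-model average down. The two ensembles below are
        #   preds_AE_ts -> mean of all preds_* columns per row, skipping NaNs
        #   preds_ME_ts -> max of all preds_* columns per row
        # e.g. for row values (ETS=10, SES=NaN, MA=14): AE = (10 + 14) / 2 = 12, ME = 14.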
preds['preds_AE_ts'] = preds[ts_fcst_cols].mean(axis=1,skipna=True)
preds['preds_ME_ts'] = preds[ts_fcst_cols].max(axis=1)
ts_fcst_cols = [i for i in preds.columns if i.startswith('preds_')]
for col in ts_fcst_cols:
preds[col].fillna(0, inplace=True)
preds[col] = np.where(preds[col] < 0, 0, preds[col])
        return preds, ts_fcst_cols

# ==== end of file: zeno_etl_libs/utils/goodaid_forecast/engine/goodaid_ts_forecast.py (package zeno-etl-libs-v3) ====
import os
import sys
import argparse
sys.path.append('../../../..')
import pandas as pd
import numpy as np
import datetime as dt
from zeno_etl_libs.utils.goodaid_forecast.engine.config_goodaid import *
from zeno_etl_libs.utils.goodaid_forecast.engine.goodaid_data_load import GoodaidloadData,Goodaid_data_additional_processing,b2_goodaid_load_data
from zeno_etl_libs.utils.goodaid_forecast.engine.goodaid_ts_forecast import *
from zeno_etl_libs.utils.goodaid_forecast.engine.goodaid_segmentation import Segmentation
from zeno_etl_libs.utils.goodaid_forecast.engine.goodaid_data_pre_process import PreprocessData
from dateutil.relativedelta import relativedelta
def drugs_to_comp_gp(sales, sales_pred, similar_drug_mapping):
# ================== aggregate drug_id to composition_id ==================
# df to change sales_pred, sales, sales_daily
df_drug_comp_hash = similar_drug_mapping.copy(deep = True)
df_drug_comp_hash.rename(columns = {'group':'comp_gp_hash'},inplace = True)
sales_pred["drug_id"] = sales_pred["drug_id"].astype(float)
sales_pred1 = sales_pred.merge(df_drug_comp_hash, on="drug_id", how="left")
drug_reject = sales_pred1.loc[sales_pred1["comp_gp_hash"].isnull()][
"drug_id"].unique().tolist()
drug_accept = sales_pred1.loc[~sales_pred1["comp_gp_hash"].isnull()][
"drug_id"].unique().tolist()
sales_pred1 = sales_pred1.dropna()
sales_pred1 = sales_pred1.groupby(["store_id", "comp_gp_hash", "date"],
as_index=False).agg(
{"actual_demand": "sum"})
sales_pred1.rename({"comp_gp_hash": "drug_id"}, axis=1, inplace=True)
sales_pred1['ts_id'] = (
sales_pred1[store_col].astype(int).astype(str)
+ '_'
+ sales_pred1[drug_col].astype(str)
)
sales1 = sales.merge(df_drug_comp_hash, on="drug_id", how="left")
sales1 = sales1.groupby(["store_id", "comp_gp_hash", "date"],
as_index=False).agg({"actual_demand": "sum"})
sales1.rename({"comp_gp_hash": "drug_id"}, axis=1, inplace=True)
sales1['ts_id'] = (
sales1[store_col].astype(int).astype(str)
+ '_'
+ sales1[drug_col].astype(str)
)
return sales1, sales_pred1
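# Illustrative note (added for clarity, not part of the original module): drugs_to_comp_gp
# re-keys the demand series from individual drug_ids to composition groups, so a young (B2)
# GoodAid drug can borrow the sales history of every drug sharing its composition. A sketch
# of the aggregation, kept as comments, using made-up toy values:
#
#   import pandas as pd
#   toy = pd.DataFrame({'store_id': [2, 2], 'drug_id': [101, 202],
#                       'date': ['2022-01-03'] * 2, 'actual_demand': [5, 3]})
#   mapping = pd.DataFrame({'drug_id': [101, 202], 'comp_gp_hash': ['para-650'] * 2})
#   out = (toy.merge(mapping, on='drug_id')
#              .groupby(['store_id', 'comp_gp_hash', 'date'], as_index=False)
#              .agg({'actual_demand': 'sum'}))       # -> one row with demand 5 + 3 = 8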
def goodaid_ipc_forecast(store_id, reset_date, type_list, schema, db, logger):
store_id_list = ("({})").format(store_id) # for sql pass
last_date = dt.date(day=1, month=8, year=2021) # max history #baseline
# last_date = pd.to_datetime(reset_date).date() - dt.timedelta(weeks=26) # capping sales history to 6 months
# last_date = pd.to_datetime(reset_date).date() - dt.timedelta(weeks=52) # capping sales history to 12 months
load_max_date = pd.to_datetime(reset_date).date() - dt.timedelta(days = pd.to_datetime(reset_date).dayofweek+1)
# define empty variables in case of fail
weekly_fcst = pd.DataFrame()
ts_fcst = pd.DataFrame()
ts_fcst_cols = []
# Load Data
logger.info("Data Loading Started...")
data_load_obj = GoodaidloadData()
(
drug_list,
sales_history,
cfr_pr,
calendar,
first_bill_date,
first_store_drug_bill_date,
wh_goodaid_assortment,
similar_drug_mapping,
sales_history_add
) = data_load_obj.load_all_input(
type_list=type_list,
store_id_list=store_id_list,
last_date=last_date,
reset_date=reset_date,
schema=schema,
db=db
)
# PreProcess Data
logger.info("Data Pre Processing Started...")
data_prep_obj = PreprocessData()
(
sales,
sales_pred,
cal_sales,
sales_daily
) = data_prep_obj.preprocess_all(
sales=sales_history,
drug_list=drug_list,
cfr_pr=cfr_pr,
calendar=calendar,
first_bill_date=first_bill_date,
last_date=last_date
)
train_max_date = sales[date_col].max()
end_date = sales_pred[date_col].max()
#Extra PreProcessing
logger.info("Goodaid Specific Extra processing Started...")
gdad_ep_obj = Goodaid_data_additional_processing()
sales,sales_pred = gdad_ep_obj.goodaid_extra_processing_all(first_store_drug_bill_date=first_store_drug_bill_date,
sales_pred=sales_pred,
sales = sales,
reset_date=reset_date,
first_bill_date=first_bill_date,
wh_goodaid_assortment=wh_goodaid_assortment
)
#Segmentation
logger.info("Segmentation Started...")
seg_obj = Segmentation()
seg_df, drug_class = seg_obj.get_weekly_segmentation(
df=sales.copy(deep=True),
df_sales_daily=sales_daily.copy(deep=True),
train_max_date=train_max_date,
end_date=end_date
)
seg_df['reset_date'] = str(reset_date)
merged_df1 = sales_pred
# Forecasting
ts_fcst_obj = Goodaid_tS_forecast()
ts_fcst, ts_fcst_cols = ts_fcst_obj.apply_ts_forecast(
df=merged_df1.copy(),
train_max_date=train_max_date,
forecast_start=train_max_date + relativedelta(weeks=2))
logger.info("Forecast Completed...")
# Composition/Similar drug Level Forecasting for B2 Bucket
data_load_obj_new = b2_goodaid_load_data()
(
drug_list_comp,
sales_history_comp,
cfr_pr_comp,
calendar_comp,
drug_info_comp,
group_info_comp
) = data_load_obj_new.load_all_input(
type_list=type_list,
store_id_list=store_id_list,
sales_pred=sales_pred,
similar_drug_mapping = similar_drug_mapping,
last_date=last_date,
reset_date=reset_date,
schema=schema,
db=db
)
logger.info("date fetched...")
(
sales_comp,
sales_pred_comp,
cal_sales_comp,
sales_daily_comp
) = data_prep_obj.preprocess_all(
sales=sales_history_comp,
drug_list=drug_list_comp,
cfr_pr=cfr_pr_comp,
calendar=calendar_comp,
first_bill_date=first_bill_date,
last_date=last_date
)
logger.info("data preprocess part 1...")
sales1, sales_pred1= drugs_to_comp_gp(sales_comp, sales_pred_comp, similar_drug_mapping)
logger.info("data preprocess part 2...")
train_max_date = sales1[date_col].max()
end_date = sales_pred1[date_col].max()
merged_df2 = sales_pred1
ts_fcst_obj = Goodaid_tS_forecast()
ts_fcst2, ts_fcst_cols2 = ts_fcst_obj.apply_ts_forecast(
df=merged_df2.copy(),
train_max_date=train_max_date,
forecast_start=train_max_date + relativedelta(weeks=2))
logger.info("forecast 2 completed...")
ts_fcst2 = ts_fcst2.drop_duplicates(
subset = ['ts_id', 'date'],
keep = 'last')
ts_fcst2.drop(['store_id', 'drug_id', 'actual_demand',], axis=1, inplace=True)
weekly_fcst = ts_fcst.copy(deep=True)
weekly_fcst['reset_date'] = reset_date
weekly_fcst.drop_duplicates(inplace=True)
weekly_fcst['model'] = 'AvgTS'
weekly_fcst[[store_col, drug_col]] = weekly_fcst[key_col].str.split('_',
expand=True)
weekly_fcst.rename(columns={'preds_AE_ts': 'fcst'}, inplace=True)
weekly_fcst = pd.merge(weekly_fcst, seg_df[['ts_id', 'std', 'Mixed']],
how='left', on=['ts_id'])
weekly_fcst.rename(columns={'Mixed': 'bucket'}, inplace=True)
weekly_fcst = weekly_fcst[
['store_id', 'drug_id', 'model', 'date', 'fcst', 'std', 'bucket','age_bucket', 'wh_assortment']]
fc_cols = [i for i in weekly_fcst.columns if i.startswith('preds_')]
weekly_fcst['std'].fillna(seg_df['std'].mean(), inplace=True)
agg_fcst = weekly_fcst.groupby(
['model', 'store_id', 'drug_id', 'bucket']). \
agg({'fcst': 'sum', 'std': 'mean', 'age_bucket':'first', 'wh_assortment':'first'}).reset_index()
agg_fcst['drug_id'] = agg_fcst['drug_id'].astype(int)
weekly_fcst2 = ts_fcst2.copy(deep=True)
weekly_fcst2['reset_date'] = reset_date
weekly_fcst2.drop_duplicates(inplace=True)
weekly_fcst2['model'] = 'AvgTS'
weekly_fcst2[[store_col, drug_col]] = weekly_fcst2[key_col].str.split('_',
expand=True)
weekly_fcst2.rename(columns={'preds_AE_ts': 'fcst'}, inplace=True)
weekly_fcst2 = weekly_fcst2[
['store_id', 'drug_id', 'model', 'date', 'fcst']]
agg_fcst2 = weekly_fcst2.groupby(
['model', 'store_id', 'drug_id']). \
agg({'fcst': 'sum'}).reset_index()
agg_fcst2.rename(columns={'drug_id':'group'},inplace=True)
agg_fcst2['age_bucket'] = 'B2'
agg_fcst2.drop(['model'], axis=1, inplace=True)
agg_fcst = agg_fcst.merge(similar_drug_mapping[['drug_id','group']],on = 'drug_id',how='left')
suffix_for_similar_drugs = '_sdlf'
agg_fcst = agg_fcst.merge(agg_fcst2, on = ['store_id','group','age_bucket'],how='left',suffixes=('', suffix_for_similar_drugs))
agg_fcst.drop(['group'], axis=1, inplace=True)
agg_fcst['store_id'] = agg_fcst['store_id'].astype(int)
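    # Descriptive note (added for clarity, not in the original source): the block below
    # blends the drug-level forecast with the composition-level (B2) forecast. If no
    # composition forecast exists, or the individual forecast already meets
    # generic_share_in_first_3_months * composition forecast, the individual number is
    # kept; otherwise the forecast is floored at that share of the composition forecast
    # and fcst_level is marked 'composition'. The pre-override value is preserved in fcst_ol.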
col = 'fcst'
condition = [agg_fcst[col + suffix_for_similar_drugs].isna(),
agg_fcst[col] >= (agg_fcst[col + suffix_for_similar_drugs]) * generic_share_in_first_3_months,
agg_fcst[col] < (agg_fcst[col + suffix_for_similar_drugs]) * generic_share_in_first_3_months]
choice = [agg_fcst[col], agg_fcst[col], (agg_fcst[col + suffix_for_similar_drugs]) * generic_share_in_first_3_months]
choice2 = ['individual','individual','composition']
agg_fcst[col + '_ol'] = agg_fcst[col]
agg_fcst[col] = np.select(condition, choice)
agg_fcst['fcst_level'] = np.select(condition, choice2,default='individual')
    return agg_fcst, cal_sales, weekly_fcst, seg_df, drug_class

# ==== end of file: zeno_etl_libs/utils/goodaid_forecast/engine/goodaid_forecast_main.py (package zeno-etl-libs-v3) ====
import pandas as pd
import datetime
import numpy as np
from zeno_etl_libs.utils.ipc2.config_ipc import date_col, store_col, \
drug_col, target_col, key_col, local_testing
class PreprocessData:
def add_ts_id(self, df):
df = df[~df[drug_col].isnull()].reset_index(drop=True)
df['ts_id'] = (
df[store_col].astype(int).astype(str)
+ '_'
+ df[drug_col].astype(int).astype(str)
)
return df
def preprocess_sales(self, df, drug_list):
df.rename(columns={
'net_sales_quantity': target_col
}, inplace=True)
df.rename(columns={
'sales_date': date_col
}, inplace=True)
set_dtypes = {
store_col: int,
drug_col: int,
date_col: str,
target_col: float
}
df = df.astype(set_dtypes)
df[target_col] = df[target_col].round()
df[date_col] = pd.to_datetime(df[date_col])
df = df.groupby(
[store_col, drug_col, key_col, date_col]
)[target_col].sum().reset_index()
df = df[df[drug_col].isin(drug_list[drug_col].unique().tolist())]
return df
def get_formatted_data(self, df):
df_start = df.groupby([key_col])[date_col].min().reset_index().rename(
columns={date_col: 'sales_start'})
df = df[[key_col, date_col, target_col]]
min_date = df[date_col].dropna().min()
end_date = df[date_col].dropna().max()
date_range = []
date_range = pd.date_range(
start=min_date,
end=end_date,
freq='d'
)
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = np.nan
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = pd.merge(df, df_start, how='left', on=key_col)
df = df[df[date_col] >= df['sales_start']]
df[[store_col, drug_col]] = df[key_col].str.split('_', expand=True)
df[[store_col, drug_col]] = df[[store_col, drug_col]].astype(int)
return df
def preprocess_cfr_pr(self, df):
set_dtypes = {
store_col: int,
drug_col: int,
'loss_quantity': int
}
df = df.astype(set_dtypes)
df['shortbook_date'] = pd.to_datetime(df['shortbook_date'])
return df
def merge_cfr_pr(self, sales, cfr_pr):
df = sales.merge(cfr_pr,
left_on=[store_col, drug_col, date_col],
right_on=[store_col, drug_col, 'shortbook_date'],
how='left')
df[date_col] = df[date_col].combine_first(df['shortbook_date'])
df[target_col].fillna(0, inplace=True)
df['loss_quantity'].fillna(0, inplace=True)
df[target_col] += df['loss_quantity']
df.drop(['shortbook_date', 'loss_quantity'], axis=1, inplace=True)
return df
def preprocess_calendar(self, df, last_date):
df.rename(columns={'date': date_col}, inplace=True)
df[date_col] = pd.to_datetime(df[date_col])
cal_sales = df.copy()
cal_sales['week_begin_dt'] = cal_sales.apply(
lambda x: x[date_col] - datetime.timedelta(x['day_of_week']),
axis=1)
cal_sales['month_begin_dt'] = cal_sales.apply(
lambda x: x['date'] - datetime.timedelta(x['date'].day - 1), axis=1)
cal_sales['key'] = 1
ld = pd.to_datetime(last_date)
cal_sales = cal_sales[cal_sales[date_col] > ld]
return df, cal_sales
def merge_calendar(self, sales, calendar):
df = sales.merge(calendar,
how='left',
on=date_col
)
# df_week_days_count = df.groupby([key_col, 'year', 'week_of_year'])[date_col].count().reset_index().rename(columns = {date_col:'week_days_count'})
# df['week_days_count'] = 1
df['week_begin_dt'] = df.apply(
lambda x: x[date_col] - datetime.timedelta(x['day_of_week']),
axis=1)
df_week_days_count = df.groupby(['ts_id', 'week_begin_dt'])[
date_col].count().reset_index().rename(
columns={date_col: 'week_days_count'})
# df = df.groupby(['ts_id', store_col, drug_col, ]).resample('W-Mon', on =date_col )[target_col].sum().reset_index()
df = df.groupby(['ts_id', store_col, drug_col, 'week_begin_dt'])[
target_col].sum().reset_index()
df = pd.merge(df, df_week_days_count, how='left',
on=[key_col, 'week_begin_dt'])
# df = df[df['week_days_count'] == 7].reset_index(drop=True)
df['actual_demand'] = round((df['actual_demand']/df['week_days_count'])*7,0)
df.drop(columns=['week_days_count'], inplace=True)
df.rename(columns={'week_begin_dt': date_col}, inplace=True)
return df
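    # Descriptive note (added for clarity, not in the original source): after grouping to
    # week_begin_dt, a partially observed week is scaled up to a full week via
    # actual_demand = round(actual_demand / week_days_count * 7). For example, 10 units
    # sold across the 5 observed days of a truncated week become round(10 / 5 * 7) = 14.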
def preprocess_bill_date(self, df):
df.rename(columns={'store-id': store_col}, inplace=True)
df['bill_date'] = pd.to_datetime(df['bill_date'])
return df
def merge_first_bill_date(self, sales, first_bill_date):
df = pd.merge(sales, first_bill_date, on=[store_col])
df = df[df[date_col] >= df['bill_date']].reset_index(drop=True)
df.drop(columns=['bill_date'], inplace=True)
return df
def make_future_df(self, df):
start_date_df = (
df
.groupby(key_col)[date_col]
.min()
.reset_index()
.rename(columns={date_col: 'start_date'})
)
df = df[[key_col, date_col, target_col]]
end_date = df[date_col].max() + datetime.timedelta(weeks=5)
min_date = df[date_col].min()
date_range = pd.date_range(
start=min_date,
end=end_date,
freq="W-MON"
)
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = 0
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = df.merge(start_date_df, on=key_col, how='left')
df = df[
df[date_col] >= df['start_date']
]
df.drop('start_date', axis=1, inplace=True)
df[[store_col, drug_col]] = df[key_col].str.split('_', expand=True)
return df
def preprocess_all(
self,
sales=None,
cfr_pr=None,
drug_list=None,
calendar=None,
first_bill_date=None,
last_date=None,
):
sales = self.add_ts_id(sales)
# filter
#################################################
if local_testing == 1:
tsid_list = \
sales.sort_values(by=['net_sales_quantity'], ascending=False)[
key_col].unique().tolist()[:10]
sales = sales[sales[key_col].isin(tsid_list)]
#################################################
sales = self.preprocess_sales(sales, drug_list)
sales = self.get_formatted_data(sales)
cfr_pr = self.preprocess_cfr_pr(cfr_pr)
sales_daily = self.merge_cfr_pr(sales, cfr_pr)
calendar, cal_sales = self.preprocess_calendar(calendar, last_date)
sales = self.merge_calendar(sales_daily, calendar)
first_bill_date = self.preprocess_bill_date(first_bill_date)
sales = self.merge_first_bill_date(sales, first_bill_date)
sales_pred = self.make_future_df(sales.copy())
return (
sales,
sales_pred,
cal_sales,
sales_daily
        )

# ==== end of file: zeno_etl_libs/utils/goodaid_forecast/engine/goodaid_data_pre_process.py (package zeno-etl-libs-v3) ====
import numpy as np
from zeno_etl_libs.utils.ipc.lead_time import lead_time
from zeno_etl_libs.utils.ipc2.helpers.correction_flag import compare_df, \
add_correction_flag
from zeno_etl_libs.utils.goodaid_forecast.engine.goodaid_calculate_ss import calculate_ss
from zeno_etl_libs.utils.goodaid_forecast.engine.config_goodaid import *
def safety_stock_calc(agg_fcst, cal_sales, store_id, reset_date,
schema, db, logger):
fcst_weeks = 4
order_freq = 4
# ========================= LEAD TIME CALCULATIONS =========================
lt_drug, lt_store_mean, lt_store_std = lead_time(
store_id, cal_sales, reset_date, db, schema, logger)
safety_stock_df = agg_fcst.merge(
lt_drug[['drug_id', 'lead_time_mean', 'lead_time_std']],
how='left', on='drug_id')
safety_stock_df['lead_time_mean'].fillna(lt_store_mean, inplace=True)
safety_stock_df['lead_time_mean'] = safety_stock_df['lead_time_mean'].apply(np.ceil)
safety_stock_df['lead_time_std'].fillna(lt_store_std, inplace=True)
# ==================== SS, ROP, OUP CALCULATION BEGINS =====================
# impute store_std for cases where store-drug std<1
safety_stock_df['lead_time_std'] = np.where(
safety_stock_df['lead_time_std'] < 1,
lt_store_std, safety_stock_df['lead_time_std'])
# calculate SS
safety_stock_df = calculate_ss(safety_stock_df, fcst_weeks, logger)
safety_stock_df['safety_stock_without_correction'] = safety_stock_df['safety_stock']
safety_stock_df_before = safety_stock_df
# SS-DOH CAPPING #1
logger.info(f"DOH1 (Upper Capping) Correction starts")
df_pre_corr = safety_stock_df.copy()
cap_doh = ss_upper_cap
safety_stock_df['safety_stock_max'] = np.round((safety_stock_df['fcst'] / 28) * cap_doh)
safety_stock_df['safety_stock'] = np.where(
safety_stock_df['safety_stock'] > safety_stock_df['safety_stock_max'],
safety_stock_df['safety_stock_max'], safety_stock_df['safety_stock'])
safety_stock_df.drop('safety_stock_max', axis=1, inplace=True)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum SS before: {df_pre_corr['safety_stock'].sum()}")
logger.info(f"Sum SS after: {df_post_corr['safety_stock'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger,
cols_to_compare=['safety_stock'])
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'UDOH')
# SS-DOH CAPPING #2
logger.info(f"DOH2 (lower capping) Correction starts")
df_pre_corr = safety_stock_df.copy()
cap_doh = ss_lower_cap
safety_stock_df['safety_stock_min'] = np.round((safety_stock_df['fcst'] / 28) * cap_doh)
safety_stock_df['safety_stock'] = np.where(
safety_stock_df['safety_stock'] < safety_stock_df['safety_stock_min'],
safety_stock_df['safety_stock_min'], safety_stock_df['safety_stock'])
safety_stock_df.drop('safety_stock_min', axis=1, inplace=True)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum SS before: {df_pre_corr['safety_stock'].sum()}")
logger.info(f"Sum SS after: {df_post_corr['safety_stock'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger,
cols_to_compare=['safety_stock'])
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'LDOH')
    # SS HARD-CODED FLOOR FOR WH-ASSORTMENT DRUGS
    logger.info(f"If WH assortment is active then no drug should have 0 SS, setting hardcoded floor value")
df_pre_corr = safety_stock_df.copy()
safety_stock_df['safety_stock'] = np.where(
((safety_stock_df['safety_stock'] < ss_harcoded)&(safety_stock_df['wh_assortment']=='Yes')),
ss_harcoded, safety_stock_df['safety_stock'])
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum SS before: {df_pre_corr['safety_stock'].sum()}")
logger.info(f"Sum SS after: {df_post_corr['safety_stock'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger,
cols_to_compare=['safety_stock'])
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'HDOH')
# calculate ROP - add lead time demand to SS
safety_stock_df['reorder_point'] = safety_stock_df.apply(
lambda row: np.round(
row['lead_time_mean'] * row['fcst'] / fcst_weeks / 7),
axis=1) + safety_stock_df['safety_stock']
# calculate OUP - add order_freq demand to ROP
safety_stock_df['order_upto_point'] = (
safety_stock_df['reorder_point'] +
np.round(
np.where(
                # if rounding off gives 0, increase it to the 4-week forecast
(safety_stock_df['reorder_point'] +
safety_stock_df[
'fcst'] * order_freq / fcst_weeks / 7 < 0.5) &
(safety_stock_df['fcst'] > 0),
safety_stock_df['fcst'],
safety_stock_df['fcst'] * order_freq / fcst_weeks / 7))
)
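    # Worked example (illustrative only, numbers assumed): with fcst = 56 units over the
    # 4-week horizon (2/day), lead_time_mean = 5 days and safety_stock = 6:
    #   reorder_point    = round(5 * 56 / 4 / 7) + 6       = 10 + 6 = 16
    #   order_upto_point = 16 + round(56 * 4 / 4 / 7)      = 16 + 8 = 24
    # i.e. ROP covers lead-time demand plus the safety buffer, and OUP adds one ordering
    # cycle (order_freq = 4 days) of demand on top of the ROP.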
# correction for negative forecast
safety_stock_df['safety_stock'] = np.where(
safety_stock_df['safety_stock'] < 0,
0, safety_stock_df['safety_stock'])
safety_stock_df['reorder_point'] = np.where(
safety_stock_df['reorder_point'] < 0,
0, safety_stock_df['reorder_point'])
safety_stock_df['order_upto_point'] = np.where(
safety_stock_df['order_upto_point'] < 0,
0, safety_stock_df['order_upto_point'])
    # correction to keep OUP at least 1 greater than ROP
condition = [safety_stock_df['order_upto_point']==0,safety_stock_df['order_upto_point']>safety_stock_df['reorder_point'],safety_stock_df['order_upto_point']<=safety_stock_df['reorder_point']]
choice = [safety_stock_df['order_upto_point'],safety_stock_df['order_upto_point'],safety_stock_df['order_upto_point']+1]
safety_stock_df['order_upto_point'] = np.select(condition,choice)
    return safety_stock_df

# ==== end of file: zeno_etl_libs/utils/goodaid_forecast/engine/goodaid_safety_stock_calculation.py (package zeno-etl-libs-v3) ====
import pandas as pd
import numpy as np
import time
from zeno_etl_libs.django.api import Sql
from zeno_etl_libs.db.db import MySQL
def goodaid_doid_update(data, type_list, db, schema, logger=None, only_gaid=True):
# GA skus to be omitted
ga_sku_query = f"""
select "drug-id" as drug_id
from "{schema}"."wh-sku-subs-master" wh
left join "{schema}".drugs d
on d.id = wh."drug-id"
where d."company-id" = 6984
"""
ga_sku = db.get_df(ga_sku_query)
ga_sku_list = tuple(ga_sku['drug_id'])
# import pdb; pdb.set_trace()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
data = data[['store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']]
data = data.rename(columns={
'corr_min': 'min', 'corr_ss': 'safe_stock', 'corr_max': 'max'})
data = data[data['drug_id'].isin(ga_sku_list)]
mysql = MySQL()
sql = Sql()
for store_id in data['store_id'].unique():
current_ss_query = """
SELECT doid.id, doid.`store-id` , doid.`drug-id` , doid.min,
doid.`safe-stock` , doid.max
FROM `drug-order-info-data` doid
left join drugs d
on d.id = doid.`drug-id`
where doid.`store-id` = {store_id}
and d.`type` in {type_list}
and d.id in {ga_sku_list}
and d.`company-id` = 6984
""".format(store_id=store_id,
type_list=type_list,
ga_sku_list=ga_sku_list,
schema=schema)
mysql.open_connection()
current_ss = pd.read_sql(current_ss_query, mysql.connection)
mysql.close()
current_ss.columns = [c.replace('-', '_') for c in current_ss.columns]
data_store = data.loc[
data['store_id'] == store_id,
['store_id', 'drug_id', 'min', 'safe_stock', 'max']]
        # Do not let the code erroneously force non-GoodAid drugs to zero
how = 'outer'
ss_joined = current_ss.merge(
data_store, on=['store_id', 'drug_id'], how=how,
suffixes=('_old', ''))
ss_joined['min'].fillna(0, inplace=True)
ss_joined['safe_stock'].fillna(0, inplace=True)
ss_joined['max'].fillna(0, inplace=True)
new_drug_entries = new_drug_entries.append(
ss_joined[ss_joined['id'].isna()])
ss_joined = ss_joined[~ss_joined['id'].isna()]
logger.info('Mysql upload for store ' + str(store_id))
logger.info('New entries ' + str(
ss_joined[ss_joined['id'].isna()].shape[0]))
ss_joined['flag'] = np.where(
(ss_joined['min_old'] == ss_joined['min']) &
(ss_joined['safe_stock_old'] == ss_joined['safe_stock']) &
(ss_joined['max_old'] == ss_joined['max']),
'values same', 'values changed'
)
ss_to_upload = ss_joined.loc[
ss_joined['flag'] == 'values changed',
['id', 'min', 'safe_stock', 'max']]
logger.info('SS to update only for ' + str(
ss_joined[ss_joined['flag'] != 'values same'].shape[0]))
data_to_be_updated_list = list(ss_to_upload.apply(dict, axis=1))
if len(data_to_be_updated_list) > 0:
chunk_size = 1000
for i in range(0, len(data_to_be_updated_list), chunk_size):
status, msg = sql.update(
{'table': 'DrugOrderInfoData',
'data_to_be_updated': data_to_be_updated_list[i:i+chunk_size]}, logger)
logger.info(f"DrugOrderInfoData update API "
f"count: {min(i+chunk_size, len(data_to_be_updated_list))}, status: {status}, msg: {msg}")
drug_list = str(list(ss_joined.loc[
ss_joined['flag'] == 'values changed', 'drug_id'].unique())
).replace('[', '(').replace(']', ')')
update_test_query = """
SELECT `store-id` , `drug-id` , min , `safe-stock` , max
from `drug-order-info-data` doid
where `store-id` = {store_id}
and `drug-id` in {drug_list}
""".format(store_id=store_id,
drug_list=drug_list,
schema=schema)
time.sleep(15)
mysql.open_connection()
update_test = pd.read_sql(update_test_query, mysql.connection)
mysql.close()
update_test.columns = [c.replace('-', '_') for c in update_test.columns]
update_test = ss_joined.loc[
ss_joined['flag'] == 'values changed',
['store_id', 'drug_id', 'min', 'safe_stock', 'max']].merge(
update_test, on=['store_id', 'drug_id'],
suffixes=('_new', '_prod'))
update_test['mismatch_flag'] = np.where(
(update_test['min_new'] == update_test['min_prod']) &
(update_test['safe_stock_new'] == update_test[
'safe_stock_prod']) &
(update_test['max_new'] == update_test['max_prod']),
'updated', 'not updated'
)
missed_entries = missed_entries.append(
update_test[update_test['mismatch_flag'] == 'not updated'])
logger.info(
'Entries updated successfully: ' +
str(update_test[
update_test['mismatch_flag'] == 'updated'].shape[0]))
logger.info(
'Entries not updated successfully: ' +
str(update_test[
update_test['mismatch_flag'] == 'not updated'].shape[
0]))
    return new_drug_entries, missed_entries

# ==== end of file: zeno_etl_libs/utils/goodaid_forecast/engine/goodaid_doid_update_ss.py (package zeno-etl-libs-v3) ====
import numpy as np
import pandas as pd
import datetime as dt
from zeno_etl_libs.utils.goodaid_forecast.engine.config_goodaid import (
store_age_limit,
drug_age_limit,
store_col,
drug_col,
key_col,
similar_drug_type
)
class GoodaidloadData:
def load_file(self, db, query):
df = db.get_df(query)
df.columns = [c.replace('-', '_') for c in df.columns]
return df
def load_all_input(
self,
type_list=None,
store_id_list=None,
last_date=None,
reset_date=None,
schema=None,
db=None
):
drug_list = self.load_file(
query="""
select id as drug_id from "{schema}".drugs where type in {0}
""".format(type_list, schema=schema),
db=db
)
sales_history = self.load_file(
query="""
select date("created-at") as "sales-date","store-id", "drug-id" ,
sum("net-quantity") as "net-sales-quantity"
from "{schema}".sales s
where "store-id" in {store_id_list}
and s."company-id" = 6984
-- and s."drug-id" = 487502
and date("created-at") >= '{last_date}'
and date("created-at") < '{reset_date}'
group by "store-id", "drug-id", "sales-date"
""".format(
store_id_list=store_id_list, last_date=last_date,
reset_date=reset_date, schema=schema),
db=db
)
cfr_pr = self.load_file(
query=f"""
select cfr."store-id", cfr."drug-id",cfr."shortbook-date",
sum(cfr."loss-quantity") as "loss-quantity"
from "{schema}"."cfr-patient-request" cfr
left join "{schema}".drugs d
on cfr."drug-id" = d.id
where cfr."shortbook-date" >= '{last_date}'
and d."company-id" = 6984
-- and d."id" = 487502
and cfr."shortbook-date" < '{reset_date}'
and cfr."drug-id" <> -1
and (cfr."drug-category" = 'chronic' or cfr."repeatability-index" >= 40)
and cfr."loss-quantity" > 0
and cfr."drug-type" in {type_list}
and cfr."store-id" in {store_id_list}
group by cfr."store-id",cfr."drug-id", cfr."shortbook-date"
""",
db=db
)
calendar = self.load_file(
query="""
select date, year, month, "week-of-year", "day-of-week"
from "{schema}".calendar
where date < '{reset_date}'
""".format(schema=schema, reset_date=reset_date),
db=db
)
first_bill_date = self.load_file(
query="""
select "store-id" , min(date("created-at")) as bill_date from "{schema}".sales
where "store-id" in {store_id_list}
group by "store-id"
""".format(schema=schema, store_id_list=store_id_list),
db=db
)
first_store_drug_bill_date = self.load_file(
query="""
select
s."store-id" ,
s."drug-id" ,
min(date(s."created-at")) as "first-store-drug-bill"
from
"{schema}".sales s
where
s."company-id" = 6984
and s."store-id" in {store_id_list}
group by
s."store-id" ,
s."drug-id"
""".format(schema=schema, store_id_list=store_id_list),
db=db
)
wh_goodaid_assortment = self.load_file(
query="""
select
d."id" as "drug-id",
case
when wssm."add-wh" is not null then wssm."add-wh"
else 'No-Entry'
end as "wh-assortment"
from
"{schema}".drugs d
left join "{schema}"."wh-sku-subs-master" wssm
on
d.id = wssm."drug-id"
where
d."type" not in ('discontinued-products','banned')
and d.company = 'GOODAID'
""".format(schema=schema),
db=db
)
similar_drug_mapping = self.load_file(
query="""
select
ducm."drug-id" ,
ducm."group",
d."type"
from
"{schema}"."drug-unique-composition-mapping" ducm
left join "{schema}".drugs d
on
ducm."drug-id" = d.id
""".format(schema=schema),
db=db
)
# Exception handling
date_to_add = (dt.datetime.strptime(reset_date, '%Y-%m-%d') - dt.timedelta(days=8)).strftime('%Y-%m-%d')
date_to_add2 = (dt.datetime.strptime(reset_date, '%Y-%m-%d') - dt.timedelta(days=15)).strftime('%Y-%m-%d')
sales_history_add = pd.DataFrame(columns=sales_history.columns)
first_store_drug_bill_date_add = pd.DataFrame(columns=first_store_drug_bill_date.columns)
        # store_id_list arrives as a string like "(2)" or "(2, 51)"; split it into
        # individual store ids instead of iterating over it character by character
        for stores in str(store_id_list).strip('()').split(','):
            if stores.strip() == '':
                continue
            stores = int(stores)
drugs_in_ga_assortment = tuple(map(int, wh_goodaid_assortment['drug_id'].unique()))
drugs_in_sales_history = tuple(map(int, sales_history[sales_history['store_id']==stores]['drug_id'].unique()))
drugs_in_first_store_drug_bill = tuple(map(int, first_store_drug_bill_date[first_store_drug_bill_date['store_id']==stores]['drug_id'].unique()))
drugs_in_assortment_but_not_in_sales_history = tuple(
set(drugs_in_ga_assortment) - set(drugs_in_sales_history))
drugs_in_sales_history_but_not_in_assortment = tuple(
set(drugs_in_sales_history) - set(drugs_in_ga_assortment))
drugs_in_assortment_but_not_in_first_store_drug_bill = tuple(
set(drugs_in_ga_assortment) - set(drugs_in_first_store_drug_bill))
dict = {'sales_date': [date_to_add]*len(drugs_in_assortment_but_not_in_sales_history),
'store_id': [stores]*len(drugs_in_assortment_but_not_in_sales_history),
'drug_id': list(drugs_in_assortment_but_not_in_sales_history),
'net_sales_quantity':1
}
sales_history_add_store = pd.DataFrame(dict)
sales_history_add = pd.concat([sales_history_add_store,sales_history_add],sort = True)
dict = {'sales_date': [date_to_add2]*len(drugs_in_assortment_but_not_in_sales_history),
'store_id': [stores]*len(drugs_in_assortment_but_not_in_sales_history),
'drug_id': list(drugs_in_assortment_but_not_in_sales_history),
'net_sales_quantity':1
}
sales_history_add_store = pd.DataFrame(dict)
sales_history_add = pd.concat([sales_history_add_store, sales_history_add], sort=True)
dict2 = {'store_id':[stores]*len(drugs_in_assortment_but_not_in_first_store_drug_bill),
'drug_id':list(drugs_in_assortment_but_not_in_first_store_drug_bill),
'first_store_drug_bill':[date_to_add2]*len(drugs_in_assortment_but_not_in_first_store_drug_bill)}
first_store_drug_bill_date_add_store = pd.DataFrame(dict2)
first_store_drug_bill_date_add = pd.concat([first_store_drug_bill_date_add_store,first_store_drug_bill_date_add],sort = True)
sales_history_add[['store_id','drug_id','net_sales_quantity']] = sales_history_add[['store_id','drug_id','net_sales_quantity']].astype(int)
sales_history = pd.concat([sales_history,sales_history_add],sort=True)
first_store_drug_bill_date = pd.concat([first_store_drug_bill_date, first_store_drug_bill_date_add], sort=True)
return (
drug_list,
sales_history,
cfr_pr,
calendar,
first_bill_date,
first_store_drug_bill_date,
wh_goodaid_assortment,
similar_drug_mapping,
sales_history_add
)
class Goodaid_data_additional_processing:
def add_ts_id(self, df):
df = df[~df[drug_col].isnull()].reset_index(drop=True)
df['ts_id'] = (
df[store_col].astype(int).astype(str)
+ '_'
+ df[drug_col].astype(int).astype(str)
)
return df
def age_bucket_bifurcation(self, df,reset_date):
df['reset_date'] = reset_date
df['reset_date'] = pd.to_datetime(df['reset_date'])
df['bill_date'] = pd.to_datetime(df['bill_date'])
df['first_store_drug_bill'] = pd.to_datetime(df['first_store_drug_bill'])
df['store_age'] = (df['reset_date'] - df['bill_date']).dt.days
df['drug_store_age'] = (df['reset_date'] - df['first_store_drug_bill']).dt.days
conditions = [((df['store_age'] >= store_age_limit) & (df['drug_store_age'] >= drug_age_limit)),
((df['store_age'] >= store_age_limit) & (df['drug_store_age'] < drug_age_limit)),
((df['store_age'] < store_age_limit) & (df['drug_store_age'] >= drug_age_limit)),
((df['store_age'] < store_age_limit) & (df['drug_store_age'] < drug_age_limit))]
choice = ['B1', 'B2', 'B2', 'B2']
df['age_bucket'] = np.select(conditions, choice)
df.drop(columns=['bill_date', 'reset_date', 'first_store_drug_bill', 'store_age'],
inplace=True)
# df.drop(columns=['bill_date', 'reset_date', 'first_store_drug_bill', 'store_age', 'drug_store_age'],
# inplace=True)
return df
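    # Descriptive note (added for clarity, not in the original source): a store-drug pair
    # is labelled B1 only when the store is at least store_age_limit days old AND the drug
    # has been billed at that store for at least drug_age_limit days (both limits come
    # from config_goodaid); every other combination falls into B2, which is later forecast
    # at composition level rather than on its own thin history.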
def merge_first_store_drug_bill_date(self,sales,first_store_drug_bill_date):
sales = sales.merge(first_store_drug_bill_date, on = [key_col,store_col,drug_col],how = 'left')
return sales
def merge_first_bill_date(self,sales,first_bill_date):
sales = sales.merge(first_bill_date, on='store_id', how='left')
return sales
def age_bucketing(self,first_store_drug_bill_date,sales_pred,sales,reset_date,first_bill_date):
sales = self.merge_first_bill_date(sales,first_bill_date)
sales_pred = self.merge_first_bill_date(sales_pred,first_bill_date)
first_store_drug_bill_date = self.add_ts_id(first_store_drug_bill_date)
sales = self.merge_first_store_drug_bill_date(sales,first_store_drug_bill_date)
sales = self.age_bucket_bifurcation(sales,reset_date)
sales_pred = self.merge_first_store_drug_bill_date(sales_pred,first_store_drug_bill_date)
sales_pred = self.age_bucket_bifurcation(sales_pred,reset_date)
return sales,sales_pred
def merge_assortment(self,sales,wh_goodaid_assortment):
sales = sales.merge(wh_goodaid_assortment,on = drug_col, how = 'left')
return sales
def add_wh_current_assortment(self,sales_pred,sales,wh_goodaid_assortment):
sales = self.merge_assortment(sales,wh_goodaid_assortment)
sales_pred = self.merge_assortment(sales_pred,wh_goodaid_assortment)
return sales, sales_pred
def formatting_column_type_int(self,df,col):
df[col] = df[col].astype(int)
return df
def goodaid_extra_processing_all(self,first_store_drug_bill_date,sales_pred,sales,reset_date,first_bill_date,wh_goodaid_assortment):
sales = self.formatting_column_type_int(sales,store_col)
sales = self.formatting_column_type_int(sales,drug_col)
sales_pred = self.formatting_column_type_int(sales_pred,store_col)
sales_pred = self.formatting_column_type_int(sales_pred,drug_col)
first_store_drug_bill_date = self.formatting_column_type_int(first_store_drug_bill_date,store_col)
first_store_drug_bill_date = self.formatting_column_type_int(first_store_drug_bill_date,drug_col)
first_bill_date = self.formatting_column_type_int(first_bill_date,store_col)
sales, sales_pred = self.age_bucketing(first_store_drug_bill_date,sales_pred,sales,reset_date,first_bill_date)
sales, sales_pred = self.add_wh_current_assortment(sales_pred,sales,wh_goodaid_assortment)
return sales,sales_pred
class b2_goodaid_load_data:
def load_file(self, db, query):
df = db.get_df(query)
df.columns = [c.replace('-', '_') for c in df.columns]
return df
def load_all_input(
self,
type_list=None,
store_id_list=None,
sales_pred = None,
similar_drug_mapping = None,
last_date=None,
reset_date=None,
schema=None,
db=None
):
drug_list = self.load_file(
query="""
select id as drug_id from "{schema}".drugs where type in {0}
""".format(type_list, schema=schema),
db=db
)
b2_drugs = tuple(map(int, sales_pred[sales_pred['age_bucket'] == 'B2']['drug_id'].unique()))
group_info = similar_drug_mapping[similar_drug_mapping["drug_id"].isin(b2_drugs)]
groups = tuple(map(str,group_info["group"].unique()))
drug_info = similar_drug_mapping[similar_drug_mapping["group"].isin(groups)]
drug_info = drug_info[drug_info['type'].isin(similar_drug_type)]
similar_drugs = tuple(map(int, drug_info['drug_id'].unique()))
sales_history = self.load_file(
query="""
select date(s."created-at") as "sales-date",s."store-id", d1."drug-id" as "drug-id",
sum(s."net-quantity") as "net-sales-quantity"
from "{schema}".sales s
left join "{schema}"."drug-unique-composition-mapping" d1
on
s."drug-id" = d1."drug-id"
where "store-id" in {store_id_list}
and s."drug-id" in {similar_drugs}
and date(s."created-at") >= '{last_date}'
and date(s."created-at") < '{reset_date}'
group by s."store-id", d1."drug-id", "sales-date"
""".format(similar_drugs=similar_drugs + (0,0),groups = groups ,
store_id_list=store_id_list, last_date=last_date,
reset_date=reset_date, schema=schema),
db=db
)
cfr_pr = self.load_file(
query=f"""
select cfr."store-id", d1."drug-id" as "drug-id",cfr."shortbook-date",
sum(cfr."loss-quantity") as "loss-quantity"
from "{schema}"."cfr-patient-request" cfr
left join "{schema}".drugs d
on cfr."drug-id" = d.id
left join "{schema}"."drug-unique-composition-mapping" d1
on
cfr."drug-id" = d1."drug-id"
where cfr."shortbook-date" >= '{last_date}'
and cfr."drug-id" in {similar_drugs}
and cfr."shortbook-date" < '{reset_date}'
and cfr."drug-id" <> -1
and (cfr."drug-category" = 'chronic' or cfr."repeatability-index" >= 40)
and cfr."loss-quantity" > 0
and cfr."drug-type" in {type_list}
and cfr."store-id" in {store_id_list}
group by cfr."store-id",d1."drug-id", cfr."shortbook-date"
""",
db=db
)
calendar = self.load_file(
query="""
select date, year, month, "week-of-year", "day-of-week"
from "{schema}".calendar
where date < '{reset_date}'
""".format(schema=schema, reset_date=reset_date),
db=db
)
return (
drug_list,
sales_history,
cfr_pr,
calendar,
drug_info,
group_info
        )

# ==== end of file: zeno_etl_libs/utils/goodaid_forecast/engine/goodaid_data_load.py (package zeno-etl-libs-v3) ====
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
from tqdm import tqdm
import logging
import warnings
warnings.filterwarnings("ignore")
logger = logging.getLogger("_logger")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
from zeno_etl_libs.utils.ipc2.config_ipc import (
date_col,
target_col,
store_col,
drug_col,
eol_cutoff
)
class Segmentation:
def add_ts_id(self, df):
df['ts_id'] = (
df[store_col].astype(int).astype(str)
+ '_'
+ df[drug_col].astype(int).astype(str)
)
return df
def _calc_abc(self, df52):
B_cutoff = 0.8
C_cutoff = 0.95
D_cutoff = 0.995
tot_sales = (
df52.groupby([
'ts_id'
])[target_col].sum().reset_index()
)
tot_sales.rename(columns={target_col: 'total_LY_sales'}, inplace=True)
tot_sales.sort_values('total_LY_sales', ascending=False, inplace=True)
tot_sales["perc_sales"] = (
tot_sales['total_LY_sales'] / tot_sales['total_LY_sales'].sum()
)
tot_sales["cum_perc_sales"] = tot_sales.perc_sales.cumsum()
tot_sales["ABC"] = "A"
tot_sales.loc[tot_sales.cum_perc_sales > B_cutoff, "ABC"] = "B"
tot_sales.loc[tot_sales.cum_perc_sales > C_cutoff, "ABC"] = "C"
tot_sales.loc[tot_sales.cum_perc_sales > D_cutoff, "ABC"] = "D"
# tot_sales = self.add_ts_id(tot_sales)
return tot_sales[['ts_id', 'ABC', 'total_LY_sales']]
# TODO: lower COV cutoffs
def get_abc_classification(self, df52):
province_abc = df52.groupby(
[store_col]
).apply(self._calc_abc)
province_abc = province_abc[['ts_id', "ABC"]].reset_index(drop=True)
        # total and average last-year sales per ts_id
tot_sales = (
df52
.groupby(['ts_id'])[target_col]
.agg(['sum', 'mean'])
.reset_index()
)
tot_sales.rename(
columns={'sum': 'total_LY_sales', 'mean': 'avg_ly_sales'},
inplace=True)
tot_sales = tot_sales.merge(
province_abc,
on=['ts_id'],
how='left'
)
tot_sales = tot_sales.drop_duplicates()
# tot_sales = self.add_ts_id(tot_sales)
tot_sales = tot_sales[['ts_id', 'ABC']]
return tot_sales
def get_xyzw_classification(self, df1):
input_ts_id = df1['ts_id'].unique()
df1 = df1[df1[target_col] > 0]
cov_df = df1.groupby(['ts_id'])[target_col].agg(
["mean", "std", "count", "sum"])
cov_df.reset_index(drop=False, inplace=True)
cov_df['cov'] = np.where(
((cov_df["count"] > 2) & (cov_df["sum"] > 0)),
(cov_df["std"]) / (cov_df["mean"]),
np.nan
)
cov_df['WXYZ'] = 'Z'
cov_df.loc[cov_df['cov'] <= 1.2, 'WXYZ'] = 'Y'
cov_df.loc[cov_df['cov'] <= 0.8, 'WXYZ'] = 'X'
cov_df.loc[cov_df['cov'] <= 0.5, 'WXYZ'] = 'W'
# cov_df = self.add_ts_id(cov_df)
cov_df = cov_df[['ts_id', 'cov', 'WXYZ']]
non_mapped_ts_ids = list(
set(input_ts_id) - set(cov_df['ts_id'].unique())
)
non_mapped_cov = pd.DataFrame({
'ts_id': non_mapped_ts_ids,
'cov': [np.nan] * len(non_mapped_ts_ids),
'WXYZ': ['Z'] * len(non_mapped_ts_ids)
})
cov_df = pd.concat([cov_df, non_mapped_cov], axis=0)
cov_df = cov_df.reset_index(drop=True)
return cov_df
def get_std(self, df1):
input_ts_id = df1['ts_id'].unique()
# df1 = df1[df1[target_col]>0]
std_df = df1.groupby(['ts_id'])[target_col].agg(["std"])
return std_df
def calc_interval_mean(self, x, key):
df = pd.DataFrame({"X": x, "ts_id": key}).reset_index(
drop=True).reset_index()
df = df[df.X > 0]
df["index_shift"] = df["index"].shift(-1)
df["interval"] = df["index_shift"] - df["index"]
df = df.dropna(subset=["interval"])
df['ADI'] = np.mean(df["interval"])
return df[['ts_id', 'ADI']]
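    # Descriptive note (added for clarity, not in the original source): the interval mean
    # above is the Average Demand Interval (ADI). For a series with non-zero demand at
    # positions [0, 3, 4, 8], the gaps between successive demand points are [3, 1, 4],
    # so ADI = (3 + 1 + 4) / 3 ≈ 2.67 periods between demand occurrences.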
def calc_adi(self, df):
# df = self.add_ts_id(df)
logger.info(
'Combinations entering adi: {}'.format(df['ts_id'].nunique()))
dict_of = dict(iter(df.groupby(['ts_id'])))
logger.info("Total tsids in df: {}".format(df.ts_id.nunique()))
logger.info("Total dictionary length: {}".format(len(dict_of)))
list_dict = [
self.calc_interval_mean(dict_of[x][target_col], x) for x in
tqdm(dict_of.keys())
]
data = (
pd.concat(list_dict)
.reset_index(drop=True)
.drop_duplicates()
.reset_index(drop=True)
)
logger.info('Combinations exiting adi: {}'.format(data.ts_id.nunique()))
return data
def get_PLC_segmentation(self, df, mature_cutoff_date, eol_cutoff_date):
df1 = df[df[target_col] > 0]
df1 = df1.groupby(['ts_id']).agg({date_col: [min, max]})
df1.reset_index(drop=False, inplace=True)
df1.columns = [' '.join(col).strip() for col in df1.columns.values]
df1['PLC Status L1'] = 'Mature'
df1.loc[
(df1[date_col + ' min'] > mature_cutoff_date), 'PLC Status L1'
] = 'NPI'
df1.loc[
(df1[date_col + ' max'] <= eol_cutoff_date), 'PLC Status L1'
] = 'EOL'
# df1 = self.add_ts_id(df1)
df1 = df1[['ts_id', 'PLC Status L1']]
return df1
def get_group_mapping(self, seg_df):
seg_df['Mixed'] = seg_df['ABC'].astype(str) + seg_df['WXYZ'].astype(str)
seg_df['Group'] = 'Group3'
group1_mask = seg_df['Mixed'].isin(['AW', 'AX', 'BW', 'BX'])
seg_df.loc[group1_mask, 'Group'] = 'Group1'
group2_mask = seg_df['Mixed'].isin(['AY', 'AZ', 'BY', 'BZ'])
seg_df.loc[group2_mask, 'Group'] = 'Group2'
return seg_df
def calc_dem_pat(self, cov_df, adi_df):
logger.info('Combinations entering calc_dem_pat: {}'.format(
cov_df.ts_id.nunique()))
logger.info('Combinations entering calc_dem_pat: {}'.format(
adi_df.ts_id.nunique()))
df = pd.merge(cov_df, adi_df, how='left', on='ts_id')
df["cov2"] = np.power(df["cov"], 2)
df["classification"] = "Lumpy"
df.loc[
(df.ADI >= 1.32) & (df.cov2 < 0.49), "classification"
] = "Intermittent"
df.loc[
(df.ADI < 1.32) & (df.cov2 >= 0.49), "classification"
] = "Erratic"
df.loc[
(df.ADI < 1.32) & (df.cov2 < 0.49), "classification"
] = "Smooth"
logger.info(
'Combinations exiting calc_dem_pat: {}'.format(df.ts_id.nunique()))
return df[['ts_id', 'classification']]
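    # Descriptive note (added for clarity, not in the original source): the ADI = 1.32 and
    # CoV^2 = 0.49 cutoffs used above match the Syntetos-Boylan demand-classification
    # scheme: Smooth (low ADI, low CoV^2), Erratic (low ADI, high CoV^2),
    # Intermittent (high ADI, low CoV^2) and Lumpy (both high).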
def get_start_end_dates_df(self, df, key_col, date_col, target_col,
train_max_date, end_date):
start_end_date_df = (
df[df[target_col] > 0]
.groupby(key_col)[date_col]
.agg({'min', 'max'})
.reset_index()
.rename(columns={'min': 'start_date', 'max': 'end_date'})
)
start_end_date_df.loc[
(
start_end_date_df['end_date'] > (
train_max_date - relativedelta(weeks=eol_cutoff)
)
), 'end_date'
] = end_date
return start_end_date_df
def get_weekly_segmentation(self, df, df_sales_daily, train_max_date,
end_date):
df = df[df[date_col] <= train_max_date]
df1 = df[
df[date_col] > (train_max_date - relativedelta(weeks=52))
].copy(deep=True)
df_std = df_sales_daily[
df_sales_daily[date_col] > (train_max_date - relativedelta(days=90))
].copy(deep=True)
df1 = self.add_ts_id(df1)
abc_df = self._calc_abc(df1)
xyzw_df = self.get_xyzw_classification(df1)
std_df = self.get_std(df_std)
adi_df = self.calc_adi(df1)
demand_pattern_df = self.calc_dem_pat(xyzw_df[['ts_id', 'cov']], adi_df)
mature_cutoff_date = train_max_date - relativedelta(weeks=52)
eol_cutoff_date = train_max_date - relativedelta(weeks=13)
plc_df = self.get_PLC_segmentation(df, mature_cutoff_date,
eol_cutoff_date)
start_end_date_df = self.get_start_end_dates_df(
df, key_col='ts_id',
date_col=date_col,
target_col=target_col,
train_max_date=train_max_date,
end_date=end_date
)
seg_df = plc_df.merge(abc_df, on='ts_id', how='outer')
seg_df = seg_df.merge(xyzw_df, on='ts_id', how='outer')
seg_df = seg_df.merge(adi_df, on='ts_id', how='outer')
seg_df = seg_df.merge(demand_pattern_df, on='ts_id', how='outer')
seg_df = seg_df.merge(start_end_date_df, on='ts_id', how='outer')
seg_df = seg_df.merge(std_df, on='ts_id', how='outer')
seg_df = self.get_group_mapping(seg_df)
seg_df['Mixed'] = np.where(seg_df['Mixed']=='nannan', np.nan, seg_df['Mixed'])
drug_class = seg_df[
['ts_id', 'total_LY_sales', 'std', 'cov', 'ABC', 'WXYZ']]
drug_class[[store_col, drug_col]] = drug_class['ts_id'].str.split('_',
expand=True)
drug_class.rename(
columns={'total_LY_sales': 'net_sales', 'std': 'sales_std_dev',
'cov': 'sales_cov', 'ABC': 'bucket_abc',
'WXYZ': 'bucket_xyz'}, inplace=True)
drug_class.drop(columns=['ts_id'], inplace=True)
# seg_df[[store_col, drug_col]] = seg_df['ts_id'].str.split('_', expand = True)
# seg_df.drop(columns=['ts_id'],inplace=True)
# seg_df.rename(columns={'std':'sales_std_dev', 'cov':'sales_cov', 'ABC':'bucket_abcd', 'WXYZ':'bucket_wxyz', 'Mixed':'bucket'}, inplace=True)
# seg_df['PLC Status L1'] = np.where(seg_df['PLC Status L1']=='NPI', 'New_Product', seg_df['PLC Status L1'])
# seg_df['start_date'] = seg_df['start_date'].astype(str)
# seg_df = seg_df[[store_col, drug_col,'PLC Status L1', 'total_LY_sales', 'bucket_abcd', 'bucket_wxyz', 'bucket', 'classification', 'Group', 'sales_std_dev', 'sales_cov', 'ADI', 'start_date' ]]
# seg_df = pd.merge(seg_df, drug_class[[store_col, 'store_name', drug_col, ]])
        return seg_df, drug_class

# ==== end of file: zeno_etl_libs/utils/goodaid_forecast/engine/goodaid_segmentation.py (package zeno-etl-libs-v3) ====
import numpy as np
# Global Queries
Q_REPEATABLE = """
SELECT
id AS "drug-id",
"is-repeatable"
FROM
"{schema}".drugs
WHERE
"is-repeatable" = 1
"""
Q_PTR = """
select
"drug-id",
AVG(ptr) as ptr
FROM
"{schema}"."inventory-1"
GROUP BY
"drug-id"
"""
Q_STORES = """
select
id as "store-id",
name as "store-name"
FROM
"{schema}".stores
"""
Q_DRUG_INFO = """
select
id as "drug-id",
"drug-name",
type,
category
FROM
"{schema}".drugs
"""
# Queries with parameters
def prep_data_from_sql(query_pass, db):
data_fetched = db.get_df(query_pass)
data_fetched.columns = [c.replace('-', '_') for c in data_fetched.columns]
return data_fetched
def query_drug_grade(store_id, schema):
query = """
SELECT
"drug-id",
"drug-grade"
FROM
"{schema}"."drug-order-info"
WHERE
"store-id" = {0}
""".format(store_id, schema=schema)
return query
def query_max_zero(store_id, schema):
query = """
SELECT
"store-id",
"drug-id"
FROM
"{schema}"."drug-order-info"
WHERE
"store-id" = {0}
and max = 0
""".format(store_id, schema=schema)
return query
def query_inventory(store_id, schema):
query = """
SELECT
"store-id",
"drug-id",
SUM(quantity) AS "current-inventory"
FROM
"{schema}"."inventory-1"
WHERE
"store-id" = {0}
GROUP BY
"store-id",
"drug-id"
""".format(store_id, schema=schema)
return query
def get_drug_info(store_id, db, schema):
# Inventory and PTR info for order value
# Also, drug-type and drug-grade
q_inv = query_inventory(store_id, schema)
data_inv = prep_data_from_sql(q_inv, db)
data_ptr = prep_data_from_sql(Q_PTR.format(schema=schema), db)
data_ptr["ptr"] = data_ptr["ptr"].astype(float)
data_drug_info = prep_data_from_sql(Q_DRUG_INFO.format(schema=schema), db)
q_drug_grade = query_drug_grade(store_id, schema)
data_drug_grade = prep_data_from_sql(q_drug_grade, db)
data_stores = prep_data_from_sql(Q_STORES.format(schema=schema), db)
return data_inv, data_ptr, data_drug_info, data_drug_grade, data_stores
def order_value_report(ss_drug_sales):
ss_drug_sales['to_order_quantity'] = np.where(
ss_drug_sales['current_inventory'] < ss_drug_sales['safety_stock'],
ss_drug_sales['max'] - ss_drug_sales['current_inventory'], 0
)
ss_drug_sales['to_order_value'] = (
ss_drug_sales['to_order_quantity'] * ss_drug_sales['ptr'])
order_value = ss_drug_sales.groupby(
['type', 'store_name', 'drug_grade']). \
agg({'to_order_quantity': 'sum', 'to_order_value': 'sum'}). \
reset_index()
    return order_value

# ==== end of file: zeno_etl_libs/utils/new_stores/helper_functions.py (package zeno-etl-libs-v3) ====
from datetime import datetime
from datetime import timedelta
from zeno_etl_libs.db.db import PostGre
from scipy.stats import norm
from zeno_etl_libs.utils.new_stores.new_store_stock_triggers import *
from zeno_etl_libs.utils.new_stores.helper_functions import *
def new_stores_ss_calc(store_id, run_date, db, schema, logger):
#####################################################
# Final function for new stores (1-3 months) safety stock
# Combines base and triggers algorithm
#####################################################
# Get demand
data_demand = get_demand(store_id, db, schema)
# Get lead time
data_lt, lt_store_mean, lt_store_std = get_lead_time(store_id, run_date)
# Service level - hardcoded
service_level = 0.95
z = norm.ppf(service_level)
#####################################################
# SS calculation - Base algo
#####################################################
data = ss_calc(data_demand, data_lt, lt_store_mean, lt_store_std, z, db, schema)
data['algo_type'] = 'base'
logger.info("Length Base algo data {}".format(len(data)))
# Max>0
data_forecast_pos = data[data['max'] > 0].copy()
logger.info("Length Base algo forecast positive - data {}".format(len(data_forecast_pos)))
#####################################################
# Triggers
#####################################################
# Put max==0 logic here, and pass those drug-ids, for given store
data_algo_max0 = data[data['max'] == 0][['drug_id']].drop_duplicates()
data_algo_max0_list = data_algo_max0['drug_id'].to_list()
logger.info("Max 0 drugs from base algo, length is {}".format(len(data_algo_max0_list)))
# But this is max0 from base algo, there maybe other max0 in drug-order-info
# Fetching them
# Formatted SQL queries
q_max0 = query_max_zero(store_id, schema)
data_doi_max0 = prep_data_from_sql(q_max0, db)
data_doi_max0 = data_doi_max0[['drug_id']].drop_duplicates()
logger.info("Max 0 drugs from mysql drug-order-info, length is {}".format(len(data_doi_max0)))
# Remove drugs for which forecast is already positive
data_forecast_pos_list = data_forecast_pos['drug_id'].drop_duplicates().to_list()
data_doi_max0_forecast0 = data_doi_max0[~data_doi_max0['drug_id'].isin(data_forecast_pos_list)]
logger.info("Max 0 drugs from mysql drug-order-info, after removing forecast positive,"
"length is {}".format(len(data_doi_max0_forecast0)))
# Append both and take unique
data_doi_max0_forecast0_append = data_doi_max0_forecast0[~data_doi_max0_forecast0['drug_id'].isin(
data_algo_max0_list)]
logger.info("Max 0 drugs from mysql drug-order-info, non overlapping with forecast 0, "
"length is {}".format(len(data_doi_max0_forecast0_append)))
max0_drugs_df = data_algo_max0.append(data_doi_max0_forecast0_append)
max0_drugs_df = max0_drugs_df.drop_duplicates(subset='drug_id')
logger.info("Final Max 0 drugs, length is {}".format(len(max0_drugs_df)))
triggers_data, triggers_summary, \
triggers_store_report = triggers_combined(store_id, run_date,
max0_drugs_df, db, schema)
triggers_data = triggers_data[['drug_id', 'min', 'safety_stock', 'max']]
triggers_data['algo_type'] = 'non_sales_triggers'
# Output to s3 bucket
# triggers_summary.to_csv(output_dir_path + f'triggers_summary_{store_id}_{run_date}.csv',
# index=False)
# triggers_store_report.to_csv(output_dir_path +
# f'triggers_store_report_{store_id}_{run_date}.csv', index=False)
logger.info("Length Triggers algo data raw {}".format(len(triggers_data)))
# Remove those that are already part of base algo and already max>0
drugs_base = data_forecast_pos['drug_id'].drop_duplicates().to_list()
# Overlapping
triggers_data_overlap = triggers_data[triggers_data['drug_id'].isin(drugs_base)]
logger.info("Length triggers algo data overlapping {}".format(len(triggers_data_overlap)))
triggers_data_append = triggers_data[~triggers_data['drug_id'].isin(drugs_base)]
logger.info("Length triggers algo data non-overlapping {}".format(len(triggers_data_append)))
# Append base algo, and triggers algo output
data_final = data_forecast_pos.append(triggers_data_append)
logger.info("Length data final {}".format(len(data_final)))
# Put store id
data_final['store_id'] = store_id
# Final schema
data_final = data_final[['store_id', 'drug_id', 'min', 'safety_stock', 'max', 'algo_type']]
return data_final
def get_demand(store_id, db, schema):
# sales query
q_sales = f"""
select "store-id", "drug-id", date("created-at") as "sales-date",
sum("net-quantity") as "net-sales-quantity"
from "{schema}".sales s
where "store-id" = {store_id}
group by "store-id", "drug-id", "sales-date"
"""
data_s = db.get_df(q_sales)
data_s.columns = [c.replace('-', '_') for c in data_s.columns]
data_s['sales_date'] = pd.to_datetime(data_s['sales_date'])
# cfr pr loss
q_cfr_pr = f"""
select "store-id", "drug-id",
"attributed-loss-date" as "sales-date",
sum("loss-quantity") as "loss-quantity"
from "{schema}"."cfr-patient-request"
where "store-id" = {store_id}
and "drug-id" > 0
and "loss-quantity" > 0
group by "store-id", "drug-id", "attributed-loss-date"
"""
data_cfr_pr = db.get_df(q_cfr_pr)
data_cfr_pr["loss-quantity"] = data_cfr_pr["loss-quantity"].astype(float)
data_cfr_pr['sales-date'] = pd.to_datetime(data_cfr_pr['sales-date'])
data_cfr_pr.columns = [c.replace('-', '_') for c in data_cfr_pr.columns]
# Merge
merge_data = data_s.merge(data_cfr_pr, how='outer', on=['store_id', 'drug_id', 'sales_date'])
for i in ['net_sales_quantity', 'loss_quantity']:
merge_data[i] = merge_data[i].fillna(0)
merge_data['demand_quantity'] = merge_data['net_sales_quantity'] + merge_data['loss_quantity']
data_demand = merge_data.groupby(['drug_id', 'sales_date'])['demand_quantity'].sum().reset_index()
data_demand = data_demand.sort_values(by=['drug_id', 'sales_date'])
return data_demand
def get_lead_time(store_id, run_date):
    # Shortbook is normally created after some delay from the actual trigger event
sb_creation_delay_ethical = 1
sb_creation_delay_other = 1
sb_creation_delay_generic = 2
# Fetch data last 'N' days
end_date = str(datetime.strptime(run_date, '%Y-%m-%d') - timedelta(7))
begin_date = str(datetime.strptime(run_date, '%Y-%m-%d') - timedelta(97))
# ==== TEMP READ FROM PG ====
pg = PostGre()
pg.open_connection()
# ===========================
lead_time_query = '''
select
store_id,
drug_id,
drug_type,
status,
created_at,
received_at
from
ops_fulfillment
where
request_type = 'Auto Short'
and store_id = {store_id}
and created_at <= '{end_date}'
and created_at >= '{begin_date}'
and status not in ('failed', 'deleted')
'''.format(
store_id=store_id, end_date=end_date, begin_date=begin_date)
lead_time = pd.read_sql_query(lead_time_query, pg.connection)
# Convert null received at, to true null
lead_time['created_at'] = pd.to_datetime(lead_time['created_at'])
lead_time['received_at'].replace({'0000-00-00 00:00:00': ''}, inplace=True)
lead_time['received_at'] = pd.to_datetime(lead_time['received_at'])
# Calculate lead time
lead_time['lead_time'] = (lead_time['received_at'] -
lead_time['created_at']).astype('timedelta64[h]') / 24
# Missing value impute
lead_time['lead_time'].fillna(7, inplace=True)
# Incorporate delay values
lead_time['lead_time'] = np.select(
[lead_time['drug_type'] == 'generic',
lead_time['drug_type'] == 'ethical'],
[lead_time['lead_time'] + sb_creation_delay_generic,
lead_time['lead_time'] + sb_creation_delay_ethical],
default=lead_time['lead_time'] + sb_creation_delay_other
)
# Store averages
lt_store_mean = round(lead_time.lead_time.mean(), 2)
lt_store_std = round(lead_time.lead_time.std(ddof=0), 2)
# Summarize at drug level
lt_drug = lead_time.groupby('drug_id'). \
agg({'lead_time': [np.mean, np.std]}).reset_index()
lt_drug.columns = ['drug_id', 'lead_time_mean', 'lead_time_std']
# Impute for std missing
lt_drug['lead_time_std'] = np.where(
lt_drug['lead_time_std'].isin([0, np.nan]),
lt_store_std, lt_drug['lead_time_std']
)
# ===== CLOSE PG =====
pg.close_connection()
# ====================
return lt_drug, lt_store_mean, lt_store_std
def ss_calc(data_demand, data_lt, lt_store_mean, lt_store_std, z, db, schema):
# Drug type restrictions if any
q_drugs = f"""
select
id as "drug-id", type
from "{schema}".drugs
"""
# where `type` in ('ethical','generic')
data_drugs = db.get_df(q_drugs)
data_drugs.columns = [c.replace('-', '_') for c in data_drugs.columns]
# Avg and standard deviation demand
data_demand_min_date = data_demand['sales_date'].min()
data_demand_max_date = data_demand['sales_date'].max()
# Create full demand list, across all calendar dates, drug_id level
drugs = data_demand[['drug_id']].drop_duplicates()
dates = pd.DataFrame({'sales_date': pd.date_range(data_demand_min_date, data_demand_max_date, freq='D')})
drugs['key'] = 0
dates['key'] = 0
drug_dates = drugs[['drug_id', 'key']].merge(dates, on='key', how='outer')[['drug_id', 'sales_date']]
data_demand_all = drug_dates.merge(data_demand, how='left', on=['drug_id', 'sales_date'])
data_demand_all['demand_quantity'] = data_demand_all['demand_quantity'].fillna(0)
# Merge with drugs master
data_demand_all = data_demand_all.merge(data_drugs, how='left', on='drug_id')
# Treat outliers
'''
data_demand_all['demand_quantity'] = np.where(data_demand_all['demand_quantity'] > 20,
np.log(data_demand_all['demand_quantity']),
data_demand_all['demand_quantity'])
'''
# Calculate demand mean and std
data_demand_all_mean_std = data_demand_all.groupby(['drug_id', 'type'])['demand_quantity'].agg(
['mean', 'std']).reset_index()
data_demand_all_mean_std = data_demand_all_mean_std.rename(columns={'mean': 'demand_mean',
'std': 'demand_std'})
# Merge with lead time mean and std
data = data_demand_all_mean_std.merge(data_lt, how='left', on='drug_id')
data['lead_time_mean'] = data['lead_time_mean'].fillna(lt_store_mean)
data['lead_time_std'] = data['lead_time_std'].fillna(lt_store_std)
# Safety stock calculation
data['safety_stock'] = np.round(z * np.sqrt(data['lead_time_mean'] * np.square(data['demand_std'])
+ np.square(data['demand_mean']) * np.square(data['lead_time_std'])))
data['reorder_point'] = np.round(data['lead_time_mean'] * data['demand_mean'] + data['safety_stock'])
# Keep 30 days of stock by default
data['order_upto_point'] = np.round(data['demand_mean'] * 30)
# Adjustment: ethical kept at ~15 days (1/2), others at ~20 days (2/3)
data['order_upto_point'] = np.round(np.where(data['type'].isin(['ethical', 'high-value-ethical']),
data['order_upto_point'] * (1 / 2),
data['order_upto_point'] * (2 / 3)))
# Sanity check, order_upto_point (max) to be not less than reorder point
data['order_upto_point'] = np.round(np.where(data['order_upto_point'] < data['reorder_point'],
data['reorder_point'], data['order_upto_point']))
# Where reorder point is 1, 2 or 3 and equals order_upto_point (max), set max = max + 1
data['order_upto_point'] = np.round(np.where(((data['reorder_point'].isin([1, 2, 3])) &
(data['order_upto_point'] == data['reorder_point'])),
data['order_upto_point'] + 1, data['order_upto_point']))
# order-upto-point 1,2,3 corrections
# Source - ops/ipc/safety_stock
one_index = data[
data['order_upto_point'].isin([1])].index
data.loc[one_index, 'safety_stock'] = 0
data.loc[one_index, 'reorder_point'] = 1
data.loc[one_index, 'order_upto_point'] = 2
two_index = data[
data['order_upto_point'].isin([2])].index
data.loc[two_index, 'safety_stock'] = 0
data.loc[two_index, 'reorder_point'] = 1
data.loc[two_index, 'order_upto_point'] = 2
three_index = data[
data['order_upto_point'].isin([3])].index
data.loc[three_index, 'safety_stock'] = 1
data.loc[three_index, 'reorder_point'] = 2
data.loc[three_index, 'order_upto_point'] = 3
# Where reorder point is >= 4 and equals order_upto_point (max), set max = 1.5 * max
data['order_upto_point'] = np.round(np.where(((data['reorder_point'] >= 4) &
(data['order_upto_point'] == data['reorder_point'])),
data['order_upto_point'] * 1.5, data['order_upto_point']))
# Sanity check for max again
data['order_upto_point'] = np.round(np.where(data['order_upto_point'] < data['reorder_point'],
data['reorder_point'], data['order_upto_point']))
data = data.rename(columns={'safety_stock': 'min',
'reorder_point': 'safety_stock',
'order_upto_point': 'max'})
data = data[['drug_id', 'min', 'safety_stock', 'max']]
return data | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/new_stores/new_stores_ipc.py | new_stores_ipc.py |
from zeno_etl_libs.utils.new_stores.helper_functions import *
import pandas as pd
import numpy as np
def query_search(store_id, schema):
query = f"""
SELECT
id,
"store-id",
"drug-id",
"created-at"
FROM
"{schema}".searches
WHERE
"store-id" = {store_id}
"""
return query
def query_patient_request(store_id, schema):
query = f"""
SELECT
id,
"store-id",
"drug-id",
"quantity",
"created-at"
FROM
"{schema}"."short-book-1"
WHERE
"auto-short" = 0
and "auto-generated" = 0
and "store-id" = {store_id}
"""
return query
def query_manual_short(store_id, schema):
query = f"""
SELECT
id,
"store-id",
"drug-id",
"quantity",
"created-at"
FROM
"{schema}"."short-book-1"
WHERE
"auto-short" = 1
and "home-delivery" = 0
and "patient-id" != 4480
and "store-id" = {store_id}
"""
return query
def query_local_purchase(store_id, schema):
query = f"""
SELECT
i."store-id",
i."drug-id",
i."created-at",
ii."invoice-item-reference",
ii."actual-quantity" as quantity,
ii."net-value" as "lp-value"
FROM
"{schema}"."inventory-1" i
LEFT JOIN
"{schema}"."invoice-items-1" ii ON ii.id = i."invoice-item-id"
WHERE
i."store-id" = {store_id}
AND ii."invoice-item-reference" IS NULL
"""
return query
def query_stock_transfer(store_id, schema):
query = f"""
SELECT
a."source-store",
a."destination-store",
b."inventory-id",
c."drug-id",
b.quantity,
b."received-at"
FROM
"{schema}"."stock-transfers-1" a
INNER JOIN "{schema}"."stock-transfer-items-1" b
on a.id = b."transfer-id"
LEFT JOIN "{schema}"."inventory-1" c
on b."inventory-id" = c.id
WHERE
a."destination-store" = {store_id}
"""
return query
def triggers_combined(store_id, run_date, max0_drugs_df, db, schema):
#############################
# Main consolidated function for triggers (see usage sketch after this function)
#############################
# Get formatted SQL queries
q_search = query_search(store_id, schema)
q_pr = query_patient_request(store_id, schema)
q_ms = query_manual_short(store_id, schema)
q_lp = query_local_purchase(store_id, schema)
q_st = query_stock_transfer(store_id, schema)
# Data prep, using SQL
data_merge_c = data_prep_triggers(q_search, q_pr, q_ms, q_lp, q_st, run_date, db, schema)
# Augment with max0 info, current inventory info, ptr info
data_merge_c = data_augment_doi_inv(data_merge_c, store_id, max0_drugs_df, db, schema)
# Rule for which drugs to set max for
data_merge_c = make_keep_col(data_merge_c)
# Some extra filters and final df
# max_set_final is the final df at drug level
max_set_final, max_set_summary, max_set_f_store_summary = final_reset_sku(data_merge_c, db, schema)
return max_set_final, max_set_summary, max_set_f_store_summary
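# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumptions: `db` is an open DB helper exposing
# get_df(), `max0_drugs_df` is a single-column dataframe of drug_id currently
# at max = 0, and the schema name is hypothetical). Not executed anywhere.
# ---------------------------------------------------------------------------
# max_set_final, max_set_summary, max_set_f_store_summary = triggers_combined(
#     store_id=123, run_date='2022-01-01', max0_drugs_df=max0_drugs_df,
#     db=db, schema='my-schema')
# `max_set_final` has one row per (store_id, drug_id) with min=0, ss=0, max=1.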
def pre_trigger_data_prep_c(data_pass, run_date):
data = data_pass.copy()
data['created_at'] = pd.to_datetime(data['created_at'])
data_merge = data.copy()
data_merge['day_diff_current'] = (data_merge['created_at'] - pd.to_datetime(run_date)).dt.days
# Last 84days
data_merge_before = data_merge[data_merge['day_diff_current'].between(-84, -1)].copy()
data_merge_before['trigger_date'] = data_merge_before['created_at'].dt.date
# Group, to calculate unique days of trigger, and trigger quantity
data_merge_before_grp = data_merge_before.groupby(['store_id',
'drug_id']).agg({'created_at': 'count',
'trigger_date': 'nunique',
'quantity': 'sum'}).reset_index()
# Rename columns
data_merge_before_grp = data_merge_before_grp.rename(columns={'created_at': 'times_trigger',
'trigger_date': 'days_trigger'})
# Change to integer
for i in ['drug_id', 'quantity']:
data_merge_before_grp[i] = data_merge_before_grp[i].astype(int)
return data_merge_before_grp
def data_prep_triggers(q_search, q_pr, q_ms, q_lp, q_st, run_date, db, schema):
########################################
# Search
########################################
data_search_c = prep_data_from_sql(q_search, db)
data_search_c['quantity'] = 1
data_search_grp_c = pre_trigger_data_prep_c(data_search_c, run_date)
data_search_grp_c = data_search_grp_c.rename(columns={'times_trigger': 'times_searched',
'days_trigger': 'days_searched',
'quantity': 'quantity_searched'})
########################################
# PR
########################################
data_pr_c = prep_data_from_sql(q_pr, db)
data_pr_grp_c = pre_trigger_data_prep_c(data_pr_c, run_date)
data_pr_grp_c = data_pr_grp_c.rename(columns={'times_trigger': 'times_pr',
'days_trigger': 'days_pr',
'quantity': 'quantity_pr'})
########################################
# MS
########################################
data_ms_c = prep_data_from_sql(q_ms, db)
data_ms_grp_c = pre_trigger_data_prep_c(data_ms_c, run_date)
data_ms_grp_c = data_ms_grp_c.rename(columns={'times_trigger': 'times_ms',
'days_trigger': 'days_ms',
'quantity': 'quantity_ms'})
########################################
# LP
########################################
data_lp_c = prep_data_from_sql(q_lp, db)
data_lp_grp_c = pre_trigger_data_prep_c(data_lp_c, run_date)
data_lp_grp_c = data_lp_grp_c.rename(columns={'times_trigger': 'times_lp',
'days_trigger': 'days_lp',
'quantity': 'quantity_lp'})
########################################
# Stock transfer
########################################
data_st_c = prep_data_from_sql(q_st, db)
data_st_c['received_at'] = pd.to_datetime(data_st_c['received_at'], errors='coerce')
data_st_c = data_st_c[~data_st_c['received_at'].isnull()]
# Exclude central stores from source-stores
data_st_c = data_st_c[~data_st_c['source_store'].isin([52, 60, 92, 111])]
data_st_c['store_id'] = data_st_c['destination_store']
data_st_c['created_at'] = data_st_c['received_at']
data_st_grp_c = pre_trigger_data_prep_c(data_st_c, run_date)
data_st_grp_c = data_st_grp_c.rename(columns={'times_trigger': 'times_st',
'days_trigger': 'days_st',
'quantity': 'quantity_st'})
########################################
# Merge all
########################################
data_merge_c = data_search_grp_c.merge(data_pr_grp_c, how='outer', on=['store_id', 'drug_id'])
data_merge_c = data_merge_c.merge(data_ms_grp_c, how='outer', on=['store_id', 'drug_id'])
data_merge_c = data_merge_c.merge(data_lp_grp_c, how='outer', on=['store_id', 'drug_id'])
data_merge_c = data_merge_c.merge(data_st_grp_c, how='outer', on=['store_id', 'drug_id'])
# Fill missing values with 0
data_merge_c = data_merge_c.fillna(0).astype(int)
# Binary columns, which will be used later
for i in ['times_searched', 'times_pr', 'times_ms', 'times_lp', 'times_st']:
data_merge_c[i + '_b'] = np.where(data_merge_c[i] > 0, 1, 0)
# Aggregate
data_merge_c['num_triggers'] = (data_merge_c['times_searched_b'] + data_merge_c['times_pr_b'] +
data_merge_c['times_ms_b'] + data_merge_c['times_lp_b'] +
data_merge_c['times_st_b'])
# Repeatable info merge
data_r = prep_data_from_sql(Q_REPEATABLE.format(schema=schema), db)
data_merge_c = data_merge_c.merge(data_r, how='left', on='drug_id')
data_merge_c['is_repeatable'] = data_merge_c['is_repeatable'].fillna(0)
# Columns about repeat event flags
for i in ['days_searched', 'days_pr', 'days_ms', 'days_lp', 'days_st']:
data_merge_c[i + '_r'] = np.where(data_merge_c[i] > 1, 1, 0)
# Number of repeat triggers sum
data_merge_c['num_repeat_triggers'] = (data_merge_c['days_searched_r'] + data_merge_c['days_pr_r'] +
data_merge_c['days_ms_r'] + data_merge_c['days_lp_r'] +
data_merge_c['days_st_r'])
# Number of non search triggers
data_merge_c['num_repeat_triggers_non_search'] = (data_merge_c['days_pr_r'] + data_merge_c['days_ms_r'] +
data_merge_c['days_lp_r'] + data_merge_c['days_st_r'])
return data_merge_c
def data_augment_doi_inv(data_pass, store_id, max0_drugs_df, db, schema):
# Formatted SQL queries
# q_max0 = query_max_zero(store_id)
q_inv = query_inventory(store_id, schema)
data_merge_c = data_pass.copy()
########################################
# Max0 drugs
########################################
# connection = current_config.mysql_conn()
# data_max0 = prep_data_from_sql(q_max0, connection)
# data_max0['max0'] = 1
# Take max0 from df passed
data_max0 = max0_drugs_df.copy()
data_max0['store_id'] = store_id
data_max0['max0'] = 1
########################################
# Current inventory
########################################
q_inv = query_inventory(store_id, schema=schema)
data_inv = prep_data_from_sql(q_inv, db)
data_inv['curr_inv0'] = np.where(data_inv['current_inventory'] == 0, 1, 0)
########################################
# PTR
########################################
# SQL
data_ptr = prep_data_from_sql(Q_PTR.format(schema=schema), db)
data_ptr["ptr"] = data_ptr["ptr"].astype(float)
# Merge Max info, and impute if not present
data_merge_c = data_merge_c.merge(data_max0, how='inner', on=['store_id', 'drug_id'])
data_merge_c['max0'] = data_merge_c['max0'].fillna(0)
# Merge inventory and impute if not present
data_merge_c = data_merge_c.merge(data_inv, how='left', on=['store_id', 'drug_id'])
data_merge_c['curr_inv0'] = data_merge_c['curr_inv0'].fillna(1)
# Merge PTR and impute an average value if null
data_merge_c = data_merge_c.merge(data_ptr, how='left', on=['drug_id'])
data_merge_c['ptr'] = data_merge_c['ptr'].fillna(67)
# Max0, inv0 both
data_merge_c['max0_inv0'] = data_merge_c['max0'] * data_merge_c['curr_inv0']
return data_merge_c
def make_keep_col(data_pass):
data = data_pass.copy()
# Rule for whether we want to set max for a drug
data['keep'] = np.where(((data['num_triggers'] >= 4) |
((data['num_triggers'] == 3) & (data['num_repeat_triggers'] >= 1)) |
((data['num_triggers'] == 3) & (data['num_repeat_triggers'] == 0) & (
data['is_repeatable'] == 1)) |
((data['num_triggers'] == 2) & (data['num_repeat_triggers'] >= 2)) |
((data['num_triggers'] == 2) & (data['num_repeat_triggers'] == 1) & (
data['is_repeatable'] == 1)) |
((data['num_triggers'] == 2) & (data['num_repeat_triggers'] == 1) & (
data['num_repeat_triggers_non_search'] == 1)) |
((data['num_triggers'] == 1) & (data['num_repeat_triggers'] == 1) & (
data['is_repeatable'] == 1)) |
((data['num_triggers'] == 1) & (data['num_repeat_triggers'] == 1) & (
data['num_repeat_triggers_non_search'] == 1))
),
1, 0)
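# Worked example (illustrative numbers): a drug searched on 3 distinct days
# and raised as a patient request on 2 distinct days has num_triggers = 2
# (search + PR) and num_repeat_triggers = 2, so the
# (num_triggers == 2) & (num_repeat_triggers >= 2) clause sets keep = 1.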
# Rounding off to 2 decimals
for i in ['max0', 'curr_inv0', 'max0_inv0']:
data[i] = np.round(data[i], 2)
# Columns for order information
data['sku'] = 1
data['keep_sku'] = (data['sku'] * data['keep'] * data['max0']).astype(int)
data['order_sku'] = (data['sku'] * data['keep'] * data['max0_inv0']).astype(int)
data['max_value'] = data['keep_sku'] * data['ptr']
data['order_value'] = data['order_sku'] * data['ptr']
return data
def final_reset_sku(data_pass, db, schema):
data_merge_c = data_pass.copy()
########################################
# Some hardcoded decisions, to control inventory rise
########################################
# Should be revisited later
max_set = data_merge_c[(data_merge_c['keep_sku'] == 1)].copy()
# Summary by triggers
max_set_summary = max_set.groupby(['num_triggers',
'num_repeat_triggers',
'num_repeat_triggers_non_search',
'is_repeatable']).agg({'drug_id': 'count',
'max0': 'mean',
'curr_inv0': 'mean',
'max0_inv0': 'mean'}).reset_index()
max_set_summary = max_set_summary.rename(columns={'drug_id': 'drugs'})
max_set_summary['is_repeatable'] = max_set_summary['is_repeatable'].astype('int')
max_set_summary = max_set_summary.sort_values(by=['num_triggers',
'num_repeat_triggers',
'is_repeatable',
'num_repeat_triggers_non_search'],
ascending=(False, False, False, False))
# Exclude high-value drugs (ptr > 300), which can inflate order value
max_set_f1 = max_set[max_set['ptr'] <= 300].copy()
# Keep only 2+ triggers for now
max_set_f2 = max_set_f1[max_set_f1['num_triggers'] >= 2].copy()
# Stores info merge
# SQL
stores = prep_data_from_sql(Q_STORES.format(schema=schema), db)
max_set_f = max_set_f2.merge(stores, how='left', on='store_id')
# Order summary for store
max_set_f_store_summary = max_set_f.groupby(['store_id', 'store_name'])[
['keep_sku', 'order_sku', 'max_value', 'order_value']].sum().reset_index()
for i in ['max_value', 'order_value']:
max_set_f_store_summary[i] = np.round(max_set_f_store_summary[i], 0).astype(int)
# Min, SS, Max to be set as 0,0,1
# Can be revisited later if policy change or more confidence
max_set_final = max_set_f[['store_id', 'drug_id']].drop_duplicates()
max_set_final['min'] = 0
max_set_final['safety_stock'] = 0
max_set_final['max'] = 1
# 'max_set_final' is the final df, at drug level
# Rest data-frames are summary data-frames
return max_set_final, max_set_summary, max_set_f_store_summary | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/new_stores/new_store_stock_triggers.py | new_store_stock_triggers.py |
import numpy as np
import pandas as pd
def get_ga_composition_sku(db, schema, substition_type=['generic'], logger=None):
'''Get GoodAid SKUs, top generic SKUs and remaining generic SKUs'''
# Good Aid SKU list
ga_sku_query = """
select wh."drug-id" , d.composition
from "{schema}"."wh-sku-subs-master" wh
left join "{schema}".drugs d
on d.id = wh."drug-id"
where wh."add-wh" = 'Yes'
and d."company-id" = 6984
and d.type in {0}
""".format(str(substition_type).replace('[', '(').replace(']', ')'),
schema=schema)
ga_sku = db.get_df(ga_sku_query)
ga_sku.columns = [c.replace('-', '_') for c in ga_sku.columns]
logger.info('GoodAid SKU list ' + str(ga_sku.shape[0]))
# ga_sku_query = '''
# select drug_id, composition
# from good_aid_substitution_sku
# where start_date <= '{}'
# '''.format(current_date)
# pg_connection = current_config.data_science_postgresql_conn()
# ga_sku = pd.read_sql_query(ga_sku_query, pg_connection)
# pg_connection.close()
# logger.info('GoodAid SKU list ' + str(ga_sku.shape[0]))
# Generic Top SKU
ga_active_composition = tuple(ga_sku['composition'].values)
top_sku_query = """
select wh."drug-id" , d.composition
from "{schema}"."wh-sku-subs-master" wh
left join "{schema}".drugs d
on d.id = wh."drug-id"
where wh."add-wh" = 'Yes'
and d."company-id" != 6984
and d.type in {0}
and d.composition in {1}
""".format(str(substition_type).replace('[', '(').replace(']', ')'),
str(ga_active_composition), schema=schema)
top_sku = db.get_df(top_sku_query)
top_sku.columns = [c.replace('-', '_') for c in top_sku.columns]
logger.info('GoodAid comp Top SKU list ' + str(top_sku.shape[0]))
# ga_active_composition = tuple(ga_sku['composition'].values)
# top_sku_query = '''
# select drug_id, composition
# from good_aid_generic_sku
# where active_flag = 'YES'
# and composition in {}
# '''.format(str(ga_active_composition))
# pg_connection = current_config.data_science_postgresql_conn()
# top_sku = pd.read_sql_query(top_sku_query, pg_connection)
# pg_connection.close()
# logger.info('GoodAid comp Top SKU list ' + str(top_sku.shape[0]))
# SS substitution for other drugs
rest_sku_query = """
select id as drug_id, composition
from "{schema}".drugs
where composition in {0}
and id not in {1}
and type in {2}
""".format(str(ga_active_composition),
str(tuple(top_sku['drug_id'].values)),
str(substition_type).replace('[', '(').replace(']', ')'),
schema=schema)
rest_sku = db.get_df(rest_sku_query)
logger.info('GoodAid comp rest SKU list ' + str(rest_sku.shape[0]))
return ga_sku, top_sku, rest_sku
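# ---------------------------------------------------------------------------
# Worked example of the weighting in update_ga_ss() below (illustrative
# numbers, default weights ga=0.5, top=1, rest=0): one composition with
# order_upto_point of 10 (GA drug), 6 (top generic) and 4 (rest generic) has
# a composition total of 20. After the update the rest drug goes to 0
# (0 * 4), the top drug stays at 6 (1 * 6) and the GA drug gets
# max(10, round(0.5 * 20)) = 10, so the composition max drops from 20 to 16.
# ---------------------------------------------------------------------------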
def update_ga_ss(safety_stock_df, store_id, db, schema, ga_inv_weight=0.5,
rest_inv_weight=0, top_inv_weight=1,
substition_type=['generic'], min_column='safety_stock',
ss_column='reorder_point', max_column='order_upto_point',
logger=None):
'''updating safety stock for good aid '''
# good aid ss log
good_aid_ss_log = pd.DataFrame()
pre_max_qty = safety_stock_df[max_column].sum()
# get drug list
logger.info('Getting SKU list')
ga_sku, top_sku, rest_sku = get_ga_composition_sku(db, schema,
substition_type,
logger)
# get composition level ss numbers
logger.info('Aggregating composition level SS')
ga_composition = pd.concat([ga_sku, top_sku, rest_sku], axis=0)
columns_list = ['drug_id', 'composition',
min_column, ss_column, max_column]
ga_composition_ss = ga_composition.merge(
safety_stock_df, on='drug_id')[columns_list]
ga_composition_ss_agg = ga_composition_ss.groupby(
['composition'])[[min_column, ss_column, max_column]].sum(). \
reset_index()
# get index for different drug lists
rest_sku_index = safety_stock_df[
safety_stock_df['drug_id'].isin(rest_sku['drug_id'])].index
top_sku_index = safety_stock_df[
safety_stock_df['drug_id'].isin(top_sku['drug_id'])].index
ga_sku_index = safety_stock_df[
safety_stock_df['drug_id'].isin(ga_sku['drug_id'])].index
logger.info('Updating safety stock')
# logging rest SKU ss from algo
prev_rest_sku_ss = safety_stock_df.loc[rest_sku_index]. \
merge(rest_sku)[columns_list]
prev_rest_sku_ss['sku_type'] = 'rest generic'
good_aid_ss_log = good_aid_ss_log.append(prev_rest_sku_ss)
# setting rest SKU ss
safety_stock_df.loc[rest_sku_index, min_column] = np.round(
rest_inv_weight * safety_stock_df.loc[rest_sku_index, min_column])
safety_stock_df.loc[rest_sku_index, ss_column] = np.round(
rest_inv_weight * safety_stock_df.loc[rest_sku_index, ss_column])
safety_stock_df.loc[rest_sku_index, max_column] = np.round(
rest_inv_weight * safety_stock_df.loc[rest_sku_index, max_column])
# logging top SKU ss from algo
prev_top_sku_ss = safety_stock_df.loc[top_sku_index]. \
merge(top_sku)[columns_list]
prev_top_sku_ss['sku_type'] = 'top generic'
good_aid_ss_log = good_aid_ss_log.append(prev_top_sku_ss)
# setting top SKU ss
safety_stock_df.loc[top_sku_index, min_column] = np.round(
top_inv_weight * safety_stock_df.loc[top_sku_index, min_column])
safety_stock_df.loc[top_sku_index, ss_column] = np.round(
top_inv_weight * safety_stock_df.loc[top_sku_index, ss_column])
safety_stock_df.loc[top_sku_index, max_column] = np.round(
top_inv_weight * safety_stock_df.loc[top_sku_index, max_column])
# logging goodaid SKU ss from algo
prev_ga_sku_ss = safety_stock_df.loc[ga_sku_index]. \
merge(ga_sku)[columns_list]
prev_ga_sku_ss['sku_type'] = 'good aid'
good_aid_ss_log = good_aid_ss_log.append(prev_ga_sku_ss)
# setting goodaid SKU ss
ga_sku_ss = ga_composition_ss_agg.merge(ga_sku)[columns_list]
ga_sku_ss[min_column] = np.round(ga_inv_weight * ga_sku_ss[min_column])
ga_sku_ss[ss_column] = np.round(ga_inv_weight * ga_sku_ss[ss_column])
ga_sku_ss[max_column] = np.round(ga_inv_weight * ga_sku_ss[max_column])
ss_df_columns = safety_stock_df.columns
safety_stock_df = safety_stock_df.merge(
ga_sku_ss, how='left', on=['drug_id'])
safety_stock_df[min_column] = np.max(
safety_stock_df[[min_column + '_y', min_column + '_x']], axis=1)
safety_stock_df[ss_column] = np.max(
safety_stock_df[[ss_column + '_y', ss_column + '_x']], axis=1)
safety_stock_df[max_column] = np.max(
safety_stock_df[[max_column + '_y', max_column + '_x']], axis=1)
safety_stock_df = safety_stock_df[ss_df_columns]
# updating new good aid skus
ga_sku_new_entries = ga_sku_ss.loc[
~ga_sku_ss['drug_id'].isin(safety_stock_df['drug_id'])]
if len(ga_sku_new_entries) > 0:
ga_sku_new_entries_drug_list = str(
list(ga_sku_new_entries['drug_id'])
).replace('[', '(').replace(']', ')')
ga_sku_drug_info_query = """
select d.id as drug_id, "drug-name" as drug_name, type,
coalesce(doi."drug-grade", 'NA') as drug_grade
from "{schema}".drugs d left join "{schema}"."drug-order-info" doi
on d.id = doi."drug-id"
where d.id in {0}
and doi."store-id" = {1}
""".format(ga_sku_new_entries_drug_list, store_id, schema=schema)
ga_sku_drug_info = db.get_df(ga_sku_drug_info_query)
ga_sku_new_entries = ga_sku_new_entries.merge(ga_sku_drug_info)
# filling the relevant columns
ga_sku_new_entries['model'] = 'NA'
ga_sku_new_entries['bucket'] = 'NA'
ga_sku_new_entries['fcst'] = 0
ga_sku_new_entries['std'] = 0
ga_sku_new_entries['lead_time_mean'] = 0
ga_sku_new_entries['lead_time_std'] = 0
ga_sku_new_entries['correction_flag'] = 'N'
safety_stock_df = safety_stock_df.append(
ga_sku_new_entries)[ss_df_columns]
good_aid_ss_log.insert(
loc=0, column='store_id', value=store_id)
# renaming min/ss/max column name according to the table
good_aid_ss_log.rename(
columns={min_column: 'safety_stock',
ss_column: 'reorder_point',
max_column: 'order_upto_point'},
inplace=True)
post_max_qty = safety_stock_df[max_column].sum()
print('Reduction in max quantity:',
str(round(100 * (1 - post_max_qty / pre_max_qty), 2)) + '%')
return safety_stock_df, good_aid_ss_log | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/ipc/goodaid_substitution.py | goodaid_substitution.py |
import numpy as np
import math
import datetime as dt
"""
Steps -
1. Get Auto-Short total lead time -> from creation to receipt at store
2. If marked as lost, treat the lead time as 7 days
3. Cap the maximum lead time at 7 days
Recent corrections:
1. AS & MS both included (earlier only AS)
2. If there is no AS/MS history in the past 90 days, default store lead time = 4 days
"""
def lead_time(store_id, cal_sales, reset_date, db, schema, logger=None):
# sb_creation_delay_ethical = 1
# sb_creation_delay_other = 1
# sb_creation_delay_generic = 2
end_date = str((
dt.datetime.strptime(reset_date, '%Y-%m-%d') -
dt.timedelta(7)).date())
begin_date = str(cal_sales.date.dt.date.max() - dt.timedelta(97))
logger.info("Lead Time Calculation Starts")
logger.info(f"SB Begin Date: {begin_date}, SB End Date: {end_date}")
lead_time_query = f"""
select "store-id" , "drug-id" , "type" , status , "created-to-delivery-hour" as "lt-hrs"
from "{schema}"."as-ms" am
where "as-ms" in ('AS', 'MS')
and "store-id" = {store_id}
and date("created-at") <= '{end_date}'
and date("created-at") >= '{begin_date}'
and status not in ('failed', 'deleted')
"""
lead_time = db.get_df(lead_time_query)
lead_time.columns = [c.replace('-', '_') for c in lead_time.columns]
# classify all types into generic, ethical & others
lead_time["type"] = np.where(
lead_time["type"].isin(['ethical', 'high-value-ethical']), 'ethical',
lead_time["type"])
lead_time["type"] = np.where(lead_time["type"].isin(['ethical', 'generic']),
lead_time["type"], 'others')
lead_time["lt_days"] = lead_time["lt_hrs"] / 24
lead_time["lt_days"] = lead_time["lt_days"].fillna(7)
lead_time["lt_days"] = np.where(lead_time["lt_days"] < 1, 1, lead_time["lt_days"]) # min cutoff
lead_time["lt_days"] = np.where(lead_time["lt_days"] > 7, 7, lead_time["lt_days"]) # max cutoff
# add SB creation delay
# lead_time['lt_days'] = np.select(
# [lead_time['type'] == 'generic',
# lead_time['type'] == 'ethical'],
# [lead_time['lt_days'] + sb_creation_delay_generic,
# lead_time['lt_days'] + sb_creation_delay_ethical],
# default=lead_time['lt_days'] + sb_creation_delay_other)
lt_store_mean = round(lead_time.lt_days.mean(), 2)
lt_store_std = round(lead_time.lt_days.std(ddof=0), 2)
# to handle cases where no AS,MS history in past 90 days
if math.isnan(lt_store_mean):
lt_store_mean = 4
if math.isnan(lt_store_std):
lt_store_std = 0
lt_drug = lead_time.groupby('drug_id'). \
agg({'lt_days': [np.mean, np.std]}).reset_index()
lt_drug.columns = ['drug_id', 'lead_time_mean', 'lead_time_std']
lt_drug['lead_time_std'] = np.where(
lt_drug['lead_time_std'].isin([0, np.nan]),
lt_store_std, lt_drug['lead_time_std'])
logger.info("Lead Time Calculation Completed")
return lt_drug, lt_store_mean, lt_store_std | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/ipc/lead_time.py | lead_time.py |
import numpy as np
def generic_portfolio(safety_stock_df, db, schema, logger=None):
"""
To keep at least 1 drug in every active generic composition
"""
comp_drugs_to_keep = get_preference_drugs(db, schema, logger)
# get compositions of all generic drugs in store with OUP>0
all_drugs = tuple(safety_stock_df.loc[
safety_stock_df["order_upto_point"] > 0][
"drug_id"].unique())
q_gen_drugs = f"""
select id as "drug-id", composition
from "{schema}".drugs d
where id in {all_drugs}
and "type" = 'generic'
"""
df_gen_drugs = db.get_df(q_gen_drugs)
df_gen_drugs.columns = [c.replace('-', '_') for c in df_gen_drugs.columns]
df_gen_drugs = df_gen_drugs.loc[df_gen_drugs["composition"] != '']
compostitions_in_store = list(df_gen_drugs["composition"].unique())
# get additional composition-drugs to add
compositon_not_in_store = comp_drugs_to_keep.loc[
~comp_drugs_to_keep["composition"].isin(compostitions_in_store)]
logger.info(f"To keep {compositon_not_in_store.shape[0]} additional "
f"composition-drugs in store")
# drugs to add in current ss table
drugs_to_add = compositon_not_in_store[["drug_id", "std_qty"]]
final_df = safety_stock_df.merge(drugs_to_add, on="drug_id",
how="outer")
# handle NaN columns for additional drugs
final_df["model"] = final_df["model"].fillna('NA')
final_df["bucket"] = final_df["bucket"].fillna('NA')
final_df['fcst'] = final_df['fcst'].fillna(0)
final_df['std'] = final_df['std'].fillna(0)
final_df['lead_time_mean'] = final_df['lead_time_mean'].fillna(0)
final_df['lead_time_std'] = final_df['lead_time_std'].fillna(0)
final_df["safety_stock"] = final_df["safety_stock"].fillna(0)
final_df["reorder_point"] = final_df["reorder_point"].fillna(0)
final_df["order_upto_point"] = final_df["order_upto_point"].fillna(0)
# set OUP=STD_QTY for added drugs
final_df["order_upto_point"] = np.where(final_df["std_qty"].notna(),
final_df["std_qty"],
final_df["order_upto_point"])
final_df = final_df.drop("std_qty", axis=1)
return final_df
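# ---------------------------------------------------------------------------
# Illustrative sketch (toy data, not part of the job) of the preference
# selection used in get_preference_drugs() below: sort by the GAID flag and
# then by sales, both descending, and keep the first drug per composition.
# ---------------------------------------------------------------------------
def _preference_selection_sketch():
    import pandas as pd
    df = pd.DataFrame({
        'composition': ['A', 'A', 'B', 'B'],
        'drug_id': [11, 12, 21, 22],
        'is_gaid': [0, 1, 0, 0],
        'gross_sales': [500.0, 100.0, 50.0, 80.0]})
    df = df.sort_values(by=['composition', 'is_gaid', 'gross_sales'],
                        ascending=False)
    # composition A -> drug 12 (GAID wins over higher sales),
    # composition B -> drug 22 (no GAID, higher sales wins)
    return df.groupby('composition', as_index=False).agg({'drug_id': 'first'})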
def get_preference_drugs(db, schema, logger=None):
"""
Get all active generic compositions in WH and the preferred drug in each
composition. The preference order is as follows:
* Choose GAID if available
* Else choose highest selling drug in past 90 days at system level
"""
q_wh_gen_sku = f"""
select wssm."drug-id" , d.composition , d."company-id"
from "{schema}"."wh-sku-subs-master" wssm
left join "{schema}".drugs d on wssm."drug-id" = d.id
where "add-wh" = 'Yes'
and d."type" = 'generic'
"""
df_wh_gen_sku = db.get_df(q_wh_gen_sku)
df_wh_gen_sku.columns = [c.replace('-', '_') for c in df_wh_gen_sku.columns]
# clear drugs with no composition present
df_wh_gen_sku = df_wh_gen_sku.loc[df_wh_gen_sku["composition"] != '']
logger.info(f"Distinct generic compositions in WH: {len(df_wh_gen_sku.composition.unique())}")
logger.info(f"Distinct generic drugs in WH: {df_wh_gen_sku.shape[0]}")
drug_ids = tuple(df_wh_gen_sku.drug_id.unique())
# get past 90 days sales info of the preferred drugs
q_sales = f"""
select "drug-id" , sum("revenue-value") as "gross-sales"
from "{schema}".sales s
where "drug-id" in {drug_ids}
and datediff('day', date("created-at"), CURRENT_DATE ) < 90
and "bill-flag" = 'gross'
group by "drug-id"
"""
df_sales = db.get_df(q_sales)
df_sales.columns = [c.replace('-', '_') for c in df_sales.columns]
df_wh_gen_sku = df_wh_gen_sku.merge(df_sales, on="drug_id", how="left")
df_wh_gen_sku["gross_sales"] = df_wh_gen_sku["gross_sales"].fillna(0)
df_wh_gen_sku["is_gaid"] = np.where(df_wh_gen_sku["company_id"] == 6984, 1, 0)
# order priority: GA, Sales
df_wh_gen_sku = df_wh_gen_sku.sort_values(
by=['composition', 'is_gaid', 'gross_sales'],
ascending=False)
# choose the first preference for every composition
comp_drug_list = df_wh_gen_sku.groupby('composition', as_index=False).agg(
{'drug_id': 'first'})
# get std-qty to keep
q_drug_std_info = f"""
select "drug-id" , "std-qty"
from "{schema}"."drug-std-info" dsi
"""
df_drug_std_info = db.get_df(q_drug_std_info)
df_drug_std_info.columns = [c.replace('-', '_') for c in df_drug_std_info.columns]
comp_drug_list = comp_drug_list.merge(df_drug_std_info, on="drug_id",
how="left")
# fill NA values with defaults
comp_drug_list["std_qty"] = comp_drug_list["std_qty"].fillna(1)
return comp_drug_list | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/ipc/store_portfolio_additions.py | store_portfolio_additions.py |
import pandas as pd
import numpy as np
import time
from zeno_etl_libs.django.api import Sql
from zeno_etl_libs.db.db import MySQL
def doid_update(data, type_list, db, schema, logger=None, gaid_omit=True):
# GA skus to be omitted
ga_sku_query = f"""
select "drug-id" as drug_id
from "{schema}"."wh-sku-subs-master" wh
left join "{schema}".drugs d
on d.id = wh."drug-id"
where d."company-id" = 6984
"""
ga_sku = db.get_df(ga_sku_query)
ga_sku_list = tuple(ga_sku['drug_id'])
# import pdb; pdb.set_trace()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
data = data[['store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']]
data = data.rename(columns={
'corr_min': 'min', 'corr_ss': 'safe_stock', 'corr_max': 'max'})
# if GAID drugs need not be omitted, neutralise the exclusion list
if not gaid_omit:
ga_sku_list = (0, 0)
data = data[~data['drug_id'].isin(ga_sku_list)]
mysql = MySQL()
sql = Sql()
for store_id in data['store_id'].unique():
current_ss_query = """
SELECT doid.id, doid.`store-id` , doid.`drug-id` , doid.min,
doid.`safe-stock` , doid.max
FROM `drug-order-info-data` doid
left join drugs d
on d.id = doid.`drug-id`
where doid.`store-id` = {store_id}
and d.`type` in {type_list}
and d.id not in {ga_sku_list}
""".format(store_id=store_id,
type_list=type_list,
ga_sku_list=ga_sku_list,
schema=schema)
mysql.open_connection()
current_ss = pd.read_sql(current_ss_query, mysql.connection)
mysql.close()
current_ss.columns = [c.replace('-', '_') for c in current_ss.columns]
data_store = data.loc[
data['store_id'] == store_id,
['store_id', 'drug_id', 'min', 'safe_stock', 'max']]
# Do not let the code erroneously force non-GAID drugs to zero
how = 'outer'
if not gaid_omit:
how = 'right'
ss_joined = current_ss.merge(
data_store, on=['store_id', 'drug_id'], how=how,
suffixes=('_old', ''))
ss_joined['min'].fillna(0, inplace=True)
ss_joined['safe_stock'].fillna(0, inplace=True)
ss_joined['max'].fillna(0, inplace=True)
# capture new (not yet present) entries before filtering them out
new_store_entries = ss_joined[ss_joined['id'].isna()]
new_drug_entries = new_drug_entries.append(new_store_entries)
ss_joined = ss_joined[~ss_joined['id'].isna()]
logger.info('Mysql upload for store ' + str(store_id))
logger.info('New entries ' + str(new_store_entries.shape[0]))
ss_joined['flag'] = np.where(
(ss_joined['min_old'] == ss_joined['min']) &
(ss_joined['safe_stock_old'] == ss_joined['safe_stock']) &
(ss_joined['max_old'] == ss_joined['max']),
'values same', 'values changed'
)
ss_to_upload = ss_joined.loc[
ss_joined['flag'] == 'values changed',
['id', 'min', 'safe_stock', 'max']]
logger.info('SS to update only for ' + str(
ss_joined[ss_joined['flag'] != 'values same'].shape[0]))
data_to_be_updated_list = list(ss_to_upload.apply(dict, axis=1))
if len(data_to_be_updated_list) > 0:
chunk_size = 1000
for i in range(0, len(data_to_be_updated_list), chunk_size):
status, msg = sql.update(
{'table': 'DrugOrderInfoData',
'data_to_be_updated': data_to_be_updated_list[i:i+chunk_size]}, logger)
logger.info(f"DrugOrderInfoData update API "
f"count: {min(i+chunk_size, len(data_to_be_updated_list))}, status: {status}, msg: {msg}")
drug_list = str(list(ss_joined.loc[
ss_joined['flag'] == 'values changed', 'drug_id'].unique())
).replace('[', '(').replace(']', ')')
update_test_query = """
SELECT `store-id` , `drug-id` , min , `safe-stock` , max
from `drug-order-info-data` doid
where `store-id` = {store_id}
and `drug-id` in {drug_list}
""".format(store_id=store_id,
drug_list=drug_list,
schema=schema)
time.sleep(15)
mysql.open_connection()
update_test = pd.read_sql(update_test_query, mysql.connection)
mysql.close()
update_test.columns = [c.replace('-', '_') for c in update_test.columns]
update_test = ss_joined.loc[
ss_joined['flag'] == 'values changed',
['store_id', 'drug_id', 'min', 'safe_stock', 'max']].merge(
update_test, on=['store_id', 'drug_id'],
suffixes=('_new', '_prod'))
update_test['mismatch_flag'] = np.where(
(update_test['min_new'] == update_test['min_prod']) &
(update_test['safe_stock_new'] == update_test[
'safe_stock_prod']) &
(update_test['max_new'] == update_test['max_prod']),
'updated', 'not updated'
)
missed_entries = missed_entries.append(
update_test[update_test['mismatch_flag'] == 'not updated'])
logger.info(
'Entries updated successfully: ' +
str(update_test[
update_test['mismatch_flag'] == 'updated'].shape[0]))
logger.info(
'Entries not updated successfully: ' +
str(update_test[
update_test['mismatch_flag'] == 'not updated'].shape[
0]))
return new_drug_entries, missed_entries | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/ipc/doid_update_ss.py | doid_update_ss.py |
import numpy as np
def post_processing(store_id, drug_class, weekly_fcst, safety_stock_df,
db, schema, logger):
''' getting drug name, type, grades, store name'''
drug_id_list = tuple(drug_class.drug_id.unique())
drug_info_query = """
select d.id as drug_id, "drug-name" as drug_name, type,
coalesce(doi."drug-grade", 'NA') as drug_grade
from "{schema}".drugs d
left join "{schema}"."drug-order-info" doi
on d.id = doi."drug-id"
where d.id in {0}
and doi."store-id" = {1}
""".format(str(drug_id_list), store_id, schema=schema)
drug_info = db.get_df(drug_info_query)
q_store_name = f""" select name from "{schema}".stores where id = {store_id} """
store_name = db.get_df(q_store_name)['name'][0]
safety_stock_df['store_id'] = store_id
safety_stock_df['store_name'] = store_name
safety_stock_df = safety_stock_df.merge(
drug_info, on='drug_id', how='left')
safety_stock_df['drug_grade'].fillna('NA', inplace=True)
safety_stock_df = safety_stock_df[[
'store_id', 'store_name', 'model', 'drug_id', 'drug_name', 'type',
'drug_grade', 'bucket', 'percentile', 'fcst', 'std',
'lead_time_mean', 'lead_time_std', 'safety_stock', 'reorder_point',
'order_upto_point', 'safety_stock_days', 'reorder_days',
'order_upto_days', 'fptr', 'curr_inventory', 'max_value','correction_flag']]
weekly_fcst['store_id'] = store_id
weekly_fcst['store_name'] = store_name
weekly_fcst = weekly_fcst[['store_id', 'store_name', 'model',
'drug_id', 'date', 'fcst', 'std']]
drug_class['store_id'] = store_id
drug_class['store_name'] = store_name
drug_class = drug_class.merge(
drug_info[['drug_id', 'drug_grade', 'type']], on='drug_id', how='left')
drug_class['drug_grade'].fillna('NA', inplace=True)
drug_class = drug_class[['store_id', 'store_name', 'drug_id', 'drug_grade',
'type', 'net_sales', 'sales_std_dev', 'sales_cov',
'bucket_abc', 'bucket_xyz']]
'''Getting order value'''
safety_stock_df['to_order_quantity'] = np.where(
safety_stock_df['curr_inventory'] <= safety_stock_df['reorder_point'],
safety_stock_df['order_upto_point'] - safety_stock_df['curr_inventory'], 0)
safety_stock_df['to_order_value'] = (
safety_stock_df['to_order_quantity'] * safety_stock_df['fptr'])
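# Worked example (illustrative numbers): curr_inventory = 3,
# reorder_point = 5, order_upto_point = 12, fptr = 10.0 gives
# to_order_quantity = 12 - 3 = 9 and to_order_value = 90.0;
# with curr_inventory = 6 (> reorder_point) nothing would be ordered.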
order_value = safety_stock_df.pivot_table(
index=['type', 'store_name', 'drug_grade'],
values=['to_order_quantity', 'to_order_value'], aggfunc='sum',
margins=True, margins_name='Total').reset_index()
return drug_class, weekly_fcst, safety_stock_df, order_value | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/ipc/post_processing.py | post_processing.py |
import numpy as np
def abc_xyz_classification(cal_drug_sales_monthly, logger=None):
cut_cov = (0.3, 1.0)
cut_sales = (4, 30)
# taking last 12 months data only for classification
n = 12
prev_n_month_dt = cal_drug_sales_monthly[
['month_begin_dt']].drop_duplicates().\
sort_values('month_begin_dt', ascending=False)['month_begin_dt'].\
head(n)
cal_drug_sales_classification = cal_drug_sales_monthly[
cal_drug_sales_monthly.month_begin_dt.isin(prev_n_month_dt)]
print(len(cal_drug_sales_classification))
# monthly averages for classification
drug_class = cal_drug_sales_classification.\
groupby('drug_id').agg({'net_sales_quantity': [np.mean, np.std]}).\
reset_index()
drug_class.columns = ['drug_id', 'net_sales', 'sales_std_dev']
drug_class = drug_class[drug_class['net_sales'] >= 0]
drug_class['sales_cov'] = (
drug_class['sales_std_dev'] /
drug_class['net_sales'])
# assertion to check that no drug has negative average sales
assert len(drug_class[
drug_class['net_sales'] < 0]) == 0
# handling infs
drug_class['sales_cov'] = np.where(
drug_class['sales_cov'] == np.inf,
drug_class['sales_std_dev'],
drug_class['sales_cov']
)
# assigning buckets
drug_class['bucket_abc'] = np.select(
[(drug_class['net_sales'] <= cut_sales[0]),
(drug_class['net_sales'] > cut_sales[0]) &
(drug_class['net_sales'] <= cut_sales[1]),
(drug_class['net_sales'] > cut_sales[1])],
['C', 'B', 'A'],
default='NA')
drug_class['bucket_xyz'] = np.select(
[drug_class['sales_cov'] <= cut_cov[0],
(drug_class['sales_cov'] > cut_cov[0]) &
(drug_class['sales_cov'] <= cut_cov[1]),
drug_class['sales_cov'] > cut_cov[1]],
['X', 'Y', 'Z'],
default='NA')
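# Worked example (illustrative numbers): a drug with average monthly sales
# of 10 units and sales CoV of 0.5 falls in bucket B (4 < 10 <= 30) and
# bucket Y (0.3 < 0.5 <= 1.0), i.e. class "BY".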
print(drug_class.drug_id.nunique())
# summary
bucket_sales = drug_class.groupby(
['bucket_abc', 'bucket_xyz']).agg(
{'drug_id': 'count', 'net_sales': ['sum', 'mean'],
'sales_cov': 'mean'}).reset_index()
bucket_sales.columns = ['bucket_abc', 'bucket_xyz', 'drug_id', 'net_sales',
'avg_sales_per_drug', 'sales_cov']
bucket_sales['net_sales_frac'] = round(
100*bucket_sales['net_sales']/drug_class.net_sales.sum(), 2)
bucket_sales['drug_frac'] = round(
100*bucket_sales['drug_id']/drug_class.drug_id.nunique(), 2)
bucket_sales['avg_sales_per_drug'] = (
bucket_sales['net_sales']/bucket_sales['drug_id'])
print(bucket_sales)
return drug_class, bucket_sales | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/ipc/item_classification.py | item_classification.py |
import datetime
import numpy as np
import pandas as pd
def forecast_data_prep(store_id_list, type_list, reset_date, db, schema,
logger=None, last_date=None, is_wh='N'):
''' FETCHING HISTORICAL SALES AND SALES LOSS DATA '''
if last_date is None:
last_date = datetime.date(day=1, month=4, year=2019)
print('Date range', str(last_date), str(reset_date))
# store list
if type(store_id_list) is not list:
store_id_list = [store_id_list]
store_id_list = str(store_id_list).replace('[', '(').replace(']', ')')
# drug list
drug_list_query = """
select id as drug_id from "{schema}".drugs where type in {0}
""".format(type_list, schema=schema)
drug_list = db.get_df(drug_list_query)
# sales query
sales_query = """
select date("created-at") as "sales-date", "drug-id" ,
sum("net-quantity") as "net-sales-quantity"
from "{schema}".sales s
where "store-id" in {store_id_list}
and date("created-at") >= '{last_date}'
and date("created-at") < '{reset_date}'
group by "sales-date", "drug-id"
""".format(
store_id_list=store_id_list, last_date=last_date,
reset_date=reset_date, schema=schema)
sales_history = db.get_df(sales_query)
sales_history.columns = [c.replace('-', '_') for c in sales_history.columns]
calendar_query = """
select date, year, month, "week-of-year", "day-of-week"
from "{schema}".calendar
""".format(schema=schema)
calendar = db.get_df(calendar_query)
calendar.columns = [c.replace('-', '_') for c in calendar.columns]
sales_history = sales_history.merge(drug_list, how='inner', on='drug_id')
# cfr pr loss
cfr_pr_query = f"""
select "attributed-loss-date", "drug-id",
sum("loss-quantity") as "loss-quantity"
from "{schema}"."cfr-patient-request"
where "shortbook-date" >= '{last_date}'
and "shortbook-date" < '{reset_date}'
and "drug-id" <> -1
and ("drug-category" = 'chronic' or "repeatability-index" >= 40)
and "loss-quantity" > 0
and "drug-type" in {type_list}
and "store-id" in {store_id_list}
group by "attributed-loss-date", "drug-id"
"""
cfr_pr = db.get_df(cfr_pr_query)
cfr_pr["loss-quantity"] = cfr_pr["loss-quantity"].astype(float)
cfr_pr.columns = [c.replace('-', '_') for c in cfr_pr.columns]
print(sales_history.sales_date.max())
print(cfr_pr.attributed_loss_date.max())
sales_history = sales_history.groupby(
['sales_date', 'drug_id']).sum().reset_index()
# imputing days with no sales with zero sales
sales_history['sales_date'] = pd.to_datetime(sales_history['sales_date'])
sales_history = get_formatted_data(sales_history, 'drug_id', 'sales_date', 'net_sales_quantity')
cfr_pr['attributed_loss_date'] = pd.to_datetime(cfr_pr['attributed_loss_date'])
# total demand merge
sales = sales_history.merge(
cfr_pr, left_on=['sales_date', 'drug_id'],
right_on=['attributed_loss_date', 'drug_id'], how='left')
sales['sales_date'] = sales['sales_date'].combine_first(
sales['attributed_loss_date'])
sales['net_sales_quantity'].fillna(0, inplace=True)
sales['loss_quantity'].fillna(0, inplace=True)
sales['net_sales_quantity'] += sales['loss_quantity']
sales.drop(['attributed_loss_date', 'loss_quantity'], axis=1, inplace=True)
print(sales.drug_id.nunique())
# daily demand deviation per drug, over roughly the last 4 weeks
demand_daily_deviation = sales[sales['sales_date'] > pd.to_datetime(reset_date) - datetime.timedelta(days = 29)]
demand_daily_deviation = demand_daily_deviation.groupby('drug_id').std().reset_index()
demand_daily_deviation = demand_daily_deviation.rename(columns={'net_sales_quantity': 'demand_daily_deviation'})
'''
CREATING DAY-DRUG SALES CROSS TABLE
'''
calendar['date'] = pd.to_datetime(calendar['date'])
sales['sales_date'] = pd.to_datetime(sales['sales_date'])
print('Distinct drug count', sales.drug_id.nunique())
print('No of days', sales.sales_date.nunique())
cal_sales_weekly = calendar.loc[
(pd.to_datetime(calendar['date']) >= sales.sales_date.min()) &
(calendar['date'] <= sales.sales_date.max())]
cal_sales_monthly = calendar.loc[
(pd.to_datetime(calendar['date']) >= sales.sales_date.min()) &
(calendar['date'] <= sales.sales_date.max())]
# removing the first week if it has less than 7 days
min_year = cal_sales_weekly.year.min()
x = cal_sales_weekly.loc[(cal_sales_weekly.year == min_year)]
min_month = x.month.min()
x = x.loc[(x.month == min_month)]
min_week = x.week_of_year.min()
if x.loc[x.week_of_year == min_week].shape[0] < 7:
print('removing dates for', min_year, min_month, min_week)
cal_sales_weekly = cal_sales_weekly.loc[
~((cal_sales_weekly.week_of_year == min_week) &
(cal_sales_weekly.year == min_year))]
# removing the latest week if it has less than 7 days
max_year = cal_sales_weekly.year.max()
x = cal_sales_weekly.loc[(cal_sales_weekly.year == max_year)]
max_month = x.month.max()
x = x.loc[(x.month == max_month)]
max_week = x.week_of_year.max()
if x.loc[x.week_of_year == max_week].shape[0] < 7:
print('removing dates for', max_year, max_month, max_week)
cal_sales_weekly = cal_sales_weekly.loc[
~((cal_sales_weekly.week_of_year == max_week) &
(cal_sales_weekly.year == max_year))]
# adding week begin date
cal_sales_weekly['week_begin_dt'] = cal_sales_weekly.apply(
lambda x: x['date'] - datetime.timedelta(x['day_of_week']), axis=1)
cal_sales_weekly['month_begin_dt'] = cal_sales_weekly.apply(
lambda x: x['date'] - datetime.timedelta(x['date'].day - 1), axis=1)
cal_sales_monthly['week_begin_dt'] = cal_sales_monthly.apply(
lambda x: x['date'] - datetime.timedelta(x['day_of_week']), axis=1)
cal_sales_monthly['month_begin_dt'] = cal_sales_monthly.apply(
lambda x: x['date'] - datetime.timedelta(x['date'].day - 1), axis=1)
drugs = sales[['drug_id']].drop_duplicates()
drugs['key'] = 1
cal_sales_weekly['key'] = 1
cal_drug_w = drugs.merge(cal_sales_weekly, on='key', how='inner')
cal_drug_w.drop('key', axis=1, inplace=True)
cal_drug_sales_w = cal_drug_w.merge(
sales, left_on=['drug_id', 'date'], right_on=['drug_id', 'sales_date'],
how='left')
del cal_drug_w
cal_drug_sales_w.drop('sales_date', axis=1, inplace=True)
cal_drug_sales_w.net_sales_quantity.fillna(0, inplace=True)
cal_sales_monthly['key'] = 1
cal_drug_m = drugs.merge(cal_sales_monthly, on='key', how='inner')
cal_drug_m.drop('key', axis=1, inplace=True)
cal_drug_sales_m = cal_drug_m.merge(
sales, left_on=['drug_id', 'date'], right_on=['drug_id', 'sales_date'],
how='left')
del cal_drug_m
cal_drug_sales_m.drop('sales_date', axis=1, inplace=True)
cal_drug_sales_m.net_sales_quantity.fillna(0, inplace=True)
# assertion test to check no of drugs * no of days equals total entries
drug_count = cal_drug_sales_w.drug_id.nunique()
day_count = cal_drug_sales_w.date.nunique()
print('Distinct no of drugs', drug_count)
print('Distinct dates', day_count)
print('DF shape', cal_drug_sales_w.shape[0])
# assert drug_count*day_count == cal_drug_sales.shape[0]
# checking for history available and store opening date
first_bill_query = """
select min(date("created-at")) as bill_date from "{schema}"."bills-1"
where "store-id" in {store_id_list}
""".format(schema=schema, store_id_list=store_id_list)
first_bill_date = db.get_df(first_bill_query).values[0][0]
print(first_bill_date)
cal_drug_sales_w = cal_drug_sales_w.query(
'date >= "{}"'.format(first_bill_date))
cal_drug_sales_m = cal_drug_sales_m.query(
'date >= "{}"'.format(first_bill_date))
'''
AGGREGATION AT WEEKLY LEVEL
'''
cal_drug_sales_weekly = cal_drug_sales_w.groupby(
['drug_id', 'week_begin_dt', 'week_of_year']
)['net_sales_quantity'].sum().reset_index()
del cal_drug_sales_w
print(cal_drug_sales_weekly.drug_id.nunique())
# getting drug ids that haven't been sold in the last 26 weeks
n = 26
prev_n_week_dt = (
cal_drug_sales_weekly.week_begin_dt.max() - datetime.timedelta(n * 7))
prev_n_week_sales = cal_drug_sales_weekly[
cal_drug_sales_weekly['week_begin_dt'] > prev_n_week_dt]. \
groupby('drug_id')['net_sales_quantity'].sum().reset_index()
prev_no_sales_drug_weekly = prev_n_week_sales.loc[
prev_n_week_sales['net_sales_quantity'] <= 0, 'drug_id'].values
cal_drug_sales_weekly = cal_drug_sales_weekly[
~cal_drug_sales_weekly.drug_id.isin(prev_no_sales_drug_weekly)]
print(cal_drug_sales_weekly.drug_id.nunique())
cal_drug_sales_weekly.rename(
columns={'week_begin_dt': 'date'}, inplace=True)
validation_week = 4
validation_weeks = cal_drug_sales_weekly['date'].drop_duplicates(). \
nlargest(validation_week)
print(validation_weeks)
cal_drug_sales_weekly['sample_flag'] = np.where(
cal_drug_sales_weekly['date'].isin(validation_weeks),
'validation', 'insample')
'''
AGGREGATION AT MONTHLY LEVEL
'''
cal_drug_sales_monthly = cal_drug_sales_m.groupby(
['drug_id', 'month_begin_dt', 'year', 'month']
)['net_sales_quantity'].sum().reset_index()
del cal_drug_sales_m
if is_wh == 'N':
# removing incomplete month's sales
cal_drug_sales_monthly = cal_drug_sales_monthly[
cal_drug_sales_monthly.month_begin_dt != max(
cal_drug_sales_monthly.month_begin_dt)]
# getting drug ids that haven't been sold in the last 6 months
print(cal_drug_sales_monthly.drug_id.nunique())
n = 6
prev_n_month_dt = cal_drug_sales_monthly[
['month_begin_dt']].drop_duplicates(). \
sort_values('month_begin_dt', ascending=False
)['month_begin_dt'].head(n - 1)
prev_n_month_sales = cal_drug_sales_monthly[
cal_drug_sales_monthly['month_begin_dt'].isin(prev_n_month_dt)]. \
groupby('drug_id')['net_sales_quantity'].sum().reset_index()
prev_no_sales_drug_monthly = prev_n_month_sales.loc[
prev_n_month_sales['net_sales_quantity'] <= 0, 'drug_id'].values
# removing such drugs
cal_drug_sales_monthly = cal_drug_sales_monthly[
(~cal_drug_sales_monthly.drug_id.isin(prev_no_sales_drug_monthly))
]
print(cal_drug_sales_monthly.drug_id.nunique())
if is_wh == 'Y':
return cal_drug_sales_weekly, cal_drug_sales_monthly, cal_sales_weekly, demand_daily_deviation
else:
return cal_drug_sales_weekly, cal_drug_sales_monthly, cal_sales_weekly
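# ---------------------------------------------------------------------------
# Illustrative sketch (toy data, not part of the job) of the zero-sales
# imputation done by get_formatted_data() below: build a dense drug x date
# grid, fill missing days with 0 and drop rows before each drug's first sale.
# ---------------------------------------------------------------------------
def _zero_sales_imputation_sketch():
    import pandas as pd
    toy = pd.DataFrame({
        'drug_id': [1, 1, 2],
        'sales_date': pd.to_datetime(['2022-01-01', '2022-01-03', '2022-01-02']),
        'net_sales_quantity': [2, 1, 5]})
    # drug 1 gets a 0-sales row for 2022-01-02, drug 2 gets one for 2022-01-03
    return get_formatted_data(toy, 'drug_id', 'sales_date', 'net_sales_quantity')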
def get_formatted_data(df, key_col, date_col, target_col):
df_start = df.groupby([key_col])[date_col].min().reset_index().rename(columns={date_col: 'sales_start'})
df = df[[key_col, date_col, target_col]]
min_date = df[date_col].dropna().min()
end_date = df[date_col].dropna().max()
date_range = pd.date_range(
start=min_date,
end=end_date,
freq='d'
)
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = np.nan
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = pd.merge(df, df_start, how='left', on=key_col)
df = df[df[date_col] >= df['sales_start']]
return df | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/ipc/data_prep.py | data_prep.py |
import pandas as pd
import numpy as np
import time
from scipy.stats import norm
from zeno_etl_libs.utils.ipc.data_prep import forecast_data_prep
from zeno_etl_libs.utils.ipc.item_classification import abc_xyz_classification
from zeno_etl_libs.utils.ipc.forecasting_modules.helper_functions import sum_std,\
applyParallel, applyParallel_lstm
from zeno_etl_libs.utils.ipc.forecasting_modules.lstm import lstm_forecast
from zeno_etl_libs.utils.ipc.forecasting_modules.moving_average import moving_average
from zeno_etl_libs.utils.ipc.forecasting_modules.prophet import prophet_weekly_predict
from zeno_etl_libs.utils.ipc.lead_time import lead_time
from zeno_etl_libs.utils.ipc.safety_stock import safety_stock_calc
def ipc_forecast_reset(
store_id, type_list, reset_date, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, db, schema,
drug_type_list_v4, v5_active_flag, v6_active_flag, v6_type_list,
v6_ptr_cut_off, chronic_max_flag='N', logger=None):
'''DATA PREPARATION'''
cal_drug_sales_weekly, cal_drug_sales_monthly,\
cal_sales = forecast_data_prep(store_id, type_list, reset_date,
db, schema, logger)
'''ITEM CLASSIFICATION'''
drug_class, bucket_sales = abc_xyz_classification(
cal_drug_sales_monthly, logger)
'''FORECASTING'''
forecast_horizon = 4
# LSTM
week_in = 8
week_out = 4
epochs = 200
n_neurons = 8
use_dropout = 0.2
error_factor = 2
lstm_drug_list = drug_class.loc[
(drug_class['bucket_abc'] == 'A') & (drug_class['bucket_xyz'] == 'X') |
(drug_class['bucket_abc'] == 'A') & (drug_class['bucket_xyz'] == 'Y') |
(drug_class['bucket_abc'] == 'B') & (drug_class['bucket_xyz'] == 'X'),
'drug_id']
lstm_data_weekly = cal_drug_sales_weekly.loc[
cal_drug_sales_weekly['drug_id'].isin(lstm_drug_list)]
start = time.time()
lstm_weekly_fcst = applyParallel_lstm(
lstm_data_weekly.groupby('drug_id'), lstm_forecast,
n_neurons=n_neurons, week_in=week_in, week_out=week_out,
forecast_horizon=forecast_horizon, epochs=epochs,
use_dropout=use_dropout, error_factor=error_factor).\
reset_index(drop=True)
end = time.time()
print('Run time ', end-start)
# MOVING AVERAGES
ma_drug_list = drug_class.loc[
(drug_class['bucket_abc'] == 'B') & (drug_class['bucket_xyz'] == 'Y') |
(drug_class['bucket_abc'] == 'B') & (drug_class['bucket_xyz'] == 'Z') |
(drug_class['bucket_abc'] == 'C') & (drug_class['bucket_xyz'] == 'X'),
'drug_id']
ma_data_weekly = cal_drug_sales_weekly.loc[
cal_drug_sales_weekly['drug_id'].isin(ma_drug_list)]
start = time.time()
ma_weekly_fcst = ma_data_weekly.groupby('drug_id').\
apply(moving_average).reset_index(drop=True)
end = time.time()
print('Run time ', end-start)
# PROPHET
prophet_drug_list = drug_class.loc[
(drug_class['bucket_abc'] == 'C') & (drug_class['bucket_xyz'] == 'Y') |
(drug_class['bucket_abc'] == 'C') & (drug_class['bucket_xyz'] == 'Z') |
(drug_class['bucket_abc'] == 'A') & (drug_class['bucket_xyz'] == 'Z'),
'drug_id']
prophet_data_weekly = cal_drug_sales_weekly.loc[
cal_drug_sales_weekly['drug_id'].isin(prophet_drug_list)]
start = time.time()
prophet_weekly_fcst = applyParallel(
prophet_data_weekly.groupby('drug_id'), prophet_weekly_predict).\
reset_index(drop=True)
end = time.time()
print('Run time ', end-start)
'''COMPILING OUTPUT AND PERCENTILE FORECAST'''
columns = ['model', 'drug_id', 'date', 'fcst', 'std']
ma_weekly_fcst['model'] = 'MA'
ma_weekly_fcst = ma_weekly_fcst[columns]
prophet_weekly_fcst['model'] = 'Prophet'
prophet_weekly_fcst = prophet_weekly_fcst[columns]
lstm_weekly_fcst['model'] = 'LSTM'
lstm_weekly_fcst = lstm_weekly_fcst[columns]
weekly_fcst = pd.concat(
[ma_weekly_fcst, prophet_weekly_fcst, lstm_weekly_fcst], axis=0)
percentile_bucket_dict = {
'AX': 0.5, 'AY': 0.5, 'AZ': 0.5,
'BX': 0.5, 'BY': 0.6, 'BZ': 0.6,
'CX': 0.5, 'CY': 0.6, 'CZ': 0.6}
print(weekly_fcst.drug_id.nunique())
weekly_fcst = weekly_fcst.merge(
drug_class[['drug_id', 'bucket_abc', 'bucket_xyz']],
on='drug_id', how='inner')
weekly_fcst['bucket'] = (
weekly_fcst['bucket_abc'] + weekly_fcst['bucket_xyz'])
weekly_fcst.drop(['bucket_abc', 'bucket_xyz'], axis=1, inplace=True)
for key in percentile_bucket_dict.keys():
print(key, percentile_bucket_dict[key])
indexs = weekly_fcst[weekly_fcst.bucket == key].index
weekly_fcst.loc[indexs, 'percentile'] = percentile_bucket_dict[key]
weekly_fcst.loc[indexs, 'fcst'] = np.round(
weekly_fcst.loc[indexs, 'fcst'] +
norm.ppf(percentile_bucket_dict[key]) *
weekly_fcst.loc[indexs, 'std'])
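# Worked example (illustrative numbers): for a BY-bucket drug with a 4-week
# point forecast of 20 and std of 5, percentile 0.6 gives an adjusted
# forecast of round(20 + norm.ppf(0.6) * 5) = round(20 + 0.2533 * 5) = 21.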
agg_fcst = weekly_fcst.groupby(
['model', 'drug_id', 'bucket', 'percentile']).\
agg({'fcst': 'sum', 'std': sum_std}).reset_index()
'''LEAD TIME CALCULATION'''
lt_drug, lt_store_mean, lt_store_std = lead_time(
store_id, cal_sales, reset_date, db, schema, logger)
'''SAFETY STOCK CALCULATION'''
safety_stock_df, df_corrections, df_corrections_111, \
drugs_max_to_lock_ipcv6, drug_rejects_ipcv6 = safety_stock_calc(
agg_fcst, store_id, forecast_horizon, lt_drug,
lt_store_mean, lt_store_std, reset_date, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, chronic_max_flag,
v5_active_flag, v6_active_flag, v6_type_list,
v6_ptr_cut_off, drug_type_list_v4, db, schema, logger)
return drug_class, weekly_fcst, safety_stock_df, df_corrections, \
df_corrections_111, drugs_max_to_lock_ipcv6, drug_rejects_ipcv6 | zeno-etl-libs-v3 | /zeno_etl_libs_v3-1.0.17-py3-none-any.whl/zeno_etl_libs/utils/ipc/forecast_reset.py | forecast_reset.py |
import pandas as pd
import numpy as np
from scipy.stats import norm
from datetime import datetime
from zeno_etl_libs.utils.ipc.heuristics.base import base_heuristics
from zeno_etl_libs.utils.ipc.heuristics.ipcv4_heuristics import ipcv4_heuristics
from zeno_etl_libs.utils.ipc.heuristics.ipcv5_heuristics import v5_corrections
# from scripts.ops.ipc.heuristics.ipcv6_heuristics import v6_corrections
'''
service level - 95%
safety stock = z-score * sqrt(avg_lead_time * std_daily_demand^2 +
               std_lead_time^2 * avg_daily_demand^2)
re-order point = avg_lead_time * avg_daily_demand + safety stock
'''
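# Worked example with illustrative numbers (service level 95% => z ~= 1.645):
# for a 4-week forecast of 84 units (3/day), a 4-week demand std of 14
# (~2.65/day), lead time mean 5 days and lead time std 2 days:
#   safety_stock  = round(1.645 * sqrt(5 * 2.65**2 + (2 * 3)**2)) ~= 14
#   reorder_point = round(5 * 3) + 14 = 29
#   order_upto    = 29 + round(84 * 4 / 28) = 41   (order frequency of 4 days)
# These mirror the calculations in safety_stock_calc below.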
def safety_stock_calc(agg_fcst, store_id, forecast_horizon, lt_drug,
lt_store_mean, lt_store_std, reset_date,
corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
chronic_max_flag, v5_active_flag, v6_active_flag,
v6_type_list, v6_ptr_cut_off,
drug_type_list_v4, db, schema, logger):
service_level = 0.95
fcst_weeks = 4
order_freq = 4
z = norm.ppf(service_level)
print(lt_store_mean, lt_store_std)
safety_stock_df = agg_fcst.merge(
lt_drug[['drug_id', 'lead_time_mean', 'lead_time_std']],
how='left', on='drug_id')
safety_stock_df['lead_time_mean'].fillna(lt_store_mean, inplace=True)
safety_stock_df['lead_time_std'].fillna(lt_store_std, inplace=True)
# heuristics #1
safety_stock_df['lead_time_std'] = np.where(
safety_stock_df['lead_time_std'] < 1,
lt_store_std, safety_stock_df['lead_time_std'])
    # safety stock value - variation in demand & lead time
safety_stock_df['safety_stock'] = safety_stock_df.apply(
lambda row: np.round(z * np.sqrt(
(row['lead_time_mean'] * np.square(
row['std'] / np.sqrt(fcst_weeks * 7)) +
np.square(row['lead_time_std'] * row['fcst'] / fcst_weeks / 7))
)), axis=1)
safety_stock_df['safety_stock'] = np.where(
safety_stock_df['fcst'] == 0, 0, safety_stock_df['safety_stock'])
# consider avg fulfillment times
safety_stock_df['reorder_point'] = safety_stock_df.apply(
lambda row: np.round(
row['lead_time_mean'] * row['fcst'] / fcst_weeks / 7),
axis=1) + safety_stock_df['safety_stock']
# ordering frequency 7 days
# heuristics #2
safety_stock_df['order_upto_point'] = (
safety_stock_df['reorder_point'] +
np.round(
np.where(
# if rounding off give 0, increase it to 4-week forecast
(safety_stock_df['reorder_point'] +
safety_stock_df[
'fcst'] * order_freq / fcst_weeks / 7 < 0.5) &
(safety_stock_df['fcst'] > 0),
safety_stock_df['fcst'],
safety_stock_df['fcst'] * order_freq / fcst_weeks / 7))
)
# correction for negative forecast
safety_stock_df['safety_stock'] = np.where(
safety_stock_df['safety_stock'] < 0,
0, safety_stock_df['safety_stock'])
safety_stock_df['reorder_point'] = np.where(
safety_stock_df['reorder_point'] < 0,
0, safety_stock_df['reorder_point'])
safety_stock_df['order_upto_point'] = np.where(
safety_stock_df['order_upto_point'] < 0,
0, safety_stock_df['order_upto_point'])
safety_stock_df['safety_stock_days'] = np.round(
7 * forecast_horizon * safety_stock_df['safety_stock'] /
safety_stock_df['fcst'])
safety_stock_df['reorder_days'] = np.round(
7 * forecast_horizon * safety_stock_df['reorder_point'] /
safety_stock_df['fcst'])
safety_stock_df['order_upto_days'] = np.round(
7 * forecast_horizon * safety_stock_df['order_upto_point'] /
safety_stock_df['fcst'])
# heuristics #3
safety_stock_df['order_upto_point'] = np.where(
safety_stock_df['order_upto_days'] < 14,
np.round(14 * safety_stock_df['fcst'] / fcst_weeks / 7),
safety_stock_df['order_upto_point']
)
safety_stock_df['order_upto_days'] = np.round(
7 * forecast_horizon * safety_stock_df['order_upto_point'] /
safety_stock_df['fcst'])
# recent actuals base adjustments
safety_stock_df = base_heuristics(
store_id, safety_stock_df, reset_date, db, schema, logger)
# getting order value
drug_list = list(safety_stock_df['drug_id'].unique())
print(len(drug_list))
drug_str = str(drug_list).replace('[', '(').replace(']', ')')
fptr_query = """
select "drug-id" , avg(ptr) as fptr, sum(quantity) as curr_inventory
from "{schema}"."inventory-1" i
where "store-id" = {store_id}
and "drug-id" in {drug_str}
group by "drug-id"
""".format(store_id=store_id, drug_str=drug_str, schema=schema)
fptr = db.get_df(fptr_query)
fptr.columns = [c.replace('-', '_') for c in fptr.columns]
fptr["fptr"] = fptr["fptr"].astype(float)
final_pred_ss_df = safety_stock_df.merge(fptr, on='drug_id', how='left')
final_pred_ss_df['fptr'].fillna(100, inplace=True)
final_pred_ss_df['max_value'] = (
final_pred_ss_df['fptr'] * final_pred_ss_df['order_upto_point'])
print(final_pred_ss_df.groupby('bucket')['max_value'].sum().reset_index())
print(28 * final_pred_ss_df['order_upto_point'].sum() /
final_pred_ss_df['fcst'].sum())
print(final_pred_ss_df['max_value'].sum())
# correction plugin - Start
if corrections_flag:
final_pred_ss_df['correction_flag'] = 'N'
final_pred_ss_df['store_id'] = store_id
print("corrections code is running now:")
q_prob = f"""select * from "{schema}"."ipc-corrections-rest-cases" """
q_prob_111 = f"""select * from "{schema}"."ipc-corrections-111-cases" """
prob_matrix = db.get_df(q_prob)
df_111 = db.get_df(q_prob_111)
prob_matrix.columns = [c.replace('-', '_') for c in prob_matrix.columns]
df_111.columns = [c.replace('-', '_') for c in df_111.columns]
        # list of drugs for which corrections are required, i.e. max value is 0
df_corrections_list = final_pred_ss_df[
final_pred_ss_df['order_upto_point'] == 0][['store_id', 'drug_id']]
df_corrections = pd.merge(
df_corrections_list, prob_matrix, how='inner',
on=['store_id', 'drug_id'], validate='one_to_one')
df_corrections = df_corrections.drop(columns={'corrected_max'})
df_corrections['order_upto_point'] = np.round(
df_corrections['current_ma_3_months'])
df_corrections_1 = df_corrections[
(df_corrections['cumm_prob'] >=
corrections_cumulative_probability_cutoff['ma_less_than_2']) &
(df_corrections['current_flag_ma_less_than_2'] == 1)]
df_corrections_2 = df_corrections[
(df_corrections['cumm_prob'] >=
corrections_cumulative_probability_cutoff['ma_more_than_2']) &
(df_corrections['current_flag_ma_less_than_2'] == 0)]
df_corrections_1 = df_corrections_1[
(df_corrections_1['selling_probability'] >=
corrections_selling_probability_cutoff['ma_less_than_2']) &
(df_corrections_1['current_flag_ma_less_than_2'] == 1)]
df_corrections_2 = df_corrections_2[
(df_corrections_2['selling_probability'] >=
corrections_selling_probability_cutoff['ma_more_than_2']) &
(df_corrections_2['current_flag_ma_less_than_2'] == 0)]
df_corrections = pd.concat(
[df_corrections_1, df_corrections_2]).reset_index(drop=True)
df_corrections_final = df_corrections.copy()[
['store_id', 'drug_id', 'current_bucket', 'selling_probability',
'cumm_prob', 'current_flag_ma_less_than_2',
'avg_ptr', 'current_ma_3_months']]
# adding run time current inventory
df_corrections_final = pd.merge(
df_corrections_final,
final_pred_ss_df[['store_id', 'drug_id', 'curr_inventory']],
on=['store_id', 'drug_id'], how='left', validate='one_to_one')
df_corrections = df_corrections[
['store_id', 'drug_id', 'order_upto_point']]
df_corrections['reorder_point'] = np.floor(
df_corrections['order_upto_point'] / 2)
df_corrections['safety_stock'] = np.floor(
df_corrections['order_upto_point'] / 4)
df_corrections['correction_flag'] = 'Y'
df_corrections['is_ipc'] = 'Y'
df_corrections = df_corrections.set_index(['store_id', 'drug_id'])
final_pred_ss_df = final_pred_ss_df.set_index(['store_id', 'drug_id'])
final_pred_ss_df.update(df_corrections)
final_pred_ss_df = final_pred_ss_df.reset_index()
df_corrections = df_corrections.reset_index()
df_corrections = pd.merge(
df_corrections, df_corrections_final, on=['store_id', 'drug_id'],
how='left', validate='one_to_one')
# update 111 cases here.
df_corrections_111 = pd.merge(
df_corrections_list, df_111, how='inner',
on=['store_id', 'drug_id'], validate='one_to_one')
df_corrections_111 = df_corrections_111.drop(
columns={'current_inventory', 'original_max', 'corrected_max',
'inv_impact', 'max_impact'}, axis=1)
df_corrections_111['order_upto_point'] = np.round(
df_corrections_111['ma_3_months'])
df_corrections_111['reorder_point'] = np.floor(
df_corrections_111['order_upto_point'] / 2)
df_corrections_111['safety_stock'] = np.floor(
df_corrections_111['order_upto_point'] / 4)
df_corrections_111['correction_flag'] = 'Y'
df_corrections_111['is_ipc'] = 'Y'
# adding run time current inventory
df_corrections_111 = pd.merge(
df_corrections_111,
final_pred_ss_df[['store_id', 'drug_id', 'curr_inventory']],
on=['store_id', 'drug_id'], how='left', validate='one_to_one')
df_corrections_111 = df_corrections_111.set_index(
['store_id', 'drug_id'])
final_pred_ss_df = final_pred_ss_df.set_index(['store_id', 'drug_id'])
final_pred_ss_df.update(df_corrections_111)
final_pred_ss_df = final_pred_ss_df.reset_index()
df_corrections_111 = df_corrections_111.reset_index()
# set reset date
curr_date = str(datetime.now())
df_corrections['reset_date'] = curr_date
df_corrections_111['reset_date'] = curr_date
else:
print('corrections block skipped :')
final_pred_ss_df["store_id"] = store_id
final_pred_ss_df["correction_flag"] = 'N'
df_corrections = pd.DataFrame()
df_corrections_111 = pd.DataFrame()
# Correction plugin - End #
final_pred_ss_df = final_pred_ss_df.drop(['store_id'], axis=1)
# Chronic drug changes
if chronic_max_flag == 'Y':
# based on ME OOS feedback - keep chronic drugs
drug_max_zero = tuple(
final_pred_ss_df.query('order_upto_point == 0')['drug_id'])
# reading chronic drug list
drug_chronic_max_zero_query = '''
select id as drug_id from "{schema}".drugs
where category = 'chronic'
and id in {0}
'''.format(str(drug_max_zero), schema=schema)
drug_chronic_max_zero = db.get_df(drug_chronic_max_zero_query)['drug_id']
# setting non zero max for such drugs
final_pred_ss_df.loc[
(final_pred_ss_df['drug_id'].isin(drug_chronic_max_zero)) &
(final_pred_ss_df['order_upto_point'] == 0),
'order_upto_point'] = 1
final_pred_ss_df.loc[
(final_pred_ss_df['drug_id'].isin(drug_chronic_max_zero)) &
(final_pred_ss_df['order_upto_point'] == 0),
'correction_flag'] = 'Y_chronic'
# Min/SS/Max overlap correction
final_pred_ss_df['safety_stock_days'].fillna(0, inplace=True)
final_pred_ss_df['reorder_days'].fillna(0, inplace=True)
final_pred_ss_df['order_upto_days'].fillna(0, inplace=True)
final_pred_ss_df = ipcv4_heuristics(final_pred_ss_df, drug_type_list_v4, db, schema)
if v5_active_flag == "Y":
logger.info("IPC V5 Correction Starts")
final_pred_ss_df = v5_corrections(store_id, final_pred_ss_df, logger)
logger.info("IPC V5 Correction Successful")
# if v6_active_flag == "Y":
# logger.info("IPC V6 Correction Starts")
# final_pred_ss_df, drugs_max_to_lock_ipcv6, drug_rejects_ipcv6 = \
# v6_corrections(store_id, final_pred_ss_df, reset_date, v6_type_list,
# v6_ptr_cut_off, logger)
#
# # add algo name to v6 write table
# drugs_max_to_lock_ipcv6["algo"] = 'ipc'
# drug_rejects_ipcv6["algo"] = 'ipc'
# logger.info("IPC V6 Correction Successful")
# else:
drugs_max_to_lock_ipcv6 = pd.DataFrame()
drug_rejects_ipcv6 = pd.DataFrame()
return final_pred_ss_df, df_corrections, df_corrections_111, \
        drugs_max_to_lock_ipcv6, drug_rejects_ipcv6

# source: zeno-etl-libs-v3 / zeno_etl_libs/utils/ipc/safety_stock.py
import datetime
import pandas as pd
import numpy as np
import keras.backend as K
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM
import tensorflow as tf
tf.__version__
def ae_weight_calc(y_true, y_pred, pos_error_weight):
# dim of y_pred, y_true [n_batch, output var]
error = y_true - y_pred
greater = K.greater(error, 0)
# 0 for y pred is more, 1 for y_pred is less
greater = K.cast(greater, K.floatx())
greater = greater + pos_error_weight
error = K.abs(error)
error = K.mean(error*greater, axis=1)
return error
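# Illustrative weighting (with pos_error_weight = 1): if y_true = 10 and
# y_pred = 7, the +3 error gets weight 1 + 1 = 2 (penalty 6); if y_pred = 13,
# the -3 error gets weight 0 + 1 = 1 (penalty 3). Under-forecasts are thus
# penalised more heavily than over-forecasts.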
def custom_loss(pos_error_weight):
def ae_specific_loss(y_true, y_pred):
return ae_weight_calc(y_true, y_pred, pos_error_weight)
# Returns the (y_true, y_pred) loss function
return ae_specific_loss
# create a differenced series for stationarity
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return pd.Series(diff)
def series_to_supervised(df, n_in=1, n_out=1, dropnan=True):
if type(df) == pd.DataFrame:
data = df[['net_sales_quantity']].values
else:
data = df
data_df = pd.DataFrame(data)
n_vars = 1
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(data_df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(data_df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
def prepare_data(df, n_test=1, n_in=1, n_out=1):
np.random.seed(1234)
# transform into lag and lead
supervised_df = series_to_supervised(df, n_in=n_in, n_out=n_out)
date_df = df[['date']].reset_index(drop=True)
supervised_df = supervised_df.merge(
date_df, how='inner', left_index=True, right_index=True)
# marking test and train
supervised_df['sample_flag'] = ''
supervised_df.iloc[0:-n_test]['sample_flag'] = 'train'
supervised_df.iloc[-n_test:]['sample_flag'] = 'validation'
# transform data to be stationary
raw_values = df[['net_sales_quantity']].values
diff_series = difference(raw_values, 1)
diff_values = diff_series.values
diff_values = diff_values.reshape(len(diff_values), 1)
# rescale values to -1, 1
scaler = MinMaxScaler(feature_range=(-1, 1))
scaled_values = scaler.fit_transform(diff_values)
scaled_values = scaled_values.reshape(len(scaled_values), 1)
# transform into supervised learning problem X, y
supervised_scaled_df = series_to_supervised(
scaled_values, n_in=n_in, n_out=n_out)
supervised_scaled_df = supervised_scaled_df.merge(
date_df, how='inner', left_index=True, right_index=True)
# marking test and train for scaled version
supervised_scaled_df['sample_flag'] = ''
supervised_scaled_df.iloc[0:-n_test]['sample_flag'] = 'train'
supervised_scaled_df.iloc[-n_test:]['sample_flag'] = 'validation'
return supervised_df, supervised_scaled_df, scaler
# fit an LSTM network to training data
def fit_lstm(
X, y, n_in=1, n_out=1, n_batch=1, nb_epoch=1000,
n_neurons=4, use_dropout=False, error_factor=1):
# reshape training into [samples, timesteps, features]
X = X.reshape(X.shape[0], 1, X.shape[1])
# design network
model = Sequential()
model.add(LSTM(
n_neurons, batch_input_shape=(n_batch, X.shape[1], X.shape[2]),
stateful=True))
if use_dropout is not False:
model.add(Dropout(use_dropout))
model.add(Dense(y.shape[1]))
loss = custom_loss(error_factor)
model.compile(loss=loss, optimizer='adam')
# print(model.summary())
# model.compile(loss='mean_squared_error', optimizer='adam')
# fit network
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=n_batch, verbose=0, shuffle=False)
model.reset_states()
return model
# make one forecast with an LSTM,
def forecast_lstm(model, X, n_batch):
# reshape input pattern to [samples, timesteps, features]
forecasts = []
# make forecast
for i in range(X.shape[0]):
X_input = X[i, :].reshape(1, n_batch, X.shape[1])
forecast = model.predict(X_input, batch_size=n_batch)
# convert to array
forecasts.append(list(forecast.reshape(forecast.shape[1])))
return forecasts
def inverse_transform(data_df, scaler, undifferenced_df, col_names):
undifferenced_df = undifferenced_df.loc[data_df.index]
for col in undifferenced_df.columns:
if (data_df[col].dtype == float):
data_df[col] = scaler.inverse_transform(data_df[[col]])
data_df[col] += undifferenced_df[col]
col_names = ['var1(t-1)'] + col_names
for i in list((range(1, len(col_names)))):
data_df[col_names[i]] = scaler.inverse_transform(
data_df[[col_names[i]]])
data_df[col_names[i]] += data_df[col_names[i - 1]]
data_df[col_names] = np.round(data_df[col_names])
return data_df
def lstm_horizon_ape(df, col_names):
predicted = df[col_names].sum(axis=1)
actual = df[[x.replace('_hat', '') for x in col_names]].sum(axis=1)
return abs(predicted - actual)/actual
def lstm_forecast(
df, n_neurons=1, week_in=1, week_out=1, forecast_horizon=4, epochs=90,
use_dropout=False, n_batch=1, error_factor=1):
drug_id = df['drug_id'].unique()[0]
start_date = df.date.max()
date_list = [
start_date + datetime.timedelta(days=d*7)
for d in range(1, forecast_horizon+1)]
fcst = [0] * forecast_horizon
# setting seed for reproducibility
np.random.seed(1234)
tf.random.set_seed(1234)
supervised_df, supervised_scaled_df, scaler = prepare_data(
df, n_test=forecast_horizon, n_in=week_in, n_out=4)
train = supervised_scaled_df
_, test, _ = prepare_data(
df, n_test=forecast_horizon, n_in=week_in, n_out=0)
variable_name = list(train.columns)
variable_name = variable_name[:-2]
X_train, y_train = (
train[variable_name].values[:, 0:week_in],
train[variable_name].values[:, week_in:])
X_test = test[variable_name[:week_in]].iloc[-1]
X_test = np.reshape(np.ravel(X_test), (1, X_test.shape[0]))
model = fit_lstm(
X_train, y_train, n_in=week_in, n_out=week_out, n_batch=n_batch,
nb_epoch=epochs, n_neurons=n_neurons, use_dropout=use_dropout,
error_factor=error_factor)
hat_col = variable_name[week_in:]
hat_col = [x + '_hat' for x in hat_col]
scaler_test_fcst = forecast_lstm(model, X_test, n_batch=n_batch)
test_fcst = scaler.inverse_transform(scaler_test_fcst)
test_fcst = np.ravel(test_fcst)
for i in range(len(test_fcst)):
fcst[i] = df.net_sales_quantity.iloc[-1] + np.sum(test_fcst[:i])
if fcst[i] < 0:
fcst[i] = 0
fcst_df = pd.DataFrame({
'drug_id': drug_id, 'date': date_list, 'fcst': np.round(fcst),
'std': np.round(df.net_sales_quantity.iloc[-8:].std())})
return fcst_df
def lstm_wide_long(df, supervised_hat, hat_col):
drug_id = df['drug_id'].values[0]
supervised_hat = supervised_hat[supervised_hat['drug_id'] == drug_id]
return_df = df.copy()
fcst = (
list(supervised_hat.iloc[:-1][hat_col[0]].values) +
list(supervised_hat.iloc[-1][hat_col].values))
return_df.loc[-len(fcst):, 'fcst'] = pd.Series(
fcst, index=df.index[-len(fcst):])
return return_df
def hinge_error(error):
    return sum(error < 0)

# source: zeno-etl-libs-v3 / zeno_etl_libs/utils/ipc/forecasting_modules/lstm.py
from ast import literal_eval
import pandas as pd
import numpy as np
def ipcv4_heuristics(final_pred_ss_df, drug_type_list_v4, db, schema):
    ''' drug_type_list_v4 variable has the following format:
drug_type_list_v4 = {'generic':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',
'ethical':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',
'others':'{0:[0,0,0], 1:[0,0,2], 2:[0,1,2],3:[1,2,3]}'}
final_pred_ss_df has the following format Index(['drug_id', 'model', 'bucket', 'percentile', 'fcst', 'std',
'lead_time_mean', 'lead_time_std', 'safety_stock', 'reorder_point',
'order_upto_point', 'safety_stock_days', 'reorder_days',
'order_upto_days', 'fptr', 'curr_inventory', 'max_value',
'correction_flag'],
dtype='object')
'''
q_drug_type_info = f""" select id as drug_id, "type" as drug_type from "{schema}".drugs """
drug_type_info = db.get_df(q_drug_type_info)
    # convert drug types which are not generic or ethical to 'others'
drug_type_info['drug_type'] = np.where(
(drug_type_info['drug_type'] == 'ethical') | (drug_type_info['drug_type'] == 'generic'),
drug_type_info['drug_type'],'others')
final_pred_ss_df_v4 = pd.merge(final_pred_ss_df, drug_type_info, on=['drug_id'], how='left')
for drug_type in drug_type_list_v4.keys():
for max_value, ops_value in literal_eval(drug_type_list_v4[drug_type]).items():
safety_stock = ops_value[0] # min value
reorder_point = ops_value[1] # ss value
order_upto_point = ops_value[2] # max value
index_list = final_pred_ss_df_v4[
(final_pred_ss_df_v4['order_upto_point'].isin([max_value])) & (
final_pred_ss_df_v4['drug_type'] == drug_type)].index
final_pred_ss_df_v4.loc[index_list, 'safety_stock'] = safety_stock
final_pred_ss_df_v4.loc[index_list, 'reorder_point'] = reorder_point
final_pred_ss_df_v4.loc[index_list, 'order_upto_point'] = order_upto_point
print('Cases with {0} max: {1} for drug_type:{2} '.format(max_value, len(index_list), drug_type))
#remove the drug type column that was previously added
final_pred_ss_df_v4 = final_pred_ss_df_v4.drop(['drug_type'], axis = 1)
    return final_pred_ss_df_v4

# source: zeno-etl-libs-v3 / zeno_etl_libs/utils/ipc/heuristics/ipcv4_heuristics.py
import numpy as np
from datetime import datetime, timedelta
def get_demand_heuristics(start_date, end_date, drug_list, store_id,
db, schema, logger):
# sales query
print('getting data for store', store_id)
sales_query = f"""
select date("created-at") as "sales-date", "drug-id" ,
sum("net-quantity") as "net-sales-quantity"
from "{schema}".sales s
where "store-id" = {store_id}
and date("created-at") >= '{start_date}'
and date("created-at") < '{end_date}'
and "drug-id" in {drug_list}
group by "sales-date", "drug-id"
"""
sales_history = db.get_df(sales_query)
sales_history.columns = [c.replace('-', '_') for c in sales_history.columns]
# cfr pr loss
cfr_pr_query = f"""
select "attributed-loss-date", "drug-id",
sum("loss-quantity") as "loss-quantity"
from "{schema}"."cfr-patient-request"
where "shortbook-date" >= '{start_date}'
and "shortbook-date" < '{end_date}'
and "drug-id" <> -1
and ("drug-category" = 'chronic' or "repeatability-index" >= 40)
and "loss-quantity" > 0
and "drug-id" in {drug_list}
and "store-id" = {store_id}
group by "attributed-loss-date", "drug-id"
"""
cfr_pr = db.get_df(cfr_pr_query)
cfr_pr["loss-quantity"] = cfr_pr["loss-quantity"].astype(float)
cfr_pr.columns = [c.replace('-', '_') for c in cfr_pr.columns]
# total demand merge
demand = sales_history.merge(
cfr_pr, left_on=['sales_date', 'drug_id'],
right_on=['attributed_loss_date', 'drug_id'], how='left')
demand['sales_date'] = demand['sales_date'].combine_first(
demand['attributed_loss_date'])
demand['net_sales_quantity'].fillna(0, inplace=True)
demand['loss_quantity'].fillna(0, inplace=True)
demand['net_sales_quantity'] += demand['loss_quantity']
demand.drop(
['attributed_loss_date', 'loss_quantity'], axis=1, inplace=True)
# aggregating demand at level
demand_agg = demand.groupby(
['drug_id'])['net_sales_quantity'].sum().reset_index()
demand_agg.columns = ['drug_id', 'historical_demand']
# getting drug type
drug_type_query = """
select id as drug_id, type as drug_type
from "{schema}".drugs
where id in {0}
""".format(drug_list, schema=schema)
drug_type = db.get_df(drug_type_query)
demand_agg = demand_agg.merge(drug_type, on=['drug_id'], how='left')
return demand_agg
def base_heuristics(
store_id, safety_stock_df, reset_date, db, schema, logger=None,
raf_range=(0.25, 0.75), corr_raf=0.5):
# getting time period for last 4 weeks
date = datetime.strptime(reset_date, '%Y-%m-%d')
end_date = (date - timedelta(days=date.weekday())).date()
start_date = end_date - timedelta(days=28)
end_date = str(end_date)
start_date = str(start_date)
logger.info(
'Getting last 4 week data for base heuristic from' + start_date +
'to' + end_date)
# getting demand for heuristics - A/B class only
bucket_class_list = ['AX', 'AY', 'AZ', 'BX', 'BY', 'BZ']
drug_list = tuple(list(safety_stock_df.loc[
safety_stock_df.bucket.isin(bucket_class_list),
'drug_id']))
demand = get_demand_heuristics(
start_date, end_date, drug_list, store_id, db, schema, logger)
safety_stock_adj = safety_stock_df.merge(
demand, how='left', on=['drug_id'])
safety_stock_adj['historical_demand'].fillna(0, inplace=True)
# RAF factor calculation
safety_stock_adj['raf'] = np.select(
[safety_stock_adj['historical_demand'] == 0],
[0.5],
default=safety_stock_adj['order_upto_point'] /
safety_stock_adj['historical_demand'])
# adjustment using RAF: for low
low_raf_index = safety_stock_adj[
(safety_stock_adj['bucket'].isin(bucket_class_list)) &
(safety_stock_adj['raf'] < raf_range[0])
].index
safety_stock_adj.loc[low_raf_index, 'order_upto_point'] = np.round(
np.where(
safety_stock_adj.loc[low_raf_index, 'order_upto_point'] == 0,
safety_stock_adj.loc[low_raf_index, 'historical_demand']*corr_raf,
(safety_stock_adj.loc[low_raf_index, 'order_upto_point']*corr_raf /
safety_stock_adj.loc[low_raf_index, 'raf'])
))
safety_stock_adj.loc[low_raf_index, 'reorder_point'] = np.round(
safety_stock_adj.loc[low_raf_index, 'order_upto_point']/2)
safety_stock_adj.loc[low_raf_index, 'safety_stock'] = np.round(
safety_stock_adj.loc[low_raf_index, 'reorder_point']/2)
# print(safety_stock_adj.head())
# adjustment using RAF: for high
high_raf_index = safety_stock_adj[
(safety_stock_adj['bucket'].isin(bucket_class_list)) &
(safety_stock_adj['raf'] > raf_range[1])
].index
safety_stock_adj.loc[high_raf_index, 'order_upto_point'] = np.round(
safety_stock_adj.loc[high_raf_index, 'order_upto_point'] *
corr_raf / safety_stock_adj['raf'])
safety_stock_adj.loc[high_raf_index, 'reorder_point'] = np.round(
safety_stock_adj.loc[high_raf_index, 'order_upto_point']/2)
safety_stock_adj.loc[high_raf_index, 'safety_stock'] = np.round(
safety_stock_adj.loc[high_raf_index, 'reorder_point']/2)
logger.info(
'Out of total line items ' + str(len(safety_stock_adj)) + '\n' +
'Decreased: Total ' + str(len(high_raf_index)) + '\n' +
'Decreased: Generic ' +
str(len(safety_stock_adj.iloc[high_raf_index].
query('drug_type == "generic"'))) + '\n' +
'Decreased: Ethical ' +
str(len(safety_stock_adj.iloc[high_raf_index].
query('drug_type == "ethical"'))) + '\n' +
'Increased: Total ' + str(len(low_raf_index)) + '\n' +
'Increased: Generic ' +
str(len(safety_stock_adj.iloc[low_raf_index].
query('drug_type == "generic"'))) + '\n' +
'Increased: Ethical ' +
str(len(safety_stock_adj.iloc[low_raf_index].
query('drug_type == "ethical"')))
)
    return safety_stock_adj[safety_stock_df.columns]

# source: zeno-etl-libs-v3 / zeno_etl_libs/utils/ipc/heuristics/base.py
import pandas as pd
import numpy as np
import datetime as dt
def v5_corrections(store_id, safety_stock_df, db, schema, logger):
"""
Main function to perform V5 corrections
"""
# Get Drug STD Qty and list of repeatable drug_ids
df_3m_drugs, unique_drugs_3m = get_3m_drug_std_qty(store_id, db, schema, logger)
# Locate drugs to perform correction check
df_std_check = safety_stock_df.loc[safety_stock_df["drug_id"].isin(
unique_drugs_3m)][["drug_id", "fcst", "safety_stock", "reorder_point", "order_upto_point"]]
# Drugs not forecasted by IPC
drugs_3m_not_set = list(set(unique_drugs_3m) ^ set(df_std_check["drug_id"].unique()))
logger.info(f"Number of drugs not forecasted: {len(drugs_3m_not_set)}")
# Merge STD Qty with SS table and find drugs correction areas
df_std_check = df_3m_drugs.merge(df_std_check, on="drug_id", how="left")
df_std_check = df_std_check.dropna()
df_std_check["rop>=std_qty"] = np.where(
df_std_check["reorder_point"] >= df_std_check["std_qty"], "Y", "N")
tot_rep_drugs = df_std_check.shape[0]
corr_req = df_std_check.loc[df_std_check['rop>=std_qty'] == 'N'].shape[0]
corr_not_req = df_std_check.loc[df_std_check['rop>=std_qty'] == 'Y'].shape[0]
logger.info(f"Number of repeatable drugs: {tot_rep_drugs}")
logger.info(f"Number of repeatable drugs corrections required: {corr_req}")
logger.info(f"Number of repeatable drugs corrections not required: {corr_not_req}")
# CORRECTION STARTS
order_freq = 4
column_order = list(df_std_check.columns)
column_order += ["corr_ss", "corr_rop", "corr_oup"]
# CASE1: No changes required
df_no_change = df_std_check.loc[df_std_check["rop>=std_qty"] == "Y"].copy()
df_no_change["corr_ss"] = df_no_change["safety_stock"].astype(int)
df_no_change["corr_rop"] = df_no_change["reorder_point"].astype(int)
df_no_change["corr_oup"] = df_no_change["order_upto_point"].astype(int)
# CASE2: SS & ROP & OUP is Non Zero
df_change1 = df_std_check.loc[(df_std_check["rop>=std_qty"] == "N") &
(df_std_check["safety_stock"] != 0) &
(df_std_check["reorder_point"] != 0) &
(df_std_check["order_upto_point"] != 0)].copy()
df_change1["mul_1"] = df_change1["reorder_point"] / df_change1["safety_stock"]
df_change1["mul_2"] = df_change1["order_upto_point"] / df_change1["reorder_point"]
df_change1["corr_rop"] = df_change1["std_qty"]
df_change1["corr_ss"] = np.ceil(df_change1["corr_rop"] / df_change1["mul_1"]).astype(int)
# If ROP >= OUP, then in those cases, increase OUP.
df_change11 = df_change1.loc[
df_change1["corr_rop"] >= df_change1["order_upto_point"]].copy()
df_change12 = df_change1.loc[
df_change1["corr_rop"] < df_change1["order_upto_point"]].copy()
df_change11["corr_oup"] = np.ceil(df_change11["corr_rop"] + (
df_change11["fcst"] * order_freq / 28)).astype(int)
df_change12["corr_oup"] = np.ceil(df_change12["corr_rop"] + (
df_change12["fcst"] * order_freq / 28)).astype(int)
df_change1 = df_change11.append(df_change12)
df_change1 = df_change1[column_order]
# CASE3: Any of SS & ROP & OUP is Zero
df_change2 = df_std_check.loc[(df_std_check["rop>=std_qty"] == "N")].copy()
df_change2 = df_change2.loc[~((df_change2["safety_stock"] != 0) &
(df_change2["reorder_point"] != 0) &
(df_change2["order_upto_point"] != 0))].copy()
df_change2["corr_rop"] = df_change2["std_qty"].astype(int)
df_change2["corr_ss"] = np.floor(df_change2["corr_rop"] / 2).astype(int)
# If ROP >= OUP, then in those cases, increase OUP.
df_change21 = df_change2.loc[
df_change2["corr_rop"] >= df_change2["order_upto_point"]].copy()
df_change22 = df_change2.loc[
df_change2["corr_rop"] < df_change2["order_upto_point"]].copy()
df_change21["corr_oup"] = np.ceil(df_change21["corr_rop"] + (
df_change21["fcst"] * order_freq / 28)).astype(int)
df_change22["corr_oup"] = np.ceil(df_change22["corr_rop"] + (
df_change22["fcst"] * order_freq / 28)).astype(int)
df_change2 = df_change21.append(df_change22)
df_change2 = df_change2[column_order]
# Combine all 3 cases
df_corrected = df_no_change.append(df_change1)
df_corrected = df_corrected.append(df_change2)
df_corrected = df_corrected.sort_index(ascending=True)
# Get DF of corrected drugs and merge with input DF
df_corrected_to_merge = df_corrected.loc[df_corrected["rop>=std_qty"] == "N"][
["drug_id", "corr_ss", "corr_rop", "corr_oup"]]
corr_safety_stock_df = safety_stock_df.merge(df_corrected_to_merge,
on="drug_id", how="left")
# Make corrections for required drugs
corr_safety_stock_df["safety_stock"] = np.where(
corr_safety_stock_df["corr_ss"] >= 0, corr_safety_stock_df["corr_ss"],
corr_safety_stock_df["safety_stock"])
corr_safety_stock_df["reorder_point"] = np.where(
corr_safety_stock_df["corr_rop"] >= 0, corr_safety_stock_df["corr_rop"],
corr_safety_stock_df["reorder_point"])
corr_safety_stock_df["order_upto_point"] = np.where(
corr_safety_stock_df["corr_oup"] >= 0, corr_safety_stock_df["corr_oup"],
corr_safety_stock_df["order_upto_point"])
corr_safety_stock_df.drop(["corr_ss", "corr_rop", "corr_oup"], axis=1, inplace=True)
corr_safety_stock_df["max_value"] = corr_safety_stock_df["order_upto_point"] * \
corr_safety_stock_df["fptr"]
assert safety_stock_df.shape == corr_safety_stock_df.shape
# Evaluate PRE and POST correction
pre_post_metrics = {
"metric": ["pre_corr", "post_corr"],
"ss_qty": [safety_stock_df["safety_stock"].sum(),
corr_safety_stock_df["safety_stock"].sum()],
"ss_val": [round((safety_stock_df["safety_stock"] * safety_stock_df["fptr"]).sum(), 2),
round((corr_safety_stock_df["safety_stock"] * corr_safety_stock_df["fptr"]).sum(), 2)],
"rop_qty": [safety_stock_df["reorder_point"].sum(), corr_safety_stock_df["reorder_point"].sum()],
"rop_val": [round((safety_stock_df["reorder_point"] * safety_stock_df["fptr"]).sum(), 2),
round((corr_safety_stock_df["reorder_point"] * corr_safety_stock_df["fptr"]).sum(), 2)],
"oup_qty": [safety_stock_df["order_upto_point"].sum(), corr_safety_stock_df["order_upto_point"].sum()],
"oup_val": [round((safety_stock_df["order_upto_point"] * safety_stock_df["fptr"]).sum(), 2),
round((corr_safety_stock_df["order_upto_point"] * corr_safety_stock_df["fptr"]).sum(), 2)]
}
pre_post_metics_df = pd.DataFrame.from_dict(pre_post_metrics).set_index('metric').T
pre_post_metics_df["delta"] = pre_post_metics_df["post_corr"] - pre_post_metics_df["pre_corr"]
pre_post_metics_df["change%"] = round((pre_post_metics_df["delta"] / pre_post_metics_df["pre_corr"]) * 100, 2)
logger.info(f"\n{str(pre_post_metics_df)}")
return corr_safety_stock_df
def max_mode(pd_series):
return int(max(pd_series.mode()))
def get_3m_drug_std_qty(store_id, db, schema, logger):
"""
To fetch repeatable patient-drug qty from past 90days and calculate
standard drug qty.
"""
start_date = (dt.date.today() - dt.timedelta(days=90)).strftime("%Y-%m-%d")
end_date = dt.date.today().strftime("%Y-%m-%d")
q_3m = """
select "patient-id" , "old-new" , "drug-id" ,
date("created-at") as "on-date", quantity as "tot-qty"
from "{schema}".sales
where "store-id" = {0}
and "is-repeatable" = 1
and "bill-flag" = 'gross'
and "created-at" > '{1} 00:00:00' and "created-at" < '{2} 00:00:00'
""".format(store_id, start_date, end_date, schema=schema)
df_3m = db.get_df(q_3m)
df_3m.columns = [c.replace('-', '_') for c in df_3m.columns]
# Get patient-drug-level STD Qty
df_3m["3m_bills"] = 1
df_3m["std_qty"] = df_3m["tot_qty"]
df_3m_patient = df_3m.groupby(["patient_id", "drug_id"],
as_index=False).agg(
{"3m_bills": "sum", "tot_qty": "sum", "std_qty": max_mode})
logger.info(f"Total repeatable patients: {len(df_3m_patient.patient_id.unique())}")
# Get drug-level STD Qty
df_3m_drugs = df_3m_patient.groupby("drug_id", as_index=False).agg(
{"std_qty": "max"})
    # STD Qty > 10 is considered an outlier and dropped
drug_count_before = df_3m_drugs.shape[0]
df_3m_drugs = df_3m_drugs.loc[df_3m_drugs["std_qty"] <= 10]
drug_count_after = df_3m_drugs.shape[0]
logger.info(f"Number of outlier drugs STD Qty: {drug_count_before-drug_count_after}")
# Repeatable drugs STD Qty to check against IPC set ROP
unique_drugs_3m = list(df_3m_drugs["drug_id"].unique())
    return df_3m_drugs, unique_drugs_3m

# source: zeno-etl-libs-v3 / zeno_etl_libs/utils/ipc/heuristics/ipcv5_heuristics.py
# ETL Project
Extract, Transform, Load (ETL). All the scripts (glue and sagemaker) and custom libs.
## 1. Setup:
* clone the git project
```
git clone https://<your-git-user-name>@bitbucket.org/ashishgenerico/etl.git
```
* go inside the `./etl/` directory
```commandline
cd etl
```
* Request the `zeno_secrets.py` file from your fellow team members and paste it inside the `secret` directory
## 2. Glue
* Create `python3.6` virtual env and activate
```commandline
python3 -m venv etl_env
```
* Activate the virtual env
```commandline
source etl_env/bin/activate
```
* Install the `requirements.txt` inside the virtual env
```commandline
pip install -r requirements.txt
```
* Write your ETL script inside the `glue-jobs/src/scripts/<your_script_name>/` folder
## 3. Sagemaker
* Create `python3.7` (or **greater**) virtual env and activate
```commandline
python3 -m venv etl_env
```
* Activate the virtual env
```commandline
source etl_env/bin/activate
```
* Install the `requirements-ml.txt` inside the virtual env
```commandline
pip install -r requirements-ml.txt
```
* Write your ETL jupyter lab notebooks inside the `sagemaker-jobs/src/scripts/<your_script_name>/` folder
* Refer the demo notebooks inside the `sagemaker-jobs/src/scripts/experiments/`
## 4. Deployment
### 4.1 Glue
* Add your script details in `.\templates\templetes.json` file
* Push your code to bitbucket and raise PR
* Don't forget to enjoy. 🍺🍺
### 4.2 Sagemaker
* If there are changes in `zeno_etl_libs` custom library then we need to publish it on PyPI
* Increase the version in `setup.py` present in `etl` folder
* Run the below command to build the package
```commandline
python setup.py sdist bdist_wheel
```
* Run the below command to publish the new version on PyPI
```commandline
twine upload dist/* --verbose -u kuldeepsingh -p bEmham-6sonke-forcex
```
* Add the below command at the start of jupyter notebook
```commandline
!pip install zeno_etl_libs==new.version.number
```
## 5. Troubleshooting
1. Don't forget to add the custom library folder (`./zeno_etl_libs`) to the Python search path before importing `zeno_etl_libs` during local development
```python
import sys
sys.path.append('../../../..')
```
2. Use **conda** if there are any issues while installing the Python packages or creating the virtual environment
## 6. Rules
* Follow the proper git flow, given here: [here](https://docs.google.com/document/d/1SnT_UKCj1Da07S-FFgvTxk30WbRbb-MFX89kEnMIKbk/edit)
## 7. Container image update (for DevOps)
* Set these options in case of **UnicodeEncodeError: 'ascii' codec can't encode characters** error
```commandline
export LC_ALL=en_US.utf-8 && export LANG=en_US.utf-8 && export PYTHONIOENCODING=utf8
```
* Delete the code build project container
```commandline
aws codebuild delete-project --name create-sagemaker-container-env_name-notebook-runner
```
* Copy the modified Dockerfile present at `./extra_dependency/Dockerfile` to the virtual environment's site packages at `./site-packages/sagemaker_run_notebook/container/Dockerfile`, i.e. add these extra commands to the file:
```commandline
RUN apt-get update
RUN pip3 install --upgrade pip
RUN apt-get install -y gcc g++ build-essential python3-dev
```
* Update the container image:
```commandline
run-notebook create-container env_name-notebook-runner --requirements requirements-ml.txt
```

*source: zeno-etl-libs 1.0.126 / README.md*
# Zeno Etl Libs custom library
Custom library holds all the helper functions that are commonly utilized across the entire project.
## config:
- Holds a common.py file, which is used to fetch secrets from AWS Secrets Manager (or from a local secrets file in development).
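- A minimal usage sketch, mirroring how the other helpers in this package call it (the key accessed at the end is just an example):
```python
from zeno_etl_libs.config.common import Config

configobj = Config.get_instance()
secrets = configobj.get_secrets()  # AWS Secrets Manager on stage/preprod/prod, local zeno_secrets.py otherwise
base_url = secrets['NODE_NOTIFICATION_BASE_URL']  # example key only
```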
## db:
- Holds db.py, which manages the connections and related helper functions for several databases: MySQL, Redshift, MSSQL, PostgreSQL and MongoDB.
- Functions in db.py cover opening connections, executing user queries and, finally, closing the connections.
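- A rough sketch of typical usage; the class and connection-method names below are assumptions (check db.py), while `get_df`/`execute` match how the job scripts call it:
```python
from zeno_etl_libs.db.db import DB  # assumed import path

db = DB()               # assumption: default Redshift connection
db.open_connection()    # assumption: see db.py for the exact method name
df = db.get_df('select id from "prod2-generico".drugs limit 10')  # returns a pandas DataFrame
db.close_connection()   # assumption
```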
## django
- This particular folder holds an api.py file, which in turn helps in Django integration via APIs.
## helper
- This folder holds multiple .py files that help with the execution of the main job scripts.
### 1. aws
- This sub-folder contains an s3.py file, which lets the user perform S3 operations from a Glue or Sagemaker job, such as saving a file to S3 or downloading a specific file from S3.
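- For illustration, downloading a file object the way email.py does internally (the URI is a placeholder):
```python
from zeno_etl_libs.helper.aws.s3 import S3

s3 = S3()
file_bytes = s3.get_file_object(uri="s3://some-bucket/some/path/report.csv")  # illustrative URI
```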
### 2. clevertap
- This folder contains clevertap.py; its functions help run the CleverTap job via the CleverTap API.
### 3. disprz
- This folder holds disprz.py, which, like the CleverTap helper, supports the lnd-ma-disprz job via its API.
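- A minimal sketch based on the `Dizprz` class in disprz.py:
```python
from zeno_etl_libs.helper.disprz.disprz import Dizprz

dz = Dizprz()
df = dz.get_disprz_dataframe()  # learner analytics for roughly the last 400 days, as a pandas DataFrame
```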
### 4. email
- This folder holds email.py, which allows the user to send a specific file from S3 as a mail attachment; it can be used to send out error logs as well as output files.
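- A minimal sketch based on the `Email` class in email.py (the address and S3 URI below are placeholders):
```python
from zeno_etl_libs.helper.email.email import Email

email = Email()
email.send_email_file(
    subject="Daily job report",
    mail_body="Please find the output attached.",
    to_emails="[email protected]",
    file_uris=["s3://some-bucket/some/path/output.csv"],
)
```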
### 5. fresh_service
- This folder contains a .py file which helps in the execution of freshservice job, this .py file contains the API.
### 6. google
- This folder holds two sub-folders related to Google APIs; their use cases are as follows:
#### i. playstore
- This folder holds playstore.py, which helps with connecting to the Google Play Store and fetching app reviews.
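- A minimal sketch based on the `Reviews` class in playstore.py:
```python
from zeno_etl_libs.helper.google.playstore.playstore import Reviews

reviews = Reviews()
latest = reviews.get_all_review(count=200)  # newest Play Store reviews via google_play_scraper
```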
#### ii. sheet
- This folder contains sheet.py, which helps the user download data from, or upload data to, a Google Sheet.
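- A minimal sketch based on the `GoogleSheet` class in sheet.py (the spreadsheet id is the example from its docstring):
```python
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet

gs = GoogleSheet()
rows = gs.download({
    "spreadsheet_id": "1fhQPO7qkbly1q-iDoMN6jmc9VNhzmYBwIFpSsh76m0M",
    "sheet_name": "Sheet1",
    "listedFields": ["posting_date", "id", "store_id"],
})
```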
### 7. Parameter
- This folder houses job-parameter.py, which helps the user connect to a Redshift table and fetch input parameters based on job_id.
### 8. run_notebook
- This folder houses a .py file which helps user to execute certain functions related to Sagemaker jobs, such as
- executing a sagemaker job.
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/README.md | README.md |
import base64
import json
import os
import boto3
from botocore.exceptions import ClientError
def get_secret_from_file(secrets_name, key=None):
"""
:param key: a specific key from secrets
:param secrets_name: secrets file name where secrets are stored
"""
try:
from secret.zeno_secrets import all_secrets
secrets = all_secrets[secrets_name]
try:
if key:
return secrets[key]
else:
return secrets
except KeyError:
error_message = "Set the {0} environment variable".format(key)
raise EnvironmentError(error_message)
except FileNotFoundError:
error_message = "secrets.json not found in config folder"
raise EnvironmentError(error_message)
class Config:
__shared_instance = 'getsecrets'
@staticmethod
def get_instance():
"""Static Access Method"""
if Config.__shared_instance == 'getsecrets':
Config()
return Config.__shared_instance
def __init__(self):
"""virtual private constructor"""
if Config.__shared_instance != 'getsecrets':
raise Exception("This class is a config class !")
else:
Config.__shared_instance = self
self.secrets = None
def download_secret(self, secrets_name=None):
if self.secrets:
return self.secrets
secret_name = f"arn:aws:secretsmanager:ap-south-1:921939243643:secret:{secrets_name}"
region_name = "ap-south-1"
# Create a Secrets Manager client
session = boto3.session.Session(
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"),
region_name=os.environ.get("REGION_NAME")
)
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
try:
print("connecting with secrets manager for getting secrets")
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'DecryptionFailureException':
raise e
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
raise e
elif e.response['Error']['Code'] == 'InvalidParameterException':
raise e
elif e.response['Error']['Code'] == 'InvalidRequestException':
raise e
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
raise e
else:
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
return json.loads(secret)
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return json.loads(decoded_binary_secret)
def get_secrets(self):
if os.environ.get('env') == 'stage':
self.secrets = self.download_secret(secrets_name='staging/etl')
elif os.environ.get('env') == 'preprod':
self.secrets = self.download_secret(secrets_name='preproduction/etl')
elif os.environ.get('env') == 'prod':
self.secrets = self.download_secret(secrets_name='production/etl')
else:
self.secrets = get_secret_from_file(secrets_name='development/etl')
        return self.secrets

# source: zeno-etl-libs 1.0.126 / zeno_etl_libs/config/common.py
from zeno_etl_libs.helper.email.email import Email, any_email_in_string
def create_temp_table(db, table):
""" creates table_temp table and truncates the data if table already exists """
temp_table = table.replace("-", "_") + "_temp"
query = """
create temporary table if not exists
"%s" (LIKE "prod2-generico"."%s");
""" % (temp_table, table)
db.execute(query=query)
query = """truncate "%s";""" % (temp_table)
db.execute(query=query)
return temp_table
def month_diff(date_a, date_b):
"""
This function returns month difference between calendar dates 'date_a' and 'date_b'
"""
return 12 * (date_a.dt.year - date_b.dt.year) + (date_a.dt.month - date_b.dt.month)
def drop_table(db, table_name, is_temp=True):
try:
if is_temp:
db.execute(query="""drop table "%s";""" % table_name)
else:
db.execute(query="""drop table "prod2-generico"."%s";""" % table_name)
print(f"table dropped: {table_name}")
except Exception as e:
print(f"Error in table drop: {str(e)}")
def get_table_info(db, table_name, schema=None):
"""
:param db: db class object
:param table_name: table name
    :param schema: if schema is None --> temp table without schema
:return: table info data frame
"""
if schema:
schema_filter = f"and table_schema = '{schema}'"
else:
schema_filter = ''
query = f"""
select
ordinal_position as position,
column_name,
data_type,
case
when character_maximum_length is not null
then character_maximum_length
else numeric_precision
end as max_length,
is_nullable,
column_default as default_value
from
information_schema.columns
where
table_name = '{table_name}'
-- enter table name here
{schema_filter}
order by
ordinal_position;
"""
db.execute(query=query)
return db.cursor.fetch_dataframe()
def batch(iterable, n=1):
"""
splits any iterable in batches
Example:
data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # list of data
for x in batch(data, 3):
print(x)
# Output
[0, 1, 2]
[3, 4, 5]
[6, 7, 8]
[9, 10]
:param iterable: list, tuple, df
:param n: batch size
"""
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
def log_or_email_error(logger, exception: Exception, email_to,
subject='132-supply-chain JOB FAILURE'):
"""
if email_to string has any email id in it, email will be sent and code will be terminated.
else exception will be raised.
:param logger: logger object
:param exception: Exception object
:param email_to: csv list of emails eg: [email protected],[email protected]
"""
email = Email()
if any_email_in_string(email_to):
logger.exception(exception)
email.send_email_file(subject=subject,
mail_body='The subject job has failed due to: {}'.format(exception),
to_emails=email_to)
exit(0)
else:
        raise exception

# source: zeno-etl-libs 1.0.126 / zeno_etl_libs/helper/helper.py
import io
class Reviews:
def __init__(self):
pass
# credentials = ServiceAccountCredentials.from_json_keyfile_name(
# '/Users/kuldeep/Downloads/playconsole-345511-8f3ba450d69b.json',
# scopes=['https://www.googleapis.com/auth/androidpublisher'])
self.keyfile_dict = {
"type": "service_account",
"project_id": "playconsole-345511",
"private_key_id": "8f3ba450d69bbe9694ae00f49f740912052694b8",
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC0dsBllub9kOcE\nHoYA1uwFOxDQnNeuh52nPIogA+cwIWH4WfAALh2aVVf30Gf6rsjecUrCZz+8gCnd\nbHGOTbIIm+4F3XOXDRyyUIndEFTvr2/TxF2K+qW6PNSywzMHcWWKmxdJxyBrzqUi\ncTsXyH2S9WDOVelmhPMdjEvuPsU6h39zFUbITNw8YjhH1SrAZm0toOYZ/+D7RZJp\nwQApy8rxmi/6kiUNS/R9GNn0tuNE4PPT1xzpFn4stbqf2XWYtYVCb325jWmbAlj6\nfxYiWmGVlgeNlm8XiU0IVvOiCyj9WqPcG0ohOZ5OZxpf6lzkAfO+cYoBpOp7PCzN\nAKvcAB5TAgMBAAECggEASG/KNnzl5y38rLibzUZ3onnc+/+Yy2OAMpqoTsWCTN15\nd7iSD2BzXXmYP86VjhgOZMtQ2Muc18sSAuD+i8JADhiY6B3FwgHvXNvbGrLthPAE\nkRom+hw13ZWBQuf7Wad4vLQYGvMk3mEqA7Mzpw5A6XY5D1mIwC/pbhjceZsUi7Md\nS0YgX+d3WdMi6yCOT4ulOr5SBvtTt8KdQ2y3gMzRApP73PzdoygbQ/PhehTncf3I\nhSgZPnjU2612KFukWONSipuHzgWkaq20HmQEgVYyzohpntJFD6KQrtOL038yUEGm\n8sBhRkc1p9AQB21kjD/XNlH0koSBHwEVJM5KTWiP4QKBgQD5dEqBDKYuQC0HcKm9\nWuVyJPFzMGmbailTGEZb19HCsXh5jSSnH/BxeZ1HPW25d16vuH98x6pbbr1e85F5\nSJXYgU2hlWF34MVZTsTzf+uZ5aZa86fpSmoWcUVEu5L4Ygy7NxdjI0YJqZqWXNB5\npFQR6PeIRhdn5tAahxlwLXkYywKBgQC5MwSA+BYEQM+a2q2aWwh06LExbOlpckv7\nD3DD1f4kDMcqUlPpP0zaEnVPvknySO7JwHFkFLZRD0PWkM0RgNfrXkwXVVdXzx8E\nfyyl9ZKmPgaFx5L3/jY8M5IdYggaWJWtbvjsvyiKb2ACeEGI2HFCaK9nvCBOK4hj\nknUq8kBHmQKBgGuwLEmxsDvfMJE5rc005EB2elWD3NNe7SAWJqmXbdJi0uOGbwBG\n5YHXQnJyrl+WjKXHPCIeAAkgsVfARljZYPbqOx06Y61gt1Fqk9OasZbqcPpqnV40\n5b9yfrjBUR0xFtXrXolJvP6G3Vl0D/uzWSeyLsoBmDEej1AkanLm7pQpAoGBAIf7\n3u23u6rJz+Y7dUcmWpJFHX5WIxjq9MFWuA0DvsTHoSIBK13TveFNtlekOHWvea4o\nINpEnw3r8HrG/dxBR8mqBqMHZcey7GqH2sfNBi4M0ws93Ds9rKMNltb+WUbHDrg3\nCI4FWoYze0K0/CG4E4mYhlrb9riPHGlIa8Hp+KrZAoGBAI/m6ygHRllcJtl4BlJn\no4Py06mgQO2PT4lP2CEIVStF8mu4PY/oO7HHzDQe6EnvHRW0e8cKAepmSqa8kW4R\ndedaVm8SjGeU74mwGheWzQy7vsaDLafy3FDRAtFTvmE4kyyydVExWr2vAUi84TXM\nAuV9FqvKzQc5zcrT4xIEtRGu\n-----END PRIVATE KEY-----\n",
"client_email": "[email protected]",
"client_id": "103055631091973407668",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/playconsoleserviceacc%40playconsole-345511.iam.gserviceaccount.com"
}
self.package_name = "com.zenohealth.android"
self.bucket_name = "pubsite_prod_rev_08705363625182098208"
def get(self):
from apiclient.discovery import build
from httplib2 import Http
from oauth2client.service_account import ServiceAccountCredentials
credentials = ServiceAccountCredentials.from_json_keyfile_dict(
keyfile_dict=self.keyfile_dict,
scopes=['https://www.googleapis.com/auth/androidpublisher']
)
service = build('androidpublisher', 'v3', http=credentials.authorize(Http()))
reviews_resource = service.reviews()
reviews_page = reviews_resource.list(packageName=self.package_name, maxResults=100).execute()
reviews_list = reviews_page["reviews"]
infinite_loop_canary = 100
while "tokenPagination" in reviews_page:
reviews_page = reviews_resource.list(
packageName=self.package_name,
token=reviews_page["tokenPagination"]["nextPageToken"],
maxResults=100).execute()
reviews_list.extend(reviews_page["reviews"])
infinite_loop_canary -= 1
if infinite_loop_canary < 0:
break
for i in reviews_list:
print((i['comments'][0]['userComment']['text'].encode("utf-8")))
return reviews_list
def download_blob_to_stream(self, source_blob_name):
from google.cloud import storage
from oauth2client.service_account import ServiceAccountCredentials
"""Downloads a blob to a stream or other file-like object."""
# The ID of your GCS bucket
# bucket_name = "your-bucket-name"
# The ID of your GCS object (blob)
# source_blob_name = "storage-object-name"
# The stream or file (file-like object) to which the blob will be written
file_obj = io.BytesIO()
# credentials = service_account.Credentials.from_service_account_info(self.keyfile_dict)
credentials = ServiceAccountCredentials.from_json_keyfile_dict(
keyfile_dict=self.keyfile_dict,
)
storage_client = storage.Client(credentials=credentials)
# storage_client = storage.Client.from_service_account_json('/Users/kuldeep/Downloads/playconsole-345511-8f3ba450d69b.json')
# buckets = storage_client.list_buckets()
# print(f"OK: ")
# for bucket in buckets:
# print(bucket.name)
#
# blobs = storage_client.list_blobs(self.bucket_name)
#
# print("Blobs:")
# for blob in blobs:
# print(blob.name)
# print(f"buckets: {buckets}")
bucket = storage_client.bucket(self.bucket_name)
# Construct a client-side representation of a blob.
# Note `Bucket.blob` differs from `Bucket.get_blob` in that it doesn't
# retrieve metadata from Google Cloud Storage. As we don't use metadata in
# this example, using `Bucket.blob` is preferred here.
blob = bucket.blob(source_blob_name)
blob.download_to_file(file_obj)
#
print(f"Downloaded blob {source_blob_name} to file-like object.")
# return file_obj
# Before reading from file_obj, remember to rewind with file_obj.seek(0).
def get_all_review(self, count=100):
"""
returns: latest reviews by count
"""
from google_play_scraper import Sort, reviews
result, continuation_token = reviews(
self.package_name,
lang='en', # defaults to 'en'
country='us', # defaults to 'us'
sort=Sort.NEWEST, # defaults to Sort.NEWEST
count=50 # batch size
)
# If you pass `continuation_token` as an argument to the reviews function at this point,
# it will crawl the items after count review items.
all_results = result
while len(all_results) < count and result:
print(f"continuation_token: {continuation_token}")
result, continuation_token = reviews(
self.package_name,
continuation_token=continuation_token # defaults to None(load from the beginning)
)
all_results += result
        return all_results

# source: zeno-etl-libs 1.0.126 / zeno_etl_libs/helper/google/playstore/playstore.py
import json
import requests
from zeno_etl_libs.config.common import Config
class GoogleSheet:
def __init__(self):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.url = secrets['NODE_NOTIFICATION_BASE_URL']
self.auth_token = secrets['NOTIFICATION_AUTH_TOKEN']
def download(self, data):
"""
sample data
{
"spreadsheet_id":"1fhQPO7qkbly1q-iDoMN6jmc9VNhzmYBwIFpSsh76m0M",
"sheet_name":"Sheet1",
"listedFields": ["posting_date","id","store_id"]
}
"""
payload = {
'spreadsheet_id': data['spreadsheet_id'],
'sheet_name': data['sheet_name'],
'listedFields': data['listedFields']
}
response = requests.post(
url=self.url + "api/v1/googlesheet-access/read",
headers={'Authorization': self.auth_token, 'Content-Type': 'application/json'},
data=json.dumps(payload), timeout=(1, 60))
        if 200 <= response.status_code <= 299:
response_data = response.json()
if not response_data.get('is_error'):
return response.json().get('data', [])
else:
raise ValueError(f"Error: {str(response.text)}")
else:
raise Exception(f"API call failed, error: {response.text}")
def upload(self, data):
data = {
'spreadsheet_id': data['spreadsheet_id'],
'sheet_name': data['sheet_name'],
'headers': data['headers'],
'data': data['data'],
}
response = requests.post(
url=self.url + "api/v1/googlesheet-access/write",
headers={'Authorization': self.auth_token, 'Content-Type': 'application/json'},
data=json.dumps(data), timeout=(1, 60))
        if 200 <= response.status_code <= 299:
print(f"Data uploaded successfully. spreadsheet_id: {data['spreadsheet_id']}, name: {data['sheet_name']}")
else:
raise Exception(f"API call failed, error: {response.text}") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/helper/google/sheet/sheet.py | sheet.py |
import datetime
import time
import pandas as pd
import requests
from zeno_etl_libs.config.common import Config
class Dizprz:
def __init__(self):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.api_token = secrets["LEARNTRON_API_TOKEN"]
def __send_payload(self, offset, fetch_count, from_date, to_date):
fromTimePeriod = from_date.strftime("%Y-%m-%d")
toTimePeriod = to_date.strftime("%Y-%m-%d")
url = f"https://disprzexternalapi.disprz.com/api/analytics/getLearnerAnalytics?offset={offset}" \
f"&fetchCount={fetch_count}&fetchCompletionDetails=true&fetchUdfDetails=true" \
f"&fromTimePeriod={fromTimePeriod}&toTimePeriod={toTimePeriod}&fetchJourneySpecificDetails=true" \
f"&journeyId=0&fetchModuleSpecificDetails=true"
""" this to get the total count when offset is zero (first call) """
if offset == 0:
url = url + "&fetchTotalCount=true"
headers = {
'Learntron-Api-Token': self.api_token,
}
response = requests.get(url, headers=headers)
return response.json()
def get_disprz_dataframe(self):
total_count = 20000 # some value > offset to start with
fetch_count = 1000 # batch size
offset = 0
disprz_data = list() # entire data list
print("Start fetching the disprz data")
start_time = time.time()
to_date = datetime.datetime.today()
        from_date = to_date - datetime.timedelta(days=400)  # last 400 days
while offset < total_count:
data = self.__send_payload(offset=offset, fetch_count=fetch_count, from_date=from_date, to_date=to_date)
if offset == 0 and data:
total_count = data[0]['totalCount']
print(f"total data at disprz is: {total_count}")
offset += fetch_count
disprz_data += data
print(f"total: {total_count}, offset: {offset}, length: {len(data)}")
try:
df = pd.DataFrame(disprz_data)
except Exception as error:
raise Exception("Error while fetching data: {}". format(error))
print(f"total count: {total_count}, df len: {len(df)}")
print(f"total time taken was: {time.time() - start_time} sec")
if total_count == len(df):
print(f"fetched all data from disprz successfully")
        return df

# source: zeno-etl-libs 1.0.126 / zeno_etl_libs/helper/disprz/disprz.py
import re
import requests
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.helper.aws.s3 import S3
FILE_TYPES_MIME_MAPPING = {
'csv': 'application/csv',
'txt': 'text/plain',
'log': 'text/plain',
'xls': 'application/vnd.ms-excel',
'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'zip': 'application/zip'
}
def determine_file_mime_type(file_name):
if file_name.endswith(".csv"):
return FILE_TYPES_MIME_MAPPING['csv']
elif file_name.endswith(".txt"):
return FILE_TYPES_MIME_MAPPING['txt']
elif file_name.endswith(".log"):
return FILE_TYPES_MIME_MAPPING['log']
elif file_name.endswith(".xls"):
return FILE_TYPES_MIME_MAPPING['xls']
elif file_name.endswith(".xlsx"):
return FILE_TYPES_MIME_MAPPING['xlsx']
elif file_name.endswith(".zip"):
return FILE_TYPES_MIME_MAPPING['zip']
raise ValueError("No MIME type available")
class Email:
def __init__(self):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.url = secrets['NODE_NOTIFICATION_BASE_URL']
self.NOTIFICATION_EMAIL_FILE_POSTFIX_URL = "api/v1/queueing-notification/send-mail"
self.auth_token = secrets['NOTIFICATION_AUTH_TOKEN']
self.s3 = S3()
def send_email_file(self, subject, mail_body, to_emails, file_uris=None, file_paths=None, from_email='[email protected]'):
file_paths = file_paths if file_paths else []
multiple_files = list()
url = self.url + self.NOTIFICATION_EMAIL_FILE_POSTFIX_URL
headers = {'Authorization': self.auth_token}
if isinstance(to_emails, list):
to_emails = ','.join(email_id for email_id in to_emails)
data = {
'subject': subject,
'body': mail_body,
'to_emails': to_emails,
'from_email': from_email,
'is_html': 'false',
}
if file_uris is not None:
for file_uri in file_uris:
file_name = file_uri.split('/')[-1]
mime_type = determine_file_mime_type(file_name)
file_bytes = self.s3.get_file_object(uri=file_uri)
multiple_files.append(('file', (file_name, file_bytes, mime_type)))
for file_path in file_paths:
            file_name = file_path.split('/')[-1]
mime_type = determine_file_mime_type(file_name)
multiple_files.append(('file', (file_name, open(file_path, 'rb'), mime_type)))
response = requests.post(url, data=data, files=multiple_files, headers=headers)
if response.status_code != 200:
raise Exception(f"Email sending failed: {response.text}")
else:
print(f"Email sending successful: {response.text}")
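    # Illustrative usage (addresses and paths below are hypothetical):
    # >>> email = Email()
    # >>> email.send_email_file(subject="Daily report", mail_body="PFA the report",
    # ...                       to_emails=["[email protected]"], file_paths=["/tmp/report.csv"])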
def is_string_an_email(email: str):
"""
Checks if a given string is proper email or some fake string.
Eg:
email = "[email protected]"
is_string_an_email(email) --> True
:param email: email id string
:return: True | False
"""
email_regx = "^[a-zA-Z0-9+_.-]+@[a-zA-Z0-9.-]+$"
if re.match(email_regx, email):
return True
return False
def any_email_in_string(csv_emails):
"""
In a given comma separated string, if any email id is present then returns True
:param csv_emails: comma separated email id strings
:returns: True | False
"""
for string in csv_emails.split(","):
if is_string_an_email(email=string):
return True
return False | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/helper/email/email.py | email.py |
import errno
import json
import os
import re
import time
from subprocess import Popen
from shlex import split
from zeno_etl_libs.config.common import Config
import boto3
def ensure_session(session=None):
"""If session is None, create a default session and return it. Otherwise return the session passed in"""
if session is None:
session = boto3.session.Session()
return session
def get_execution_role(session):
"""Return the role ARN whose credentials are used to call the API.
Throws an exception if the current AWS identity is not a role.
Returns:
(str): The role ARN
"""
assumed_role = session.client("sts").get_caller_identity()["Arn"]
if ":user/" in assumed_role:
user_name = assumed_role[assumed_role.rfind("/") + 1:]
raise ValueError(
f"You are running as the IAM user '{user_name}'. You must supply an IAM role to run SageMaker jobs."
)
if "AmazonSageMaker-ExecutionRole" in assumed_role:
role = re.sub(
r"^(.+)sts::(\d+):assumed-role/(.+?)/.*$",
r"\1iam::\2:role/service-role/\3",
assumed_role,
)
return role
role = re.sub(
r"^(.+)sts::(\d+):assumed-role/(.+?)/.*$", r"\1iam::\2:role/\3", assumed_role
)
# Call IAM to get the role's path
role_name = role[role.rfind("/") + 1:]
arn = session.client("iam").get_role(RoleName=role_name)["Role"]["Arn"]
if ":role/" in arn:
return arn
message = "The current AWS identity is not a role: {}, therefore it cannot be used as a SageMaker execution role"
raise ValueError(message.format(arn))
def execute_notebook(
*,
image="notebook-runner",
input_path,
output_prefix,
notebook,
parameters,
role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
instance_type="ml.m5.large",
session,
in_vpc=True,
env="stage",
timeout_in_sec=7200
):
session = ensure_session(session)
configobj = Config.get_instance()
secrets = configobj.get_secrets()
if not role:
role = get_execution_role(session)
elif "/" not in role:
account = session.client("sts").get_caller_identity()["Account"]
role = "arn:aws:iam::{}:role/{}".format(account, role)
if "/" not in image:
# account = session.client("sts").get_caller_identity()["Account"]
# region = session.region_name
account = secrets['AWS_ACCOUNT_ID']
region = secrets['AWS_REGION']
image = "{}.dkr.ecr.{}.amazonaws.com/{}:latest".format(account, region, image)
    if notebook is None:
notebook = input_path
base = os.path.basename(notebook)
nb_name, nb_ext = os.path.splitext(base)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
job_name = (
(env + "-" + re.sub(r"[^-a-zA-Z0-9]", "-", nb_name))[: 62 - len(timestamp)]
+ "-"
+ timestamp
)
input_directory = "/opt/ml/processing/input/"
local_input = input_directory + os.path.basename(input_path)
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
local_output = "/opt/ml/processing/output/"
api_args = {
"ProcessingInputs": [
{
"InputName": "notebook",
"S3Input": {
"S3Uri": input_path,
"LocalPath": input_directory,
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
},
},
],
"ProcessingOutputConfig": {
"Outputs": [
{
"OutputName": "result",
"S3Output": {
"S3Uri": output_prefix,
"LocalPath": local_output,
"S3UploadMode": "EndOfJob",
},
},
],
},
"ProcessingJobName": job_name,
"ProcessingResources": {
"ClusterConfig": {
"InstanceCount": 1,
"InstanceType": instance_type,
"VolumeSizeInGB": 40,
}
},
"StoppingCondition": {"MaxRuntimeInSeconds": timeout_in_sec},
"AppSpecification": {
"ImageUri": image,
"ContainerArguments": [
"run_notebook",
],
},
"RoleArn": role,
"Environment": {},
}
if in_vpc:
if env == "stage":
api_args["NetworkConfig"] = {
'EnableInterContainerTrafficEncryption': False,
'EnableNetworkIsolation': False,
'VpcConfig': {
'SecurityGroupIds': [
"sg-0101c938006dab959"
],
'Subnets': [
'subnet-0446eb5f39df5ceca'
]
}
}
elif env == "prod":
api_args["NetworkConfig"] = {
'EnableInterContainerTrafficEncryption': False,
'EnableNetworkIsolation': False,
'VpcConfig': {
'SecurityGroupIds': [
"sg-08f218fe121e66d6e"
],
'Subnets': [
'subnet-0e5470f4100505343'
]
}
}
else:
            raise Exception("env (i.e. stage or prod) must be provided when in_vpc is True")
api_args["Environment"]["PAPERMILL_INPUT"] = local_input
api_args["Environment"]["PAPERMILL_OUTPUT"] = local_output + result
if os.environ.get("AWS_DEFAULT_REGION") is not None:
api_args["Environment"]["AWS_DEFAULT_REGION"] = os.environ["AWS_DEFAULT_REGION"]
api_args["Environment"]["PAPERMILL_PARAMS"] = json.dumps(parameters)
api_args["Environment"]["PAPERMILL_NOTEBOOK_NAME"] = notebook
api_args["Environment"]["AWS_ACCESS_KEY_ID"] = secrets["AWS_ACCESS_KEY_ID"]
api_args["Environment"]["AWS_SECRET_ACCESS_KEY"] = secrets["AWS_SECRET_ACCESS_KEY_ID"]
api_args["Environment"]["REGION_NAME"] = "ap-south-1"
client = boto3.client("sagemaker")
result = client.create_processing_job(**api_args)
job_arn = result["ProcessingJobArn"]
job = re.sub("^.*/", "", job_arn)
return job
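# The returned processing-job name can be polled with wait_for_complete() and the result notebook
# fetched with download_notebook(); run_notebook() below wires these steps together.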
def default_bucket():
return "sagemaker-ap-south-1-921939243643"
def upload_notebook(notebook, session=None):
"""Uploads a notebook to S3 in the default SageMaker Python SDK bucket for
this user. The resulting S3 object will be named "s3://<bucket>/papermill-input/notebook-YYYY-MM-DD-hh-mm-ss.ipynb".
Args:
notebook (str):
The filename of the notebook you want to upload. (Required)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The resulting object name in S3 in URI format.
"""
with open(notebook, "rb") as f:
return upload_fileobj(f, session)
def upload_fileobj(notebook_fileobj, session=None):
"""Uploads a file object to S3 in the default SageMaker Python SDK bucket for
this user. The resulting S3 object will be named "s3://<bucket>/papermill-input/notebook-YYYY-MM-DD-hh-mm-ss.ipynb".
Args:
notebook_fileobj (fileobj):
A file object (as returned from open) that is reading from the notebook you want to upload. (Required)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The resulting object name in S3 in URI format.
"""
session = ensure_session(session)
snotebook = "notebook-{}.ipynb".format(
time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
)
s3 = session.client("s3")
key = "papermill_input/" + snotebook
bucket = default_bucket()
s3path = "s3://{}/{}".format(bucket, key)
s3.upload_fileobj(notebook_fileobj, bucket, key)
return s3path
def get_output_prefix():
"""Returns an S3 prefix in the Python SDK default bucket."""
return "s3://{}/papermill_output".format(default_bucket())
def wait_for_complete(job_name, progress=True, sleep_time=10, session=None):
"""Wait for a notebook execution job to complete.
Args:
job_name (str):
The name of the SageMaker Processing Job executing the notebook. (Required)
progress (boolean):
If True, print a period after every poll attempt. (Default: True)
sleep_time (int):
The number of seconds between polls. (Default: 10)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
A tuple with the job status and the failure message if any.
"""
session = ensure_session(session)
client = session.client("sagemaker")
done = False
while not done:
if progress:
print(".", end="")
desc = client.describe_processing_job(ProcessingJobName=job_name)
status = desc["ProcessingJobStatus"]
if status != "InProgress":
done = True
else:
time.sleep(sleep_time)
if progress:
print()
return status, desc.get("FailureReason")
def download_notebook(job_name, output=".", session=None):
"""Download the output notebook from a previously completed job.
Args:
job_name (str): The name of the SageMaker Processing Job that executed the notebook. (Required)
output (str): The directory to copy the output file to. (Default: the current working directory)
session (boto3.Session):
A boto3 session to use. Will create a default session if not supplied. (Default: None)
Returns:
The filename of the downloaded notebook.
"""
session = ensure_session(session)
client = session.client("sagemaker")
desc = client.describe_processing_job(ProcessingJobName=job_name)
prefix = desc["ProcessingOutputConfig"]["Outputs"][0]["S3Output"]["S3Uri"]
notebook = os.path.basename(desc["Environment"]["PAPERMILL_OUTPUT"])
s3path = "{}/{}".format(prefix, notebook)
if not os.path.exists(output):
try:
os.makedirs(output)
except OSError as e:
if e.errno != errno.EEXIST:
raise
p1 = Popen(split("aws s3 cp --no-progress {} {}/".format(s3path, output)))
p1.wait()
return "{}/{}".format(output.rstrip("/"), notebook)
def run_notebook(
image,
notebook,
parameters={},
role=None,
instance_type="ml.m5.large",
output_prefix=None,
output=".",
session=None,
in_vpc=False,
env="stage",
timeout_in_sec=7200,
check_completion_status=True
):
"""Run a notebook in SageMaker Processing producing a new output notebook.
Args:
image (str): The ECR image that defines the environment to run the job (required).
notebook (str): The local notebook to upload and run (required).
parameters (dict): The dictionary of parameters to pass to the notebook (default: {}).
role (str): The name of a role to use to run the notebook (default: calls get_execution_role()).
instance_type (str): The SageMaker instance to use for executing the job (default: ml.m5.large).
output_prefix (str): The prefix path in S3 for where to store the output notebook
(default: determined based on SageMaker Python SDK)
output (str): The directory to copy the output file to (default: the current working directory).
session (boto3.Session): The boto3 session to use. Will create a default session if not supplied (default: None).
Returns:
        A tuple with the processing job name, the job status, the local path to the result notebook (or None if
        the job did not complete), and the failure reason (or None). The output notebook name is formed by adding
        a timestamp to the original notebook name.
"""
session = ensure_session(session)
if output_prefix is None:
output_prefix = get_output_prefix()
s3path = upload_notebook(notebook, session)
job_name = execute_notebook(
image=image,
input_path=s3path,
output_prefix=output_prefix,
notebook=notebook,
parameters=parameters,
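        # note: a fixed execution-role ARN is hard-coded below, so the `role` argument passed to run_notebook is not forwarded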
role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
instance_type=instance_type,
session=session,
in_vpc=in_vpc,
env=env,
timeout_in_sec=timeout_in_sec
)
print("Job {} started".format(job_name))
if check_completion_status:
status, failure_reason = wait_for_complete(job_name)
else:
status = 'Completed'
failure_reason = None
if status == "Completed":
local = download_notebook(job_name, output=output)
else:
local = None
return (job_name, status, local, failure_reason)
if __name__ == '__main__':
# run_notebook(
# image="notebook-runner",
# notebook="send-glue-job-logs.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# parameters={"job_name": "prod-8-sales"},
# in_vpc=False
# )
run_notebook(
notebook="redshift-write-demo.ipynb",
parameters={"env": "stage"},
in_vpc=True,
env="stage"
)
# run_notebook(
# image="notebook-runner",
# notebook="s3_read_write.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# in_vpc=False
# )
# run_notebook(
# image="notebook-runner",
# notebook="ml-demo.ipynb",
# role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
# in_vpc=True
# ) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/helper/run_notebook/run_notebook.py | run_notebook.py |
# Copyright 2015 CleverTap
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['CleverTap']
import json
import urllib.request, urllib.parse, urllib.error
import time
class CleverTap(object):
api_hostname = 'api.clevertap.com'
api_version = 1
class TargetActions(object):
CREATE = "create"
ESTIMATE = "estimate"
LIST = "list"
RESULT = "result"
STOP = "stop"
@classmethod
def valid_actions(cls):
return [cls.CREATE, cls.ESTIMATE, cls.LIST, cls.RESULT, cls.STOP]
def __init__(self, account_id, account_passcode, region=None):
self.account_id = account_id
self.account_passcode = account_passcode
self.account_region = region
self.cursor = None
self.url = None
self.records = []
if self.account_region is not None:
self.__class__.api_hostname = "%s.%s" % (self.account_region, self.__class__.api_hostname)
def __repr__(self):
return "%s(account_id=%s, passcode=%s, region=%s, endpoint=%s)" % (self.__class__.__name__, self.account_id, self.account_passcode, self.account_region, self.api_endpoint)
@property
def api_endpoint(self):
return 'https://%s/%s' % (self.__class__.api_hostname, self.__class__.api_version)
def upload(self, data, dryRun=False):
"""upload an array of profile and/or event dicts"""
# validate data
validation_error = self._validate("upload", data)
if validation_error:
raise Exception(validation_error)
# construct the base request url
self.url = '/'.join([self.api_endpoint, "upload"])
if dryRun:
self.url += "?dryRun=1"
# the request body is the json encoded data
body = json.dumps({"d":data})
# request headers
headers_params = {'Content-Type':'application/json;charset=utf-8'}
# make the request
return self._call(body=body, headers_params=headers_params)
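    # Illustrative upload payload (credentials and values below are hypothetical):
    # >>> ct = CleverTap("ACCOUNT_ID", "PASSCODE", region="in1")
    # >>> ct.upload([{"identity": "user-1", "type": "profile",
    # ...             "profileData": {"Name": "Test User", "Phone": "+911234567890"}}])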
def targets(self, action, payload):
# validate
validation_error = self._validate(action, payload)
if validation_error:
raise Exception(validation_error)
url_action = None
if action in self.TargetActions.valid_actions():
url_action = action
if not url_action:
print(("unknown targets action %s" % action))
return
if url_action == self.TargetActions.ESTIMATE:
payload['estimate_only'] = True
url_action = self.TargetActions.CREATE
# construct the request url
self.url = '/'.join([self.api_endpoint, "targets", "%s.json"%url_action])
# the request body is the json encoded payload
body = json.dumps(payload) if payload else None
# request headers
headers_params = {'Content-Type':'application/json'}
# make the request
return self._call(body=body, headers_params=headers_params)
def profile(self, email=None, identity=None, objectId=None):
"""fetch an individual user profile by ID, one of email, identity or CleverTap objectID"""
if email is None and identity is None and objectId is None:
raise Exception("profile requires one of email, identity or objectId")
# construct the request url
self.url = '/'.join([self.api_endpoint, "profile.json"])
if email is not None:
self.url += "?email=%s" % email
elif identity is not None:
self.url += "?identity=%s" % identity
elif objectId is not None:
self.url += "?objectId=%s" % objectId
# request headers
headers_params = {'Content-Type':'application/json'}
# make the request
return self._call(headers_params=headers_params)
def profiles(self, query, batch_size=10):
"""download profiles defined by query"""
return self._fetch_records("profiles", query, batch_size=batch_size)
def events(self, query, batch_size=10):
"""download events defined by query"""
return self._fetch_records("events", query, batch_size=batch_size)
def _fetch_records(self, type, query, batch_size=10):
# reset our records cache
self.records = []
# validate query
validation_error = self._validate(type, query)
if validation_error:
raise Exception(validation_error)
# construct the base request url
self.baseurl = '/'.join([self.api_endpoint, "%s.json"%type])
_args = urllib.parse.urlencode({"query":json.dumps(query), 'batch_size':batch_size})
# add query and batch_size as query args
self.url = "%s?%s"%(self.baseurl, _args)
headers_params = {'Content-Type':'application/json;charset=utf-8'}
# fetch initial cursor
while True:
print('fetching initial cursor')
res = self._call(headers_params=headers_params) or {}
if 'error' in res:
print(res)
if res.get('code', -1) == 1:
print("request throttled, retrying in 30")
time.sleep(30)
else:
# some other error abort
return self.records
else:
break
self.cursor = res.get("cursor", None)
# if we have a cursor then make a second request with the cursor
if self.cursor:
# construct the request url
# add the cursor
self.url = "%s?cursor=%s"%(self.baseurl, self.cursor)
# convenience inner function to handle cursor requests
def call_records():
print(("calling %s records" % batch_size))
# make the request
res = self._call() or {}
# make sure the cursor is ready with data
cursor_ready = not 'error' in res
if not cursor_ready:
print(res)
if res.get('code', -1) == 2:
wait_interval = 5
print(("cursor not ready, retrying again in %s" % wait_interval))
time.sleep(wait_interval)
return
# parse response
self.cursor = res.get("next_cursor", None)
new_records = res.get("records", [])
# add the new records array to our records array
self.records += new_records
print(("Received %s records; have %s total records" % (len(new_records), len(self.records))))
# if the request returns a new cursor, update the api url with the new cursor
if self.cursor:
self.url = "%s?cursor=%s"%(self.baseurl, self.cursor)
else:
self.url = None
# keep making requests with the new cursor as long as we have a cursor
while True:
if self.cursor is None:
print("no cursor, finished fetching records")
break
else:
print(("have cursor %s" % self.cursor))
call_records()
return self.records
def _call(self, **kwargs):
if self.url == None:
print("api url is None")
return None
headers_params = kwargs.get('headers_params', {})
# add account_id, and passcode to request headers
headers_params['X-CleverTap-Account-Id'] = self.account_id
headers_params['X-CleverTap-Passcode'] = self.account_passcode
args = kwargs.get("args", None)
if args:
args = urllib.parse.urlencode(args)
body = kwargs.get("body", None)
# Create the request
        req = urllib.request.Request(self.url, args.encode("utf-8") if args else None, headers_params)
        if body:
            # body is already a JSON string (callers apply json.dumps), so encode it directly;
            # re-serialising it with json.dumps would double-encode the payload
            req.data = body.encode("utf-8")
try:
# Open the request
f = urllib.request.urlopen(req)
# Get the response
response = f.read()
# Close the opened request
f.close()
except Exception as e:
print(e)
try:
return e.read()
except Exception as e:
pass
return None
# Parse and return the response
try:
res = self._parse_response(response)
except Exception as e:
print(e)
res = None
return res
def _parse_response(self, response):
"""Parse a response from the API"""
try:
res = json.loads(response)
except Exception as e:
            e.args += ('API response was: %s' % response,)
raise e
return res
def _validate(self, type, data):
"""Simple data validation"""
validation_error = None
if not self.account_id:
validation_error = "clevertap account id missing"
return validation_error
if not self.account_passcode:
validation_error = "clevertap account passcode missing"
return validation_error
if type in [self.TargetActions.CREATE, self.TargetActions.ESTIMATE]:
if data is None:
return "Push targets action %s requires a payload"%type
if (data.get("name", None) is None):
return "Push targets action %s requires a name"%type
if (data.get("where", None) is None) and (data.get("segment", None) is None):
return "Push targets action %s requires a where or segment value"%type
if (data.get("where", None) is not None) and (data.get("segment", None) is not None):
return "Push targets action %s does not support both a where value and a segment value, specify one or the other"%type
if (data.get("segment", None) is not None):
if data.get("segment") != "all":
return "Push targets action %s segment value must be 'all'"%type
if (data.get("content", None) is None):
return "Push targets action %s requires a content dict"%type
if (data.get("content", None) is not None):
if (data.get("content", {}).get("title", None) is None) or (data.get("content", {}).get("body", None) is None):
return "Push targets action %s content dict requires a title and a body"%type
if (data.get("devices", None) is None):
return "Push targets action %s requires a devices array"%type
return validation_error
if type == self.TargetActions.LIST:
# no-op
return validation_error
if type in [self.TargetActions.RESULT, self.TargetActions.STOP]:
if (data is None) or (data.get("id", None) is None):
validation_error = "Push targets %s action requires a target id"%type
return validation_error
if type == "upload":
for record in data or []:
identity = record.get("identity", None) or record.get("FBID", None) or record.get("GPID", None) or record.get("objectId", None)
if identity is None:
validation_error = "record must contain an identity, FBID, GPID or objectId field: %s"%record
return validation_error
record_type = record.get("type", None)
if record_type not in ['profile', 'event']:
validation_error = "record type must be profile or event: %s"%record
return validation_error
if record_type == "profile":
profileData = record.get("profileData", None)
if profileData is None or not isinstance(profileData, dict):
validation_error = "record with type profile must contain a profileData dict: %s"%record
return validation_error
Phone = profileData.get("Phone", None)
if Phone and (not isinstance(Phone, str) or not Phone.startswith("+")):
validation_error = "profile Phone must be a string and start with +<country code>: %s"%record
return validation_error
if record_type == "event":
evtData = record.get("evtData", None)
if evtData is None or not isinstance(evtData, dict):
validation_error = "record with type event must contain an evtData dict: %s"%record
return validation_error
return validation_error | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/helper/clervertap/clevertap_2.py | clevertap_2.py |
import json
import requests
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
class CleverTap:
"""
class: is used to fetch the data from CleverTap using API calls
"""
def __init__(self, api_name=None, event_name=None, batch_size=None, from_date=None, to_date=None, query=None):
"""
:params account_id: CleverTap Account ID
:params passcode: CleverTap passcode
:params api_name: CleverTap api name to fetch the different type of data eg: "profiles.json"
:params batch_size: no of records to be fetched per batch
:params from_date: start date filter, YYYYMMDD
:params to_date: end date filter, YYYYMMDD
:params query:
"""
self.logger = get_logger()
configobj = Config.get_instance()
secrets = configobj.get_secrets()
account_id = secrets['CLEVERTAP_ACCOUNT_ID']
passcode = secrets['CLEVERTAP_PASSCODE']
self.uri = "api.clevertap.com"
self.account_id = account_id if account_id else "TEST-K5Z-K95-RZ6Z"
self.passcode = passcode if passcode else 'd1d2cc1f8624434dbb3038b77d3fcf9d'
self.api_name = api_name if api_name else "profiles.json"
self.event_name = event_name if event_name else "App Launched"
self.batch_size = batch_size if batch_size else 5
self.from_date = from_date if from_date else 20220101
self.to_date = to_date if to_date else 20220101
self.cursor = None
self.all_records = list()
self.query = query or {
"event_name": self.event_name,
"from": self.from_date,
"to": self.to_date
}
self.headers = {
'X-CleverTap-Account-Id': self.account_id,
'X-CleverTap-Passcode': self.passcode,
'Content-Type': 'application/json'
}
self.logger.info(f"account_id: {account_id}, api_name: {api_name}, event_name:{event_name}")
def set_cursor(self):
url = f"https://{self.uri}/1/{self.api_name}?batch_size={self.batch_size}"
response = requests.request("POST", url, headers=self.headers, data=json.dumps(self.query))
if response.status_code == 200:
self.cursor = response.json()['cursor']
self.logger.info("Cursor set successfully!")
return self.cursor
else:
raise Exception(f"CleverTap cursor getting failed: {str(response.text)}")
def get_profile_data_batch(self):
url = f"https://{self.uri}/1/{self.api_name}?cursor={self.cursor}"
response = requests.request("GET", url, headers=self.headers, data="")
if response.status_code == 200:
data = response.json()
""" it will be set to None if all records fetched """
self.cursor = data.get("next_cursor")
""" adding batch records to all records list """
self.all_records += data['records']
self.logger.info(f"Batch records fetched successfully, count: {len(data['records'])}")
# self.logger.info(f"Batch records: {data['records']}")
return data['records']
else:
raise Exception(f"CleverTap profile_data getting failed: {str(response.text)}")
def get_profile_data_all_records(self):
self.set_cursor()
while self.cursor:
self.get_profile_data_batch()
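    # Illustrative usage (dates and batch size below are hypothetical):
    # >>> ct = CleverTap(api_name="profiles.json", event_name="App Launched",
    # ...                batch_size=500, from_date=20220101, to_date=20220131)
    # >>> ct.get_profile_data_all_records()
    # >>> records = ct.all_records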
def create_target(self):
"""
eg. query:
{"name": "My Sms API campaign", "estimate_only": True, "target_mode": "sms",
"where": {"event_name": "Charged", "from": 20171001, "to": 20171220, "common_profile_properties": {
"profile_fields": [{"name": "Customer Type", "operator": "equals", "value": "Platinum"}]}},
"respect_frequency_caps": True, "content": {"body": "Sms body"}, "when": "now"}
"""
response = requests.post(f'https://in1.{self.uri}/1/targets/{self.api_name}', headers=self.headers,
data=json.dumps(self.query))
if 200 <= response.status_code <= 299:
res = response.text
self.logger.info(f"create target successful, status code:{response.status_code} text: {res}")
return res
else:
raise Exception(f"create target failed, status code:{response.status_code} text: {response.text}") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/helper/clervertap/clevertap.py | clevertap.py |
import boto3
import datetime
from dateutil.tz import tzutc
from zeno_etl_libs.config.common import Config
class Redshift:
def __init__(self):
configobj = Config.get_instance()
self.secrets = configobj.get_secrets()
self.client = boto3.client('redshift')
self.cluster_identifier = self.secrets["CLUSTER_IDENTIFIER"]
self.database_name = self.secrets["REDSHIFT_WRITE_DB"]
def get_snapshot_identifier(self, snapshot_type='automated', utc_date: str = None) -> str:
"""
:param snapshot_type: 'automated' | 'manual'
:param utc_date: format "%Y-%m-%d"
Returns:
Automated snapshot identifier of given utc_date, if utc_date is not given then latest snapshot
"""
end_time = datetime.datetime.strptime(utc_date, "%Y-%m-%d") if utc_date else datetime.datetime.now(tz=tzutc())
end_time = end_time.replace(hour=23, minute=59, second=59)
# utc_date = str(end_time.date())
""" last 1 year """
start_time = end_time - datetime.timedelta(days=365)
response = self.client.describe_cluster_snapshots(
ClusterIdentifier=self.cluster_identifier,
SnapshotType=snapshot_type,
StartTime=start_time,
EndTime=end_time,
MaxRecords=100,
ClusterExists=True,
SortingEntities=[
{
'Attribute': 'CREATE_TIME',
'SortOrder': 'DESC'
},
]
)
for snap in response['Snapshots']:
print(snap['SnapshotIdentifier'])
if not utc_date:
return snap['SnapshotIdentifier']
else:
if utc_date in snap['SnapshotIdentifier']:
return snap['SnapshotIdentifier']
return ""
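    # Illustrative usage (date below is hypothetical):
    # >>> rs = Redshift()
    # >>> snapshot_id = rs.get_snapshot_identifier(snapshot_type='automated', utc_date='2022-06-01')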
def create_manual_snapshot(self):
"""
resource to read:
1. https://n2ws.com/blog/aws-automation/3-reasons-to-automate-your-manual-redshift-snapshots
2. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/redshift.html#Redshift.Client.create_cluster_snapshot
3. https://aws.amazon.com/redshift/pricing/
"""
# Keeping the minutes and seconds constant
time_now = datetime.datetime.now(tz=tzutc()).strftime('%Y-%m-%d-%H-00-00')
snapshot_identifier = f"{self.cluster_identifier}-{time_now}"
print(f"snapshot_identifier: {snapshot_identifier}")
print("MANUAL_SNAPSHOT_RETENTION_PERIOD type: ", type(int(self.secrets["MANUAL_SNAPSHOT_RETENTION_PERIOD"])))
        # TODO: read MANUAL_SNAPSHOT_RETENTION_PERIOD from secrets as an int (it currently comes back as a str, hence the cast below)
response = self.client.create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=self.cluster_identifier,
ManualSnapshotRetentionPeriod=int(self.secrets["MANUAL_SNAPSHOT_RETENTION_PERIOD"]),
Tags=[
{
'Key': 'backup_type',
'Value': 'monthly'
},
]
)
return response | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/helper/aws/redshift.py | redshift.py |
import os
import time
from io import StringIO, BytesIO
import boto3
from pandas import ExcelWriter
from zeno_etl_libs.config.common import Config
class S3:
def __init__(self, bucket_name=None, region=None):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.secrets = secrets
self.bucket_name = bucket_name or 'aws-glue-temporary-921939243643-ap-south-1'
self.region = region if region else 'ap-south-1'
self.aws_access_key_id = secrets['AWS_ACCESS_KEY_ID']
self.aws_secret_access_key = secrets['AWS_SECRET_ACCESS_KEY_ID']
self.s3_resource = boto3.resource('s3', self.region, aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key)
self.s3_client = boto3.client('s3', self.region, aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key)
def save_df_to_s3(self, df, file_name=None, index_label=False, index=False, header=True):
file_name = file_name or f"temp_{int(time.time() * 1000)}.csv"
csv_buffer = StringIO()
df.to_csv(csv_buffer, index_label=index_label, index=index, header=header)
self.s3_resource.Object(self.bucket_name, file_name).put(Body=csv_buffer.getvalue())
s3_uri = f"s3://{self.bucket_name}/{file_name}"
return s3_uri
def get_file_object(self, uri, encoding="utf-8"):
names = uri.split("//")[-1].split("/")
self.bucket_name = names[0]
key = "/".join(names[1:])
obj = self.s3_resource.Object(self.bucket_name, key).get()
big_str = obj["Body"].read().decode(encoding)
return big_str
def unload_redshift_s3(self, table_name, file_s3_uri, db, schema=None):
if schema:
table_location = f"""{schema}"."{table_name}"""
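            # the calling query wraps table_location in double quotes, so this evaluates to "schema"."table_name"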
else:
""" temp tables have session specific schema which has no name """
table_location = table_name
query = f"""
UNLOAD
('select * from "{table_location}"')
TO
'{file_s3_uri}'
CREDENTIALS
'aws_access_key_id={self.aws_access_key_id};aws_secret_access_key={self.aws_secret_access_key}'
FORMAT CSV
--HEADER
--ADDQUOTES;
"""
db.execute(query=query)
def write_to_db_from_s3_csv(self, table_name, file_s3_uri, db, schema=None, delete_folder=False):
if schema:
table_location = f"""{schema}"."{table_name}"""
else:
""" temp tables have session specific schema which has no name """
table_location = table_name
query = f"""
COPY
"{table_location}"
FROM
'{file_s3_uri}'
CREDENTIALS
'aws_access_key_id={self.aws_access_key_id};aws_secret_access_key={self.aws_secret_access_key}'
-- for better copy performance
COMPUPDATE OFF
STATUPDATE OFF
REGION 'ap-south-1'
IGNOREHEADER 1
FORMAT AS csv
MAXERROR 1 ;
"""
db.execute(query=query)
if delete_folder:
self.delete_s3_obj(uri=file_s3_uri, delete_folder=True)
else:
self.delete_s3_obj(uri=file_s3_uri)
def write_df_to_db(self, df, table_name, db, schema=None):
file_name = f"temp_{int(time.time() * 1000)}.csv"
file_s3_uri = self.save_df_to_s3(df=df, file_name=file_name) # eg. "s3://{self.bucket_name}/df.csv"
self.write_to_db_from_s3_csv(table_name=table_name, file_s3_uri=file_s3_uri, db=db, schema=schema)
self.delete_s3_obj(uri=file_s3_uri)
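    # Illustrative usage (table and schema names below are hypothetical):
    # >>> s3 = S3()
    # >>> s3.write_df_to_db(df=my_df, table_name="my-table", db=rs_db, schema="prod2-generico")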
def write_to_text_file_on_s3(self, file_name):
        file_name = file_name or f"temp_{int(time.time() * 1000)}.txt"
csv_buffer = StringIO()
self.s3_resource.Object(self.bucket_name, file_name).put(Body=csv_buffer.getvalue())
s3_uri = f"s3://{self.bucket_name}/{file_name}"
return s3_uri
def write_df_to_excel(self, data, file_name):
"""
df: data frame
file_name: with reference to /tmp folder
sheet_name:
"""
path = "/".join(os.getcwd().split("/")[:-2]) + "/tmp/"
if not os.path.exists(path):
os.mkdir(path, 0o777)
local_file_full_path = path + file_name
with ExcelWriter(local_file_full_path) as writer:
for sheet_name, df in data.items():
df.to_excel(writer, sheet_name=sheet_name)
return local_file_full_path
def write_text_to_file(self, text, file_name):
path = "/".join(os.getcwd().split("/")[:-2]) + "/tmp/"
if not os.path.exists(path):
os.mkdir(path, 0o777)
local_file_full_path = path + file_name
file_p = open(local_file_full_path, "w")
file_p.writelines(text)
return local_file_full_path
def upload_file_to_s3(self, file_name):
local_file_full_path = "/".join(os.getcwd().split("/")[:-2]) + "/tmp/" + file_name
s3_file_full_path = file_name
self.s3_client.upload_file(
Filename=local_file_full_path,
Bucket=self.bucket_name,
Key=s3_file_full_path,
)
s3_uri = f"s3://{self.bucket_name}/{s3_file_full_path}"
return s3_uri
def download_file_from_s3(self, file_name):
path = "/".join(os.getcwd().split("/")[:-2]) + "/tmp/"
print(f"path: {path}")
if not os.path.exists(path):
os.mkdir(path, 0o777)
head, tail = os.path.split(file_name)
local_file_full_path = path + tail
s3_file_full_path = file_name
self.s3_client.download_file(
Bucket=self.bucket_name,
Key=s3_file_full_path,
Filename=local_file_full_path
)
return local_file_full_path
def delete_s3_obj(self, uri, delete_folder=False):
if delete_folder:
response = self.s3_client.list_objects_v2(Bucket=self.bucket_name, Prefix="unload/" + uri.split('/')[-2] + "/")
files_in_folder = response["Contents"]
files_to_delete = []
for f in files_in_folder:
files_to_delete.append({"Key": f["Key"]})
response = self.s3_client.delete_objects(
Bucket=self.bucket_name, Delete={"Objects": files_to_delete}
)
print(response)
else:
names = uri.split("//")[-1].split("/")
self.bucket_name = names[0]
key = "/".join(names[1:])
response = self.s3_resource.Object(self.bucket_name, key).delete()
print(f"S3 object(uri: {uri}) delete response: {response}")
def move_s3_obj(self, source, target_key):
try:
self.s3_client.copy_object(
Bucket=self.bucket_name,
CopySource=source,
Key=target_key
)
except self.s3_client.exceptions.ObjectNotInActiveTierError as e:
print(f"Copying s3 obj failed: {e}")
raise e
self.delete_s3_obj(uri=f"s3:/{source}")
def read_df_from_s3_csv(self, bucket_name, object_key):
try:
import pandas as pd
client = boto3.client('s3')
csv_obj = client.get_object(Bucket=bucket_name, Key=object_key)
body = csv_obj['Body']
csv_string = body.read().decode('utf-8')
df = pd.read_csv(StringIO(csv_string))
return df
except Exception as error:
print(f"Read from S3 failed: {error}")
raise error | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/helper/aws/s3.py | s3.py |
import datetime
import json
import os
import requests
from zeno_etl_libs.config.common import Config
class Sql:
def __init__(self):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.url = secrets['DJANGO_ACCOUNTS_BASE_URL']
self.node_url = secrets['NODE_NOTIFICATION_BASE_URL']
self.auth_token = secrets['DJANGO_OAUTH_TOKEN']
self.node_auth_token = secrets['NOTIFICATION_AUTH_TOKEN']
self.env = os.environ.get('env', 'dev')
def create_big_query_log_for_mysql_db_update(self, request_body, event_name):
url = self.node_url + "api/v1/bigQuery-access/event-logs-write"
headers = {'Content-Type': 'application/json', 'Authorization': self.node_auth_token}
__BIG_QUERY_PLATFORM = 'data-science-server'
__BIG_QUERY_PLATFORM_ID = 2
__BIG_QUERY_PLATFORM_CODE = 'DSS'
request_dict = {
"__environment": self.env,
"logs": [{
"__platform": __BIG_QUERY_PLATFORM,
"__platform_properties": {
"__id": __BIG_QUERY_PLATFORM_ID,
"__code": __BIG_QUERY_PLATFORM_CODE
},
"__user_email": "[email protected]",
"user_properties": {
"_id": 5
},
"__event": event_name,
"event_properties": {
"_entity_details": {
"_entity_name": request_body['database_table_name']
},
"updated_fields": request_body['data']
},
"__trigger_at": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}]
}
requests.post(url, json=request_dict, headers=headers, timeout=(1, 10))
def update(self, script_data, logger=None) -> tuple:
"""
Used to hit the django-accounts dynamic-update-api that will make changes to the Mysql Table.
If you pass the logger_obj as a parameter to this function it will log to the file in case of errors.
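        Expected script_data shape (values below are illustrative):
            script_data = {
                "table": "some-mysql-table",
                "data_to_be_updated": [{"id": 123, "field-to-update": "new-value"}]
            }
        The 'id' key of each entry is used as the row primary key; the remaining keys become the update payload.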
"""
# construct request_body from data
request_body = {'database_table_name': script_data['table']}
request_body_data = list()
for entry in script_data['data_to_be_updated']:
update_data_dict = dict()
update_data_dict['id'] = entry.pop('id')
update_data_dict['update_data'] = entry
request_body_data.append(update_data_dict)
request_body['data'] = request_body_data
url = self.url + 'api/web/v1/dynamic-update-api/'
authorization_token = 'Bearer {}'.format(self.auth_token)
headers = {"Content-Type": "application/json", 'Authorization': authorization_token}
try:
response = requests.post(url, data=json.dumps(request_body), headers=headers, timeout=(1, 60))
if response.status_code == 200:
self.create_big_query_log_for_mysql_db_update(request_body, "DSS_MYSQL_UPDATE")
return True, response.json()
raise Exception(f"api/web/v1/dynamic-update-api failed: {response.text}")
except Exception as exc:
raise Exception(exc)
class Django:
def __init__(self):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.url = secrets['DJANGO_ACCOUNTS_BASE_URL']
self.node_url = secrets['NODE_NOTIFICATION_BASE_URL']
self.auth_token = secrets['DJANGO_OAUTH_TOKEN']
self.node_auth_token = secrets['NOTIFICATION_AUTH_TOKEN']
self.env = os.environ.get('env', 'dev')
def django_model_execution_log_create_api(self, request_body, logger=None) -> tuple:
"""
Used to hit django-accounts model-execution-log-admin api that will create entries in model_execution_log table.
If you pass the logger_obj as a parameter to this function it will log to the file in case of errors.
e.g. request_body = {
"object_id": 1, # PK (line-item)
"content_type": 74 # PK of django tables
}
"""
url = self.url + 'api/web/v1/model-execution-log-admin/'
headers = {"Content-Type": "application/json", 'Authorization': f"Bearer {self.auth_token}"}
response = requests.post(url, data=json.dumps(request_body), headers=headers)
if response.status_code in [200, 201]:
return True, response.json()
return False, response.text | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/django/api.py | api.py |
sales = """
select
z."store-id",
z."drug-id",
avg(z."mean-fptr") as "mean-fptr",
sum(z.quantity) as quantity
from
(
select
f."patient-id",
f."store-id",
f."id" as "bill-id",
c."id" as "drug-id",
a."quantity" as "sold-quantity",
coalesce(g."returned-quantity", 0) as "returned-quantity",
(a."quantity" - coalesce(g."returned-quantity", 0)) as "quantity",
a."rate",
case
when coalesce( ii."actual-quantity", 1) = 0
then coalesce(ii."net-value" , b."final-ptr")
else
coalesce(ii."net-value" / coalesce( ii."actual-quantity", 1), b."final-ptr")
end as "mean-fptr",
((a."quantity" - coalesce(g."returned-quantity", 0)) * a."rate") as "value"
from
"prod2-generico"."bills-1" f
join "prod2-generico"."bill-items-1" a on
f."id" = a."bill-id"
left join "prod2-generico"."inventory-1" b on
a."inventory-id" = b."id"
left join "prod2-generico"."invoice-items-1" ii on
b."invoice-item-id" = ii."id"
left join "prod2-generico"."drugs" c on
c."id" = b."drug-id"
left join "prod2-generico"."customer-return-items-1" g on
g."inventory-id" = a."inventory-id"
and g."bill-id" = a."bill-id"
where
DATEDIFF(d,date(a."created-at"),current_date) <= {days}
and (a."quantity" - coalesce(g."returned-quantity", 0)) > 0
and f."store-id" = {store_id}
)z
group by
z."store-id",
z."drug-id"
"""
expiry = """
-- short book
select
case
when date(inv.expiry) > current_date then 'Near Expiry'
else 'Expired'
end as "inventory-type",
date(i."invoice-date") as "invoice-date",
case
when "auto-short" = 1
and "home-delivery" = 0
and "patient-id" = 4480 then 'Auto Short'
when "auto-short" = 1
and "home-delivery" = 0
and "patient-id" != 4480 then 'Manual Short'
when "auto-short" = 0
and "auto-generated" = 0
and "home-delivery" = 0 then 'Patient Request'
when "auto-short" = 0
and "auto-generated" = 0
and "home-delivery" = 1 then 'Patient Request with HD'
when sb.id is null then 'Source not found'
else 'Unclassified'
end as "request-type",
inv.id as "inventory-id",
inv."store-id",
inv."drug-id",
d."drug-name",
date(inv."created-at") as created_date,
date(inv.expiry) as expiry,
inv.barcode,
inv."invoice-item-id",
inv.quantity,
i."id" as "invoice-id",
i."invoice-number",
e."name" as "store-name",
i."distributor-id",
f.name as "distributor-name",
d."type" as "drug-type",
d."category",
df."drug-grade",
d."cold-chain",
df.min,
df."safe-stock",
df.max,
inv."ptr" as fptr,
inv."ptr" * inv.quantity as value,
sb.id as "short-book-id",
e."franchisee-id" ,
case when (i."invoice-date") < (e."opened-at") then 'launch-stock'
else 'normal' end as "launch-flag",
inv."locked-quantity" + inv."locked-for-audit" + inv."locked-for-check" + inv."locked-for-transfer" as "locked-quantity",
(inv."locked-quantity" + inv."locked-for-audit" + inv."locked-for-check" + inv."locked-for-transfer") * inv."ptr" as "locked-value"
from
"prod2-generico"."inventory-1" inv
left join "prod2-generico".invoices i on
inv."invoice-id" = i.id
left join "prod2-generico"."invoice-items-1" ii on
inv."invoice-item-id" = ii."id"
left join "prod2-generico"."short-book-invoice-items" sbii on
ii."invoice-item-reference" = sbii."invoice-item-id"
left join "prod2-generico"."short-book-1" sb on
sbii."short-book-id" = sb.id
join "prod2-generico"."drugs" d on
d."id" = inv."drug-id"
join "prod2-generico"."stores" e on
e."id" = inv."store-id"
left join "prod2-generico"."distributors" f on
f."id" = i."distributor-id"
left join "prod2-generico"."drug-order-info" df on
df."drug-id" = inv."drug-id"
and df."store-id" = inv."store-id"
left join "prod2-generico"."invoices-1" i2
on inv."franchisee-invoice-id" = i2.id
where
((e."franchisee-id" = 1 and DATEDIFF(d,current_date,date(inv.expiry))< {expiry_days})
or( e."franchisee-id" != 1 and DATEDIFF(d,current_date,date(inv.expiry))< {fofo_expiry_days}))
-- and extract(yrs from (inv.expiry)) <= extract(yrs from (current_date)) + 1
and extract(yrs from (inv.expiry)) >= 2017
and ( (inv.quantity != 0)
or (inv."locked-quantity" + inv."locked-for-audit" + inv."locked-for-check" + inv."locked-for-transfer" > 0) )
and (e."franchisee-id" = 1
or (e."franchisee-id" != 1
and i2."franchisee-invoice" = 0))
"""
return_and_rotation = """
select
date(i."invoice-date") as "invoice-date",
'{inventory_type}' as "inventory-type",
case
when "auto-short" = 1
and "home-delivery" = 0
and "patient-id" = 4480 then 'Auto Short'
when "auto-short" = 1
and "home-delivery" = 0
and "patient-id" != 4480 then 'Manual Short'
when "auto-short" = 0
and "auto-generated" = 0
and "home-delivery" = 0 then 'Patient Request'
when "auto-short" = 0
and "auto-generated" = 0
and "home-delivery" = 1 then 'Patient Request with HD'
when sb.id is null then 'Source not found'
else 'Unclassified'
end as "request-type",
inv.id as "inventory-id",
inv."store-id",
inv."drug-id",
d."drug-name",
date(inv."created-at") as created_date,
date(inv.expiry) as expiry,
inv.barcode,
inv."invoice-item-id",
inv.quantity,
i."id" as "invoice-id",
i."invoice-number",
e."name" as "store-name",
i."distributor-id",
f.name as "distributor-name",
d."type" as "drug-type",
d."category",
df."drug-grade",
d."cold-chain",
df.min,
df."safe-stock",
df.max,
inv."ptr" as fptr,
inv."ptr" * inv.quantity as value,
e."franchisee-id" ,
case
when (i."invoice-date") < (e."opened-at") then 'launch-stock'
else 'normal'
end as "launch-flag",
sb.id as "short-book-id",
inv."locked-quantity" + inv."locked-for-audit" + inv."locked-for-check" + inv."locked-for-transfer" as "locked-quantity",
(inv."locked-quantity" + inv."locked-for-audit" + inv."locked-for-check" + inv."locked-for-transfer") * inv."ptr" as "locked-value"
from
"prod2-generico"."inventory-1" inv
left join "prod2-generico".invoices i on
inv."invoice-id" = i.id
left join "prod2-generico"."invoice-items-1" ii on
inv."invoice-item-id" = ii."id"
left join "prod2-generico"."short-book-invoice-items" sbii on
ii."invoice-item-reference" = sbii."invoice-item-id"
left join "prod2-generico"."short-book-1" sb on
sbii."short-book-id" = sb.id
join "prod2-generico"."drugs" d on
d."id" = inv."drug-id"
join "prod2-generico"."stores" e on
e."id" = inv."store-id"
left join "prod2-generico"."distributors" f on
f."id" = i."distributor-id"
left join "prod2-generico"."drug-order-info" df on
df."drug-id" = inv."drug-id"
and df."store-id" = inv."store-id"
left join "prod2-generico"."invoices-1" i2
on inv."franchisee-invoice-id" = i2.id
where
concat(inv."store-id", CONCAT('-', inv."drug-id")) in {store_drug_list}
and DATEDIFF(d,date(inv."created-at"),current_date)>= {days}
and ((e."franchisee-id" = 1
and DATEDIFF(d,current_date,date(inv.expiry))>={expiry_days})
or( e."franchisee-id" != 1
and DATEDIFF(d,current_date,date(inv.expiry))>={fofo_expiry_days})
or( e."franchisee-id" != 1
and (sb."created-at") < (e."opened-at")
and {FIFO_boolean_negative}))
and ( (inv.quantity != 0)
or (inv."locked-quantity" + inv."locked-for-audit" + inv."locked-for-check" + inv."locked-for-transfer" > 0) )
and DATEDIFF(d,
date(d."created-at"),current_date)>= 270
and (e."franchisee-id" = 1
or (e."franchisee-id" != 1
and i2."franchisee-invoice" = 0))
"""
dead_liquidation = """
select
transfer."origin-store",
transfer."origin-store-name",
transfer."destination-store",
transfer."destination-store-name",
transfer."transferred-quantity",
transfer."inventory-id",
transfer."drug-id",
d."drug-name",
d.type as "drug-type",
d.category,
doi."drug-grade",
transfer."received-at",
n."created-at" as "bill-timestamp",
m."rate",
transfer."final-ptr",
case
when n."created-at" is null then 0
else coalesce(m."quantity", 0)
end as "sold-quantity",
case
when n."created-at" is null then 0
else coalesce(m."returned-quantity", 0)
end as "returned-quantity",
case
when n."created-at" is null then 0
else coalesce((m."quantity" - m."returned-quantity") * m."rate", 0)
end as "net-value"
from
(
select
c."origin-store",
a."destination-store",
sum(b.quantity) as "transferred-quantity",
e."name" as "origin-store-name",
f."name" as "destination-store-name",
b."inventory-id",
c."drug-id",
avg(c."final-ptr") as "final-ptr",
min(b."received-at") as "received-at"
from
"prod2-generico"."stock-transfers-1" a
join "prod2-generico"."stock-transfer-items-1" b on
a."id" = b."transfer-id"
join "prod2-generico"."inventory-1" c on
b."inventory-id" = c."id"
left join "prod2-generico"."drugs" d on
c."drug-id" = d."id"
left join "prod2-generico"."stores" e on
c."origin-store" = e."id"
left join "prod2-generico"."stores" f on
a."destination-store" = f."id"
where
"source-store" = 111
and d."type" != 'category-4'
and date(b."transferred-at") <= current_date
and d."id" != 406872
and d."id" != 424692
and d."id" != 401179
and d."id" != 444213
and cast(b."received-at" as varchar) <> '0000-00-00 00:00:00'
group by
c."origin-store",
a."destination-store",
e."name",
f."name",
b."inventory-id",
c."drug-id"
)transfer
left join "prod2-generico"."bill-items-1" m
on
transfer."inventory-id" = m."inventory-id"
left join "prod2-generico"."bills-1" n
on
n."id" = m."bill-id"
and transfer."destination-store" = n."store-id"
left join "prod2-generico".drugs d
on
transfer."drug-id" = d.id
left join "prod2-generico"."drug-order-info" doi
on
doi."drug-id" = transfer."drug-id"
and doi."store-id" = transfer."destination-store"
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/queries/dead_stock/dead_stock_queries.py | dead_stock_queries.py |
import pandas as pd
import datetime
import numpy as np
import argparse
import os
from zeno_etl_libs.queries.dead_stock import dead_stock_queries
def dead_data_prep(store_id=None, days=270, logger=None, connection = None):
# from zeno_etl_libs.db.db import DB
# parser = argparse.ArgumentParser(description="This is ETL script.")
# parser.add_argument('-e', '--env', default="dev", type=str, required=False)
# args, unknown = parser.parse_known_args()
# env = args.env
# os.environ['env'] = env
# rs_db = DB()
# rs_db.open_connection()
rs_db = connection
'''Getting sales data'''
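    # dead_stock_queries.sales returns net sold quantity (bill items minus customer returns) and the mean
    # purchase rate per store-drug over the trailing `days` window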
sales_query = dead_stock_queries.sales.format(days=days, store_id=str(store_id))
sales = rs_db.get_df(sales_query)
# removing DC inventory
sales_store = sales[~sales['store-id'].isin([92, 111, 156, 160, 169, 172])]
logger.info('Sales: Distinct # of drugs' + str(sales['drug-id'].nunique()))
logger.info('Sales: Stores' + str(sales['store-id'].nunique()))
'''Getting inventory data'''
inventory_query = '''
select
inv."store-id",
inv."drug-id",
avg(coalesce(ii."net-value" / ii."actual-quantity", inv."final-ptr")) as "mean-fptr",
sum(inv."quantity") as "inventory-oh"
from
"prod2-generico"."inventory-1" inv
left join "prod2-generico"."invoice-items" ii on
inv."invoice-item-id" = ii."franchisee-invoice-item-id"
left join "prod2-generico"."stores" s
on
s.id = inv."store-id"
left join "prod2-generico"."invoices-1" i
on
i.id = inv."franchisee-invoice-id"
where
inv.quantity > 0
and inv."store-id" = {store_id}
and (s."franchisee-id" = 1
or (s."franchisee-id" != 1
and i."franchisee-invoice" = 0))
group by
inv."store-id",
inv."drug-id"
'''.format(store_id=store_id)
inventory = rs_db.get_df(inventory_query)
# removing DC inventory
inventory_store = inventory[~inventory['store-id'].isin([92, 111, 156, 160, 169, 172])]
logger.info('Inv: Distinct # of drugs ' + str(inventory_store['drug-id'].nunique()))
logger.info('Inv: Stores ' + str(inventory_store['store-id'].nunique()))
store_inventory_sales = inventory_store.merge(
sales_store, on=['store-id', 'drug-id'], how='outer',
suffixes=('', '-y'))
# print(store_inventory_sales.columns)
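    # prefer the inventory-side mean-fptr; fill gaps with the sales-side value (suffixed '-y' by the merge)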
store_inventory_sales['mean-fptr'] = store_inventory_sales['mean-fptr'].combine_first(
store_inventory_sales['mean-fptr-y'])
store_inventory_sales.drop('mean-fptr-y', axis=1, inplace=True)
store_inventory_sales['quantity'].fillna(0, inplace=True)
store_inventory_sales['inventory-oh'].fillna(0, inplace=True)
# print('# of line items', store_inventory_sales.shape[0])
logger.info('Distinct # of drugs '+ str(store_inventory_sales['drug-id'].nunique()))
logger.info('Store - '+ str(store_id))
logger.info('Total inventory count '+ str(store_inventory_sales['inventory-oh'].sum()))
logger.info('Total sales count ' + str(store_inventory_sales.quantity.sum()))
'''Getting drug and store info '''
drug_store_info_query = '''
select
store.id,
store.name,
d.id,
d."drug-name",
d.type,
d.category,
coalesce(doi."drug-grade", 'NA') as "drug-grade"
from
"prod2-generico".stores store
cross join "prod2-generico".drugs d
left join "prod2-generico"."drug-order-info" doi on
d.id = doi."drug-id"
and store.id = doi."store-id"
where
store.id = {store_id}
'''.format(store_id=store_id)
drug_store_info = rs_db.get_df(drug_store_info_query)
drug_store_info.columns = ['store-id', 'store-name', 'drug-id', 'drug-name',
'type', 'category', 'drug-grade']
store_inventory_sales = store_inventory_sales.merge(
drug_store_info, on=['store-id', 'drug-id'], how='left')
#rs_db.close_connection()
# store_inventory_sales = store_inventory_sales.merge(ptr, how='left', on='drug_id')
return sales, inventory, store_inventory_sales
def dead_stock_categorization(
sales, inventory, store_inventory_sales, stores_list,
logger=None, days=270, expiry_days=120,fofo_expiry_days=210, connection = None):
# from zeno_etl_libs.db.db import DB
# parser = argparse.ArgumentParser(description="This is ETL script.")
# parser.add_argument('-e', '--env', default="dev", type=str, required=False)
# args, unknown = parser.parse_known_args()
# env = args.env
# os.environ['env'] = env
# rs_db = DB()
# rs_db.open_connection()
rs_db = connection
'''Dead Stock Categorization
1. Expiry - within 4 months or already expired: Return
2. No sales at enterprise level (in 9 months): Return
3. No sales at Store "A", but at other stores (in 9 months): Rotate
4. FIFO dead: sold in stores but inventory created more than 9 months ago: Rotate
'''
# 1.
'''Getting expired inventory data'''
expiry_query = dead_stock_queries.expiry.format(expiry_days=expiry_days,fofo_expiry_days=fofo_expiry_days)
expiry_barcodes = rs_db.get_df(expiry_query)
expiry_agg = expiry_barcodes.groupby(['store-id', 'drug-id'])['quantity'].sum().reset_index()
expiry_agg.columns = ['store-id', 'drug-id', 'expired-quantity']
store_inventory_sales_with_exp = store_inventory_sales.merge(expiry_agg, on=['store-id', 'drug-id'], how='left')
store_inventory_sales_with_exp['expired-quantity'].fillna(0, inplace=True)
store_inventory_sales_with_exp['quantity-rem-after-expiry'] = (
store_inventory_sales_with_exp['inventory-oh'] - store_inventory_sales_with_exp['expired-quantity']
)
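    # quantity-rem-after-expiry = on-hand stock net of the near-expiry/expired units already flagged for return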
logger.info('Expired to be returned: units to be returned' +
str(store_inventory_sales_with_exp['expired-quantity'].sum()))
logger.info('Expired to be returned: value' + str(round(
(expiry_barcodes['value']).sum()/10000000,2)) + 'Crs')
logger.info('Post expiry drugs inventory')
logger.info('# of line items' + str(store_inventory_sales_with_exp.shape[0]))
logger.info('Distinct # of drugs' + str(store_inventory_sales_with_exp['drug-id'].nunique()))
logger.info('Stores' + str(store_inventory_sales_with_exp['store-id'].nunique()))
# 2.
drug_wise_sales = store_inventory_sales_with_exp[
store_inventory_sales_with_exp['store-id'].isin(stores_list)
].groupby(['drug-id'])['quantity'].sum().reset_index()
drugs_no_enterprise_sales = drug_wise_sales.loc[drug_wise_sales['quantity'] == 0, 'drug-id']
drug_returns = store_inventory_sales_with_exp[
(store_inventory_sales_with_exp['drug-id'].isin(drugs_no_enterprise_sales.values)) &
(store_inventory_sales_with_exp['store-id'].isin(stores_list))
]
store_inventory_sales_with_exp_post_returns = store_inventory_sales_with_exp[
(~store_inventory_sales_with_exp['drug-id'].isin(drugs_no_enterprise_sales.values))
]
logger.info('Drug with no enterprise sales: units to be returned ' + str(drug_returns['quantity-rem-after-expiry'].sum()))
logger.info('Drug with no enterprise sales: value ' + str(round((drug_returns['mean-fptr'].astype(float)*drug_returns['quantity-rem-after-expiry'].astype(float)).sum()/10000000, 2)) + 'Crs')
logger.info('Post returns, drugs inventory')
logger.info('# of line items ' + str(store_inventory_sales_with_exp_post_returns.shape[0]))
logger.info('Distinct # of drugs ' + str(store_inventory_sales_with_exp_post_returns['drug-id'].nunique()))
logger.info('Stores ' + str(store_inventory_sales_with_exp_post_returns['store-id'].nunique()))
# getting barcode level info for drugs to return
return_store_drug_comb = drug_returns['store-id'].astype(int).astype(str) + '-' + drug_returns['drug-id'].astype(int).astype(str)
return_store_drug_list = str(list(
return_store_drug_comb.values)).replace('[', '(').replace(']', ')')
return_query = dead_stock_queries.return_and_rotation.format(
store_drug_list=return_store_drug_list,
inventory_type='Return',
days=0,
expiry_days=expiry_days,
fofo_expiry_days=fofo_expiry_days,
FIFO_boolean_negative = True)
return_barcodes = rs_db.get_df(return_query)
separate_old_inv_query = '''
select
concat("store-id", CONCAT('-', "drug-id")) as "store-drug-id"
from
"prod2-generico"."inventory-1" inv
left join "prod2-generico"."stores" s
on
inv."store-id" = s.id
where
"store-id" in {stores}
and "drug-id" in {drugs}
and (quantity > 0
or ((inv.quantity != 0)
or (inv."locked-quantity" + inv."locked-for-audit" + inv."locked-for-check" + inv."locked-for-transfer" > 0)))
and (((s."franchisee-id" = 1)
and(inv."created-at" <DATEADD(d,-{days},CURRENT_DATE)))
or ((s."franchisee-id" != 1)
and(inv."created-at" <DATEADD(d,-10,CURRENT_DATE))))
group by
"store-id",
"drug-id"
'''.format(days=days, stores=tuple(drug_returns['store-id'].unique()) + (0,0), drugs=tuple(drug_returns['drug-id'].unique()) + (0,0))
separate_old_inv = rs_db.get_df(separate_old_inv_query)
return_barcodes['store-drug-id'] = return_barcodes['store-id'].astype(str) + '-' + return_barcodes['drug-id'].astype(str)
return_barcodes = return_barcodes[return_barcodes['store-drug-id'].isin(tuple(separate_old_inv['store-drug-id']))]
return_barcodes = return_barcodes.drop(columns=['store-drug-id'])
# FOFO - No Baby food in return/rotate except launch stock, No PR
conditions = [((return_barcodes['franchisee-id'].astype(int) != 1)
& (return_barcodes['launch-flag'].astype(str) != 'launch-stock')
& (return_barcodes['drug-type'].astype(str) == 'baby-food')),
((return_barcodes['franchisee-id'].astype(int) != 1)
& (return_barcodes['request-type'].isin(['Patient Request', 'Patient Request with HD'])))]
choices = [1, 1]
return_barcodes['delete'] = np.select(conditions, choices)
return_barcodes = return_barcodes[return_barcodes['delete']!=1]
return_barcodes = return_barcodes.drop(columns=['delete'])
# 3.
# store drug combination (store active for than 6 months)
store_inventory_sales_active = store_inventory_sales_with_exp_post_returns[
store_inventory_sales_with_exp_post_returns['store-id'].isin(stores_list)]
store_drug_no_sale = store_inventory_sales_active.loc[
(store_inventory_sales_active['quantity'] == 0)]
store_drug_with_sale = store_inventory_sales_with_exp_post_returns.loc[
(store_inventory_sales_with_exp_post_returns['quantity'] != 0)
]
zippin_inventory = store_drug_no_sale.groupby(
['type', 'category', 'drug-id'])['quantity-rem-after-expiry'].\
sum().reset_index()
logger.info('Rotating inventory stats')
logger.info('# of drugs to rotate ' + str(zippin_inventory.shape[0]))
logger.info('Quantity to be rotated ' + str(zippin_inventory['quantity-rem-after-expiry'].sum()))
logger.info('Rotation Drugs value ' + str(round((store_drug_no_sale['mean-fptr'].astype(float) *store_drug_no_sale['quantity-rem-after-expiry'].astype(float)).sum()/10000000,2)) + 'Crs')
# getting barcode level info for drugs to rotate
rotate_store_drug_comb = store_drug_no_sale['store-id'].astype(int).astype(str) + '-' + store_drug_no_sale['drug-id'].astype(int).astype(str)
#logger.info(list(rotate_store_drug_comb.values))
#logger.info(len(list(rotate_store_drug_comb.values)))
rotation_drug_list = str(list(
rotate_store_drug_comb.values)).replace('[', '(').replace(']', ')')
if len(list(rotate_store_drug_comb.values))==0:
rotation_drug_list = [0]
rotation_drug_list = str(list(rotation_drug_list)).replace('[', '(').replace(']', ')')
rotation_query = dead_stock_queries.return_and_rotation.format(
store_drug_list=rotation_drug_list,
inventory_type='Rotate',
days=0,
expiry_days=expiry_days,
fofo_expiry_days=fofo_expiry_days,
FIFO_boolean_negative = True)
rotation_barcodes = rs_db.get_df(rotation_query)
separate_old_inv_query = '''
select
concat("store-id", CONCAT('-', "drug-id")) as "store-drug-id"
from
"prod2-generico"."inventory-1" inv
left join "prod2-generico"."stores" s
on
inv."store-id" = s.id
where
"store-id" in {stores}
and "drug-id" in {drugs}
and (quantity > 0
or ((inv.quantity != 0)
or (inv."locked-quantity" + inv."locked-for-audit" + inv."locked-for-check" + inv."locked-for-transfer" > 0)))
and (((s."franchisee-id" = 1)
and(inv."created-at" <DATEADD(d,-{days},CURRENT_DATE)))
or ((s."franchisee-id" != 1)
and(inv."created-at" <DATEADD(d,-10,CURRENT_DATE))))
group by
"store-id",
"drug-id"
'''.format(days=days,stores=tuple(store_drug_no_sale['store-id'].unique()) + (0,0), drugs=tuple(store_drug_no_sale['drug-id'].unique()) + (0,0))
separate_old_inv = rs_db.get_df(separate_old_inv_query)
rotation_barcodes['store-drug-id'] = rotation_barcodes['store-id'].astype(str) + '-' + rotation_barcodes['drug-id'].astype(str)
rotation_barcodes = rotation_barcodes[rotation_barcodes['store-drug-id'].isin(tuple(separate_old_inv['store-drug-id']))]
rotation_barcodes = rotation_barcodes.drop(columns=['store-drug-id'])
# FOFO - No Baby food in return/rotate except launch stock, No PR
conditions = [((rotation_barcodes['franchisee-id'].astype(int) != 1)
& (rotation_barcodes['launch-flag'].astype(str) != 'launch-stock')
& (rotation_barcodes['drug-type'].astype(str) == 'baby-food')),
((rotation_barcodes['franchisee-id'].astype(int) != 1)
& (rotation_barcodes['request-type'].isin(['Patient Request', 'Patient Request with HD'])))]
choices = [1, 1]
rotation_barcodes['delete'] = np.select(conditions, choices)
rotation_barcodes = rotation_barcodes[rotation_barcodes['delete'] != 1]
rotation_barcodes = rotation_barcodes.drop(columns=['delete'])
# 4.
fifo_drug = store_drug_with_sale.loc[store_drug_with_sale['inventory-oh'] != 0,
['store-id', 'drug-id']].drop_duplicates()
fifo_drug_list = fifo_drug['store-id'].astype(str) + '-' + fifo_drug['drug-id'].astype(str)
fifo_drug_list = str(list(fifo_drug_list)).replace('[','(').replace(']',')')
#print('fifo drug list - {}'.format(fifo_drug_list))
fifo_query = dead_stock_queries.return_and_rotation.format(
store_drug_list=fifo_drug_list,
inventory_type='FIFO Dead',
days=days,
expiry_days=expiry_days,
fofo_expiry_days=fofo_expiry_days,
FIFO_boolean_negative=False)
# logger.info(fifo_query)
fifo_barcodes = rs_db.get_df(fifo_query)
logger.info('FIFO dead stock stats')
logger.info('Quantity to be rotated' + str(fifo_barcodes['quantity'].sum()))
logger.info('FIFO Drugs value' + str(round((fifo_barcodes['fptr'].astype(float) *fifo_barcodes['quantity'].astype(float)).sum()/10000000,2)) + 'Crs')
# rs_db.close_connection()
return zippin_inventory, store_drug_no_sale, store_drug_with_sale, expiry_barcodes, return_barcodes, rotation_barcodes, fifo_barcodes
def dead_value_bucket(dead_rotate):
dead_rotate = dead_rotate.groupby('inventory-id')['value']. sum().reset_index()
dead_rotate = dead_rotate.sort_values('value', ascending=False)
dead_rotate['cumsum-percentage'] = (
dead_rotate['value'].cumsum()/dead_rotate['value'].sum())
dead_rotate['bucket'] = np.select(
[dead_rotate['cumsum-percentage'] <= 0.01,
(dead_rotate['cumsum-percentage'] > 0.01) &
(dead_rotate['cumsum-percentage'] <= 0.02),
(dead_rotate['cumsum-percentage'] > 0.02) &
(dead_rotate['cumsum-percentage'] <= 0.05),
(dead_rotate['cumsum-percentage'] > 0.05) &
(dead_rotate['cumsum-percentage'] <= 0.10),
(dead_rotate['cumsum-percentage'] > 0.10) &
(dead_rotate['cumsum-percentage'] <= 0.20),
(dead_rotate['cumsum-percentage'] > 0.20) &
(dead_rotate['cumsum-percentage'] <= 0.40),
(dead_rotate['cumsum-percentage'] > 0.40) &
(dead_rotate['cumsum-percentage'] <= 0.60),
(dead_rotate['cumsum-percentage'] > 0.60) &
(dead_rotate['cumsum-percentage'] <= 0.80),
dead_rotate['cumsum-percentage'] > 0.80
],
['under 1%', '1-2%', '2-5%', '5-10%', '10-20%', '20-40%',
'40-60%', '60-80%', 'more than 80%'])
return dead_rotate | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/queries/dead_stock/dead_stock_categorisation.py | dead_stock_categorisation.py |
max_bill_id = """
select
max("bill-id") as "bill-id-max"
from
"prod2-generico"."{}"
"""
max_return_id = """
select
max("return-item-id") as "return-item-id-max"
from
"prod2-generico"."{}"
"""
insert_sales_record = """
insert
into
"prod2-generico"."{}" (
"updated-by",
"updated-at",
"bill-id",
"patient-id",
"store-id",
"inventory-id",
"drug-id",
"drug-name",
"type",
"category",
"composition",
"company",
"company-id",
"composition-master-id",
"quantity",
"created-at",
"year-created-at",
"month-created-at",
"rate",
"net-rate",
"net-quantity",
"revenue-value",
"purchase-rate",
"ptr",
"mrp",
"substitution-status",
"created-by",
"bill-flag",
"old-new",
"first-bill-date",
"p-reference",
"patient-category",
"lp-flag",
"min",
"max",
"safe-stock",
"promo-code-id",
"payment-method",
"doctor-id",
"code-type",
"pc-type",
"promo-code",
"campaign",
"pr-flag",
"hd-flag",
"ecom-flag",
"substitution-status-g",
"substitution-status-trend",
"goodaid-availablity-flag",
"cgst-rate",
"cgst-amt",
"cgst",
"sgst-rate",
"sgst-amt",
"sgst",
"tax-rate",
"igst-rate",
"igst-amt",
"utgst-rate",
"utgst-amt",
"serial",
"hsncode",
"is-repeatable",
"store-name",
"store-manager",
"line-manager",
"abo",
"city",
"store-b2b",
"store-month-diff",
"store-opened-at",
"franchisee-id",
"franchisee-name",
"cluster-id",
"cluster-name",
"return-item-id",
"bill-item-id",
"promo-discount",
"type-at-selling",
"category-at-selling",
"created-date",
"invoice-item-reference",
"distributor-id",
"distributor-name",
"billed-at-return-ref",
"return-reason",
"drug-grade",
"acquired" ,
"old-new-static",
"crm-flag",
"invoice-id",
"franchisee-invoice",
"group"
)
select
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
f."id" as "bill-id",
f."patient-id" as "patient-id",
f."store-id" as "store-id",
b."id" as "inventory-id" ,
b."drug-id" as "drug-id",
c."drug-name" as "drug-name",
c."type",
c."category",
c."composition",
c."company",
c."company-id" as "company-id",
c."composition-master-id" as "composition-master-id",
a."quantity",
f."created-at" as "created-at" ,
extract(year
from
f."created-at") as "year-created-at",
extract(month
from
f."created-at") as "month-created-at",
a."rate",
a."rate" as "net-rate",
a."quantity" as "net-quantity",
(a."rate" * a."quantity") as "revenue-value",
b."purchase-rate" as "purchase-rate",
b."ptr",
b."mrp",
s."substitution-status" as "substitution-status",
f."created-by" as "created-by",
'gross' as "bill-flag",
case
when (12 * (extract (year
from
f."created-at") - extract (year
from
pm."first-bill-date")) + (extract (month
from
f."created-at") - extract (month
from
pm."first-bill-date")))>= 1 then 'old'
else 'new'
end as "old-new",
pm."first-bill-date" as "first-bill-date",
p."reference" as "p-reference",
p."patient-category" as "patient-category",
b."franchisee-inventory" as "lp-flag",
doi."min",
doi."max" ,
doi."safe-stock" as "safe-stock",
f."promo-code-id" as "promo-code-id",
f."payment-method" as "payment-method",
f."doctor-id" as "doctor-id",
pc."code-type" as "code-type" ,
pc."type" as "pc-type",
pc."promo-code" as "promo-code",
ca."campaign",
NVL(pso2."pr-flag",
false),
NVL(pso2."hd-flag",
false),
NVL(pso2."ecom-flag",
false),
case
when (s."substitution-status" = 'substituted'
and c."company-id" = 6984
and f."created-at" >= mca."store-first-inv-date") then 'ga-substituted'
when (s."substitution-status" = 'substituted'
and mca."store-first-inv-date" is null ) then 'ga-not-available'
else s."substitution-status"
end as "substitution-status-g",
case
when (s."substitution-status" = 'substituted'
and c."company-id" = 6984
and f."created-at" >= mca."store-first-inv-date") then 'substituted'
else s."substitution-status"
end as "substitution-status-trend",
case
when (f."created-at" >= casl."system-first-inv-date") then 'available'
else 'not-available'
end as "goodaid-availablity-flag",
a."cgst-rate" as "cgst-rate" ,
a."cgst-amt" as "cgst-amt" ,
a."cgst" ,
a."sgst-rate" as "sgst-rate" ,
a."sgst-amt" as "sgst-amt" ,
a."sgst" ,
(a."cgst-rate" + a."sgst-rate") as "tax-rate",
a."igst-rate" as "igst-rate" ,
a."igst-amt" as "igst-amt" ,
a."utgst-rate" as "utgst-rate" ,
a."utgst-amt" as "utgst-amt" ,
f."serial" ,
c."hsncode",
c."is-repeatable" as "is-repeatable",
msm.store as "store-name",
msm."store-manager" ,
msm."line-manager",
msm.abo,
msm.city,
(case when (f."gst-number" is not null and f."gst-number"!='') then 'B2B' else msm."store-b2b" end) as "store-b2b",
msm."month-diff" as "store-month-diff",
msm."opened-at" as "store-opened-at",
msm."franchisee-id",
msm."franchisee-name",
msm."cluster-id",
msm."cluster-name",
NULL as "return-item-id",
a.id as "bill-item-id",
a."promo-discount" as "promo-discount",
c."type" as "type-at-selling",
c."category" as "category-at-selling",
date(f."created-at") as "created-date",
ii1."invoice-item-reference",
i2."distributor-id",
ds."name" as "distributor-name",
f."created-at" as "billed-at-return-ref",
NULL as "return-reason",
doi."drug-grade",
msm."acquired" ,
msm."old-new-static",
NVL(pso2."crm-flag",
false),
i2.id as "invoice-id",
i2."franchisee-invoice",
d1."group"
from
"prod2-generico"."bills-1" f
left join "prod2-generico"."bill-items-1" a on
f."id" = a."bill-id"
left join "prod2-generico"."inventory-1" b on
a."inventory-id" = b."id"
left join "prod2-generico"."invoice-items-1" ii1 on
b."invoice-item-id" = ii1.id
left join "prod2-generico".invoices i2 on
ii1."invoice-id" = i2.id
left join "prod2-generico".distributors ds ON
i2."distributor-id" = ds.id
left join "prod2-generico"."drugs" c on
c."id" = b."drug-id"
left join "prod2-generico"."drug-unique-composition-mapping" d1 on
b."drug-id" = d1."drug-id"
left join "prod2-generico"."bill-items-substitutes" s on
a."id" = s."bill-item-id"
left join "prod2-generico"."patients-metadata-2" pm on
f."patient-id" = pm."id"
left join "prod2-generico"."patients" p on
f."patient-id" = p."id"
left join "prod2-generico"."drug-order-info" doi on
(doi."store-id" = f."store-id"
and doi."drug-id" = b."drug-id")
left join "prod2-generico"."promo-codes" pc on
NVL(f."promo-code-id",
0) = pc."id"
left join "prod2-generico"."campaigns" ca on
NVL(pc."campaign-id",
0) = ca."id"
left join "prod2-generico"."{}" as pso2 on
a."bill-id" = pso2."id"
left join "prod2-generico"."group-activation" mca on
f."store-id" = mca."store-id"
and d1."group" = mca."group"
left join "prod2-generico"."group-activation-system-level" casl on
d1."group" = casl."group"
inner join "prod2-generico"."{}" as msm on
f."store-id" = msm."id"
where
f."id" > {}
union all
select
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
a."bill-id" as "bill-id",
f."patient-id" as "patient-id",
f."store-id" as "store-id",
b."id" as "inventory-id" ,
b."drug-id" as "drug-id",
c."drug-name" as "drug-name",
c."type",
c."category",
c."composition",
c."company",
c."company-id" as "company-id",
c."composition-master-id" as "composition-master-id",
(a."returned-quantity") as "quantity",
f."returned-at" as "created-at",
extract(year
from
f."returned-at") as "year-created-at",
extract(month
from
f."returned-at") as "month-created-at",
(a."rate") as "rate",
(a."rate" *-1) as "net-rate",
(a."returned-quantity" *-1) as "net-quantity",
(a."rate" * a."returned-quantity" *-1) as "revenue-value",
b."purchase-rate" as "purchase-rate",
b."ptr",
b."mrp",
'return' as "substitution-status",
f."processed-by" as "created-by",
'return' as "bill-flag",
case
when (12 * (extract (year
from
f."returned-at") - extract (year
from
pm."first-bill-date")) + (extract (month
from
f."returned-at") - extract (month
from
pm."first-bill-date")))>= 1 then 'old'
else 'new'
end as "old-new",
pm."first-bill-date" as "first-bill-date",
p."reference" as "p-reference",
p."patient-category" as "patient-category",
b."franchisee-inventory" as "lp-flag",
doi."min",
doi."max" ,
doi."safe-stock" as "safe-stock",
b2."promo-code-id" as "promo-code-id",
b2."payment-method" as "payment-method",
b2."doctor-id" as "doctor-id",
pc."code-type" as "code-type" ,
pc."type" as "pc-type",
pc."promo-code" as "promo-code",
ca."campaign",
NVL(pso2."pr-flag",
false),
NVL(pso2."hd-flag",
false),
NVL(pso2."ecom-flag",
false),
'return' as "substitution-status-g",
'return' as "substitution-status-trend",
case
when (f."returned-at" >= casl."system-first-inv-date") then 'available'
else 'not-available'
end as "goodaid-availablity-flag",
a."cgst-rate" as "cgst-rate" ,
0 as "cgst-amt" ,
0 as "cgst" ,
a."sgst-rate" as "sgst-rate" ,
0 as "sgst-amt" ,
0 as "sgst" ,
(a."cgst-rate" + a."sgst-rate") as "tax-rate",
a."igst-rate" as "igst-rate" ,
0 as "igst-amt" ,
a."utgst-rate" as "utgst-rate" ,
0 as "utgst-amt" ,
f1."serial" ,
c."hsncode",
c."is-repeatable" as "is-repeatable",
msm.store as "store-name",
msm."store-manager" ,
msm."line-manager",
msm.abo,
msm.city,
(case when (f1."gst-number" is not null and f1."gst-number"!='') then 'B2B' else msm."store-b2b" end) as "store-b2b",
msm."month-diff" as "store-month-diff",
msm."opened-at" as "store-opened-at",
msm."franchisee-id",
msm."franchisee-name",
msm."cluster-id",
msm."cluster-name",
a."id" as "return-item-id",
NULL as "bill-item-id",
cast(NULL as numeric) as "promo-discount",
c."type" as "type-at-selling",
c."category" as "category-at-selling",
date(f."returned-at") as "created-date",
ii1."invoice-item-reference",
i2."distributor-id",
ds."name" as "distributor-name",
b2."created-at" as "billed-at-return-ref",
a."return-reason" as "return-reason",
doi."drug-grade",
msm."acquired" ,
msm."old-new-static",
NVL(pso2."crm-flag",
false),
i2.id as "invoice-id",
i2."franchisee-invoice",
d1."group"
from
"prod2-generico"."customer-returns-1" f
left join "prod2-generico"."customer-return-items-1" a on
f."id" = a."return-id"
left join "prod2-generico"."bills-1" f1 on
a."bill-id" = f1."id"
left join "prod2-generico"."inventory-1" b on
a."inventory-id" = b."id"
left join "prod2-generico"."invoice-items-1" ii1 ON
b."invoice-item-id" = ii1.id
left join "prod2-generico".invoices i2 on
ii1."invoice-id" = i2.id
left join "prod2-generico".distributors ds ON
i2."distributor-id" = ds.id
inner join "prod2-generico"."drugs" c on
c."id" = b."drug-id"
left join "prod2-generico"."drug-unique-composition-mapping" d1 on
b."drug-id" = d1."drug-id"
left join "prod2-generico"."patients-metadata-2" pm on
f."patient-id" = pm."id"
left join "prod2-generico"."patients" p on
f."patient-id" = p."id"
left join "prod2-generico"."drug-order-info" doi on
(doi."store-id" = f."store-id"
and doi."drug-id" = b."drug-id")
inner join "prod2-generico"."bills-1" b2 on
a."bill-id" = b2."id"
left join "prod2-generico"."promo-codes" pc on
NVL(b2."promo-code-id",
0) = pc."id"
left join "prod2-generico"."campaigns" ca on
NVL(pc."campaign-id",
0) = ca."id"
left join "prod2-generico"."group-activation" mca on
f."store-id" = mca."store-id"
and d1."group" = mca."group"
left join "prod2-generico"."group-activation-system-level" casl on
d1."group" = casl."group"
left join "prod2-generico"."{}" as pso2 on
a."bill-id" = pso2."id"
inner join "prod2-generico"."{}" as msm on
f."store-id" = msm."id"
where
a."id" > {}
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/queries/sales/sales_config.py | sales_config.py |
max_short_book_id = """
select
max("id") as "short-book-id-max"
from
"prod2-generico"."{}"
"""
insert_as_ms_query = """
insert
into
"prod2-generico"."{}" (
"id",
"created-by",
"updated-by",
"updated-at",
"patient-id",
"store-name",
"drug-name",
"as-ms",
"created-to-invoice-days",
"created-to-invoice-hour",
"created-to-dispatch-days",
"created-to-dispatch-hour",
"created-to-delivery-days",
"created-to-delivery-hour",
"created-to-re-order-days",
"created-to-re-order-hour",
"created-to-order-days",
"created-to-order-hour",
"status",
"requested-quantity",
"quantity",
"required-quantity",
"inventory-at-creation",
"inventory-at-ordering",
"created-at",
"year-created-at",
"month-created-at",
"ordered-time",
"invoiced-at",
"dispatched-at",
"delivered-at",
"completed-at",
"re-ordered-at",
"store-delivered-at",
"decline-reason",
"type",
"store-id",
"drug-id",
"company",
"company-id",
"composition",
"composition-master-id",
"category",
"schedule",
"sub-type",
"preferred-distributor-id",
"preferred-distributor-name",
"drug-grade",
"purchase-rate",
"ptr",
"distributor-type",
"recieved-distributor-id",
"received-distributor-name",
"forward-dc-id",
"dc-name",
"abo",
"line-manager",
"store-manager",
"city",
"store-b2b",
"franchisee-short-book"
)
select
a.id,
'etl-automation' as "created-by",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta', GETDATE()) as "updated-at",
a."patient-id" as "patient-id",
b."name" as "store-name",
a."drug-name" as "drug-name",
case
when a."auto-short" = 1
and a."home-delivery" = 0
and a."created-by" = 'Auto Short'
and a."patient-id" = 4480 then
'AS'
when
a."auto-short" = 1
and a."home-delivery" = 0
and a."patient-id" != 4480 then
'MS'
else
'NA'
end as "as-ms",
--Fulfillment on Invoice
(case
when (a."invoiced-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day,
a."created-at",
a."invoiced-at")
end) as "created-to-invoice-days",
(case
when (a."invoiced-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours,
a."created-at",
a."invoiced-at")
end) as "created-to-invoice-hour",
--Fulfillment on dispatch
(case
when (a."dispatched-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day,
a."created-at",
a."dispatched-at")
end) as "created-to-dispatch-days",
(case
when (a."dispatched-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours,
a."created-at",
a."dispatched-at")
end) as "created-to-dispatch-hour",
--Fulfillment on delivery
(case
when (msda."store-delivered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day,
a."created-at",
msda."store-delivered-at")
end) as "created-to-delivery-days",
(case
when (msda."store-delivered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours,
a."created-at",
msda."store-delivered-at")
end) as "created-to-delivery-hour",
-- Re-order Timing --
(case
when (a."re-ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day,
a."created-at",
a."re-ordered-at")
end) as "created-to-re-order-days",
(case
when (a."re-ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours,
a."created-at",
a."re-ordered-at")
end) as "created-to-re-order-hour",
--order Timing--
(case
when (a."ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day,
a."created-at",
a."ordered-at")
end) as "created-to-order-days",
(case
when (a."ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours,
a."created-at",
a."ordered-at")
end) as "created-to-order-hour",
a."status" as "status",
a."requested-quantity" as "requested-quantity",
a."quantity" as "quantity",
a."required-quantity" as "required-quantity",
a."inventory-at-creation" as "inventory-at-creation" ,
a."inventory-at-ordering" as "inventory-at-ordering",
case
when a."created-at" = '0101-01-01' then null
else a."created-at"
end as "created-at",
extract(year
from
a."created-at") as "year-created-at",
extract(month
from
a."created-at") as "month-created-at",
case
when a."ordered-at" = '0101-01-01' then null
else a."ordered-at"
end as "ordered-time",
case
when a."invoiced-at" = '0101-01-01' then null
else a."invoiced-at"
end as "invoiced-at",
case
when a."dispatched-at" = '0101-01-01' then null
else a."dispatched-at"
end as "dispatched-at",
case
when a."delivered-at" = '0101-01-01' then null
else a."delivered-at"
end as "delivered-at",
case
when a."completed-at" = '0101-01-01' then null
else a."completed-at"
end as "completed-at",
case
when a."re-ordered-at" = '0101-01-01' then null
else a."re-ordered-at"
end as "re-ordered-at",
case
when msda."store-delivered-at" = '0101-01-01' then null
else msda."store-delivered-at"
end as "store-delivered-at",
a."decline-reason" as "decline-reason",
c."type",
a."store-id" as "store-id",
a."drug-id" as "drug-id",
c."company",
c."company-id" as "company-id" ,
c."composition" ,
c."composition-master-id" as "composition-master-id" ,
c."category" ,
c."schedule" ,
c."sub-type" as "sub-type" ,
f."id" as "preferred-distributor-id",
f."name" as "preferred-distributor-name",
e."drug-grade" as "drug-grade",
dp."purchase-rate" as "purchase-rate",
dp."ptr",
d."type" as "distributor-type",
d."id" as "recieved-distributor-id",
d."name" as "received-distributor-name",
j."forward-dc-id" as "forward-dc-id",
ss."name" as "dc-name",
msm."abo" ,
msm."line-manager" ,
msm."store-manager" ,
msm."city",
msm."store-b2b",
a."franchisee-short-book" as "franchisee-short-book"
from
"prod2-generico"."short-book-1" a
left join "prod2-generico"."stores" b on
b."id" = a."store-id"
left join "prod2-generico"."drugs" c on
c."id" = a."drug-id"
left join (
select
"drug-id",
AVG("purchase-rate") as "purchase-rate",
AVG(ptr) as "ptr"
from
"prod2-generico"."inventory-1" i
where
"created-at" >= dateadd(day,
-360,
CURRENT_DATE)
group by
"drug-id") as dp on
a."drug-id" = dp."drug-id"
left join "prod2-generico"."distributors" d on
d."id" = a."distributor-id"
left join "prod2-generico"."drug-order-info" e on
e."store-id" = a."store-id"
and e."drug-id" = a."drug-id"
left join "prod2-generico"."distributors" f on
a."preferred-distributor" = f."id"
left join (
select
"store-id",
"forward-dc-id"
from
"prod2-generico"."store-dc-mapping"
where
"drug-type" = 'ethical') j on
j."store-id" = a."store-id"
left join "prod2-generico"."stores" ss on
ss."id" = j."forward-dc-id"
left join "prod2-generico"."store-delivered" msda on
a."id" = msda."id"
left join "prod2-generico"."stores-master" msm on
a."store-id" = msm.id
where
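-- the placeholder below is rendered via str.format with the current max id already present in the target table (see max_short_book_id above)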
a.id > {}
and
a."auto-short" = 1
and a."home-delivery" = 0
and a."status" not in ('deleted');
"""
update_as_ms_query = """
update "prod2-generico"."{}" as s
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"created-to-invoice-days" = b."created-to-invoice-days",
"created-to-invoice-hour" = b."created-to-invoice-hour",
"created-to-dispatch-days" = b."created-to-dispatch-days",
"created-to-dispatch-hour" = b."created-to-dispatch-hour",
"created-to-delivery-days" = b."created-to-delivery-days",
"created-to-delivery-hour" = b."created-to-delivery-hour",
"created-to-re-order-days" = b."created-to-re-order-days",
"created-to-re-order-hour" = b."created-to-re-order-hour",
"created-to-order-days" = b."created-to-order-days",
"created-to-order-hour" = b."created-to-order-hour",
"status" = b."status",
"quantity" = b."quantity",
"required-quantity" = b."required-quantity",
"inventory-at-ordering" = b."inventory-at-ordering",
"ordered-time" = b."ordered-time",
"invoiced-at" = b."invoiced-at",
"dispatched-at" = b."dispatched-at",
"delivered-at" = b."delivered-at",
"completed-at" = b."completed-at",
"re-ordered-at" = b."re-ordered-at",
"store-delivered-at" = b."store-delivered-at",
"decline-reason" = b."decline-reason",
"type" = b."type",
"category" = b."category",
"schedule" = b."schedule",
"sub-type" = b."sub-type",
"preferred-distributor-id" = b."preferred-distributor-id",
"preferred-distributor-name" = b."preferred-distributor-name",
"drug-grade" = b."drug-grade",
"distributor-type" = b."distributor-type",
"recieved-distributor-id" = b."recieved-distributor-id",
"received-distributor-name" = b."received-distributor-name",
"abo" = b."abo",
"line-manager" = b."line-manager",
"store-manager" = b."store-manager",
"franchisee-short-book" = b."franchisee-short-book"
from (
select
a.id,
(case
when (a."invoiced-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day, a."created-at", a."invoiced-at")
end) as "created-to-invoice-days",
(case
when (a."invoiced-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours, a."created-at", a."invoiced-at")
end) as "created-to-invoice-hour",
--Fulfillment on dispatch
(case
when (a."dispatched-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day, a."created-at", a."dispatched-at")
end) as "created-to-dispatch-days",
(case
when (a."dispatched-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours, a."created-at", a."dispatched-at")
end) as "created-to-dispatch-hour",
--Fulfillment on delivery
(case
when (msda."store-delivered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day, a."created-at", msda."store-delivered-at")
end) as "created-to-delivery-days",
(case
when (msda."store-delivered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours, a."created-at", msda."store-delivered-at")
end) as "created-to-delivery-hour",
-- Re-order Timing --
(case
when (a."re-ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day, a."created-at", a."re-ordered-at")
end) as "created-to-re-order-days",
(case
when (a."re-ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours, a."created-at", a."re-ordered-at")
end) as "created-to-re-order-hour",
--order Timing--
(case
when (a."ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day, a."created-at", a."ordered-at")
end) as "created-to-order-days",
(case
when (a."ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours, a."created-at", a."ordered-at")
end) as "created-to-order-hour",
a."status" as "status",
a."quantity" as "quantity",
a."required-quantity" as "required-quantity",
a."inventory-at-ordering" as "inventory-at-ordering",
case
when a."ordered-at" = '0101-01-01' then null else a."ordered-at"
end as "ordered-time",
case
when a."invoiced-at" = '0101-01-01' then null else a."invoiced-at"
end as "invoiced-at",
case
when a."dispatched-at" = '0101-01-01' then null else a."dispatched-at"
end as "dispatched-at",
case
when a."delivered-at" = '0101-01-01' then null else a."delivered-at"
end as "delivered-at",
case
when a."completed-at" = '0101-01-01' then null else a."completed-at"
end as "completed-at",
case
when a."re-ordered-at" = '0101-01-01' then null else a."re-ordered-at"
end as "re-ordered-at",
case
when msda."store-delivered-at" = '0101-01-01' then null else msda."store-delivered-at"
end as "store-delivered-at",
a."decline-reason" as "decline-reason",
c."type",
c."category" ,
c."schedule" ,
c."sub-type" as "sub-type" ,
f."id" as "preferred-distributor-id",
f."name" as "preferred-distributor-name",
e."drug-grade" as "drug-grade",
d."type" as "distributor-type",
d."id" as "recieved-distributor-id",
d."name" as "received-distributor-name",
msm."abo" ,
msm."line-manager" ,
msm."store-manager" ,
a."franchisee-short-book" as "franchisee-short-book"
from
"prod2-generico"."{}" s
inner join "prod2-generico"."short-book-1" a on
s.id = a.id
left join "prod2-generico"."drugs" c on
c."id" = a."drug-id"
left join "prod2-generico"."distributors" d on
d."id" = a."distributor-id"
left join "prod2-generico"."drug-order-info" e on
e."store-id" = a."store-id"
and e."drug-id" = a."drug-id"
left join "prod2-generico"."distributors" f on
a."preferred-distributor" = f."id"
left join "prod2-generico"."store-delivered" msda on
a."id" = msda."id"
left join "prod2-generico"."stores-master" msm on
a."store-id" = msm.id
where
s."updated-at" < a."updated-at"
or
s."updated-at" < c."updated-at"
or
s."updated-at" < d."updated-at"
or
s."updated-at" < e."updated-at"
or
s."updated-at" < f."updated-at"
or
s."updated-at" < msda."updated-at"
or
s."updated-at" < msm."updated-at"
) as b
where
s.id = b.id;
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/queries/as_ms/as_ms.py | as_ms.py |
max_bill_id = """
select
max("id") as "bill-id-max"
from
"prod2-generico"."{}"
"""
insert_bill_flags_query = """
insert
into
"prod2-generico"."{}" (
"id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"pr-flag",
"hd-flag",
"ecom-flag",
"crm-flag"
)
select
pso."bill-id" as "id",
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "updated-at",
bool_or(case when pso."patient-request-id" is null then false else true end) as "pr-flag",
bool_or(case when pso."order-type" = 'delivery' then true else false end) as "hd-flag",
bool_or(case when pso."order-source" = 'zeno' then true else false end) as "ecom-flag",
bool_or(case when pso."order-source" = 'crm' then true else false end) as "crm-flag"
from
"prod2-generico"."patients-store-orders" pso
left join "prod2-generico"."bill-flags" bf
on NVL(pso."bill-id",0)= bf."id"
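-- anti-join: only bill-ids from patients-store-orders that are not yet present in bill-flags are inserted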
where
bf."id" is null
group by
pso."bill-id";
"""
update_bill_flags_query = """
update "prod2-generico"."{}" as bf
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"pr-flag" = b."pr-flag",
"hd-flag" = b."hd-flag",
"ecom-flag" = b."ecom-flag",
"crm-flag" = b."crm-flag"
from (
select
"bill-id" as "id",
bool_or(case when pso."patient-request-id" is null then false else true end) as "pr-flag",
bool_or(case when pso."order-type" = 'delivery' then true else false end) as "hd-flag",
bool_or(case when pso."order-source" = 'zeno' then true else false end) as "ecom-flag",
bool_or(case when pso."order-source" = 'crm' then true else false end) as "crm-flag"
from
"prod2-generico"."{}" bf inner join
"prod2-generico"."patients-store-orders" pso on
bf.id = pso."bill-id"
where
pso."updated-at" > bf."updated-at"
group by
pso."bill-id"
) as b
where
bf.id = b.id;
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/queries/bill_flags/bill_flags_config.py | bill_flags_config.py |
max_pso_id = """
select
max("id") as "pso-id-max"
from
"prod2-generico"."{}"
"""
insert_query = """
insert
into
"prod2-generico"."{}" (
"id" ,
"created-at",
"created-by",
"updated-by",
"updated-at",
"year-created-at",
"month-created-at",
"patient-id",
"doctor-id",
"store-id",
"bill-id",
"drug-id",
"zeno-order-id",
"drug-name",
"pso-requested-quantity",
"pso-inventory-quantity",
"order-number",
"order-source",
"order-type",
"patient-request-id",
"payment-type",
"promo-id",
"pso-status",
"fulfilled-to-consumer",
"type",
"category",
"company",
"company-id",
"composition",
"composition-master-id",
"lp-fulfilled-qty",
"sb-id" ,
"ff-distributor",
"ordered-distributor-id",
"quantity",
"required-quantity",
"ordered-at",
"completed-at",
"invoiced-at",
"dispatched-at",
"received-at",
"sb-status",
"decline-reason",
"inventory-at-ordering",
"re-ordered-at",
"dc-ff-time",
"store-received-ff-time",
"consumer-ff-time",
"order-raised-at-dc",
"order-raised-at-distributor",
"billed-at",
"store-name",
"store-manager",
"line-manager",
"abo",
"city",
"store-b2b",
"substituted",
"gross-quantity",
"gross-revenue-value",
"net-quantity",
"net-revenue-value",
"selling-rate",
"store-delivered-at",
"franchisee-short-book"
)
select
pso."id" as "id",
pso."created-at" as "created-at",
pso."created-by" as "created-by",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "updated-at",
extract(year from pso."created-at") as "year-created-at",
extract(month from pso."created-at") as "month-created-at",
pso."patient-id" as "patient-id" ,
pso."doctor-id" as "doctor-id" ,
pso."store-id" as "store-id" ,
pso."bill-id" as "bill-id" ,
pso."drug-id" as "drug-id",
pso."zeno-order-id" as "zeno-order-id",
pso."drug-name" as "drug-name" ,
pso."requested-quantity" as "pso-requested-quantity",
pso."inventory-quantity" as "pso-inventory-quantity",
pso."order-number" as "order-number" ,
pso."order-source" as "order-source" ,
pso."order-type" as "order-type" ,
pso."patient-request-id" as "patient-request-id" ,
pso."payment-type" as "payment-type" ,
pso."promo-id" as "promo-id",
pso.status as "pso-status",
(case
when ms."gross-quantity" > 0 then 1
else 0
end) as "fulfilled-to-consumer",
d2."type" ,
d2."category" ,
d2."company" ,
d2."company-id" as "company-id" ,
d2."composition" ,
d2."composition-master-id" as "composition-master-id",
NVL(prlp."lp-fulfilled-qty", 0) as "lp-fulfilled-qty",
sb."id" as "sb-id",
sb."distributor-id" as "ff-distributor",
sb."ordered-distributor-id" as "ordered-distributor-id",
sb."quantity" as "quantity" ,
sb."required-quantity" as "required-quantity" ,
case
when sb."ordered-at" = '0101-01-01' then null
else sb."ordered-at"
end as "ordered-at",
case
when sb."completed-at" = '0101-01-01' then null
else sb."completed-at"
end as "completed-at",
case
when sb."invoiced-at" = '0101-01-01' then null
else sb."invoiced-at"
end as "invoiced-at",
case
when sb."dispatched-at" = '0101-01-01' then null
else sb."dispatched-at"
end as "dispatched-at",
case
when sb."received-at" = '0101-01-01' then null
else sb."received-at"
end as "received-at",
sb."status" as "sb-status",
sb."decline-reason" as "decline-reason",
sb."inventory-at-ordering" as "inventory-at-ordering" ,
case
when sb."re-ordered-at" = '0101-01-01' then null
else sb."re-ordered-at"
end as "re-ordered-at",
(case
when (pso."created-at" = '0101-01-01'
or msda."store-delivered-at" = '0101-01-01') then null
else datediff(hour,
pso."created-at",
msda."store-delivered-at")
end) as "dc-ff-time",
(case
when (pso."created-at" = '0101-01-01'
or sb."received-at" = '0101-01-01') then null
else datediff(hour,
pso."created-at",
sb."received-at")
end) as "store-received-ff-time",
(case
when (pso."created-at" = '0101-01-01'
or b2."created-at" = '0101-01-01') then null
else datediff(hour,
pso."created-at",
b2."created-at")
end) as "consumer-ff-time",
(case
when sb."quantity">0 then 1
else 0
end) as "order-raised-at-dc",
(case
when ("ordered-at" = '0101-01-01' or "ordered-at" is null) then 0
else 1
end) as "order-raised-at-distributor",
b2."created-at" as "billed-at",
msm."store" as "store-name",
msm."store-manager",
msm."line-manager",
msm."abo",
msm."city",
msm."store-b2b",
case
when "generic-flag" is null then 'not-available'
when "generic-flag" is not null
and d2."type" = 'generic' then 'substituted'
when "generic-flag" is not null
and d2."type" != 'generic' then 'not-substituted'
else 'not-available'
end as "substituted",
ms."gross-quantity",
ms."gross-revenue-value",
ms."net-quantity",
ms."net-revenue-value",
case
when sgdp."selling-rate" is null
and d2."type" = 'generic' then 35
when sgdp."selling-rate" is null
and d2."type" != 'generic' then 100
else sgdp."selling-rate"
end as "selling-rate",
msda."store-delivered-at",
sb."franchisee-short-book" as "franchisee-short-book"
from
"prod2-generico"."patients-store-orders" pso
left join
(
select
prlp."patient-request-id" ,
sum("fulfilled-quantity") as "lp-fulfilled-qty"
from
"prod2-generico"."patient-request-local-purchase" prlp
inner join
"prod2-generico"."patients-store-orders" pso on
NVL(pso."patient-request-id", 0) = prlp."patient-request-id"
group by
prlp."patient-request-id" ) as prlp on
prlp."patient-request-id" = NVL(pso."patient-request-id", 0)
left join "prod2-generico"."patient-requests-short-books-map" mprsb on
NVL(pso."patient-request-id", 0) = mprsb."patient-request-id"
left join "prod2-generico"."short-book-1" sb on
sb.id = mprsb."short-book-id"
left join "prod2-generico"."store-delivered" msda on
mprsb."short-book-id" = msda."id"
left join "prod2-generico"."bills-1" b2 on
b2.id = NVL(pso."bill-id", 0)
left join "prod2-generico"."drugs" d2 on
d2."id" = pso."drug-id"
left join "prod2-generico"."substitutable-compositions" msc on
msc."id" = d2."composition-master-id"
left join "prod2-generico"."sales-agg" ms on
ms."bill-id" = pso."bill-id"
and ms."drug-id" = pso."drug-id"
inner join "prod2-generico"."stores-master" msm on
pso."store-id" = msm.id
left join "prod2-generico"."store-group-drug-price" sgdp on
msm."store-group-id" = sgdp."store-group-id"
and pso."drug-id" = sgdp."drug-id" and sgdp."cluster-id" is null
where
pso."id" > {};
"""
update_query = """
update "prod2-generico"."{}" as t
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"bill-id" = s."bill-id",
"drug-id" = s."drug-id",
"order-number" = s."order-number",
"order-type" = s."order-type",
"patient-request-id" = s."patient-request-id",
"payment-type" = s."payment-type",
"promo-id" = s."promo-id",
"pso-status" = s."pso-status",
"fulfilled-to-consumer" = s."fulfilled-to-consumer",
"type" = s."type",
"category" = s."category",
"company" = s."company",
"composition" = s."composition",
"lp-fulfilled-qty" = s."lp-fulfilled-qty",
"sb-id" = s."sb-id",
"ff-distributor" = s."ff-distributor",
"ordered-distributor-id" = s."ordered-distributor-id",
"quantity" = s."quantity",
"required-quantity" = s."required-quantity",
"ordered-at" = s."ordered-at",
"completed-at" = s."completed-at",
"invoiced-at" = s."invoiced-at",
"dispatched-at" = s."dispatched-at",
"received-at" = s."received-at",
"sb-status" = s."sb-status",
"decline-reason" = s."decline-reason",
"inventory-at-ordering" = s."inventory-at-ordering",
"re-ordered-at" = s."re-ordered-at",
"dc-ff-time" = s."dc-ff-time",
"store-received-ff-time" = s."store-received-ff-time",
"consumer-ff-time" = s."consumer-ff-time",
"order-raised-at-dc" = s."order-raised-at-dc",
"order-raised-at-distributor" = s."order-raised-at-distributor",
"billed-at" = s."billed-at",
"store-manager" = s."store-manager",
"line-manager" = s."line-manager",
"abo" = s."abo",
"substituted" = s."substituted",
"gross-quantity" = s."gross-quantity",
"gross-revenue-value" = s."gross-revenue-value",
"net-quantity" = s."net-quantity",
"net-revenue-value" = s."net-revenue-value",
"selling-rate" = s."selling-rate",
"store-delivered-at" = s."store-delivered-at",
"franchisee-short-book" = s."franchisee-short-book"
from (
select
pso."id" as "id",
pso."bill-id" as "bill-id" ,
pso."drug-id" as "drug-id",
pso."order-number" as "order-number" ,
pso."order-type" as "order-type" ,
pso."patient-request-id" as "patient-request-id" ,
pso."payment-type" as "payment-type" ,
pso."promo-id" as "promo-id",
pso.status as "pso-status",
(case
when ms."gross-quantity" > 0 then 1
else 0
end) as "fulfilled-to-consumer",
d2."type",
d2."category" ,
d2."company" ,
d2."composition" ,
NVL(prlp."lp-fulfilled-qty", 0) as "lp-fulfilled-qty",
sb."id" as "sb-id",
sb."distributor-id" as "ff-distributor",
sb."ordered-distributor-id" as "ordered-distributor-id",
sb."quantity" as "quantity" ,
sb."required-quantity" as "required-quantity" ,
case
when sb."ordered-at" = '0101-01-01' then null
else sb."ordered-at"
end as "ordered-at",
case
when sb."completed-at" = '0101-01-01' then null
else sb."completed-at"
end as "completed-at",
case
when sb."invoiced-at" = '0101-01-01' then null
else sb."invoiced-at"
end as "invoiced-at",
case
when sb."dispatched-at" = '0101-01-01' then null
else sb."dispatched-at"
end as "dispatched-at",
case
when sb."received-at" = '0101-01-01' then null
else sb."received-at"
end as "received-at",
sb."status" as "sb-status",
sb."decline-reason" as "decline-reason",
sb."inventory-at-ordering" as "inventory-at-ordering" ,
case
when sb."re-ordered-at" = '0101-01-01' then null
else sb."re-ordered-at"
end as "re-ordered-at",
(case
when (pso."created-at" = '0101-01-01'
or msda."store-delivered-at" = '0101-01-01') then null
else datediff(hour,
pso."created-at",
msda."store-delivered-at")
end) as "dc-ff-time",
(case
when (pso."created-at" = '0101-01-01'
or sb."received-at" = '0101-01-01') then null
else datediff(hour,
pso."created-at",
sb."received-at")
end) as "store-received-ff-time",
(case
when (pso."created-at" = '0101-01-01'
or ms."created-at" = '0101-01-01') then null
else datediff(hour,
pso."created-at",
ms."created-at")
end) as "consumer-ff-time",
(case
when sb."quantity">0 then 1
else 0
end) as "order-raised-at-dc",
(case
when (sb."ordered-at" = '0101-01-01' or sb."ordered-at" is null) then 0
else 1
end) as "order-raised-at-distributor",
ms."created-at" as "billed-at",
msm."store-manager",
msm."line-manager",
msm."abo",
case
when msc."generic-flag" is null then 'not-available'
when msc."generic-flag" is not null
and d2."type" = 'generic' then 'substituted'
when msc."generic-flag" is not null
and d2."type" != 'generic' then 'not-substituted'
else 'not-available'
end as "substituted",
ms."gross-quantity",
ms."gross-revenue-value",
ms."net-quantity",
ms."net-revenue-value",
case
when sgdp."selling-rate" is null
and d2."type" = 'generic' then 35
when sgdp."selling-rate" is null
and d2."type" != 'generic' then 100
else sgdp."selling-rate"
end as "selling-rate",
msda."store-delivered-at",
sb."franchisee-short-book" as "franchisee-short-book"
from
"prod2-generico"."{}" prm
inner join
"prod2-generico"."patients-store-orders" pso on prm.id = pso.id
left join
(
select
prlp."patient-request-id" ,
sum("fulfilled-quantity") as "lp-fulfilled-qty"
from
"prod2-generico"."patient-request-local-purchase" prlp
inner join
"prod2-generico"."patients-store-orders" pso on
NVL(pso."patient-request-id", 0) = prlp."patient-request-id"
group by
prlp."patient-request-id" ) as prlp on
prlp."patient-request-id" = NVL(pso."patient-request-id", 0)
left join "prod2-generico"."patient-requests-short-books-map" mprsb on
NVL(pso."patient-request-id", 0) = mprsb."patient-request-id"
left join "prod2-generico"."short-book-1" sb on
sb.id = mprsb."short-book-id"
left join "prod2-generico"."store-delivered" msda on
mprsb."short-book-id" = msda."id"
inner join "prod2-generico"."drugs" d2 on
d2."id" = pso."drug-id"
left join "prod2-generico"."substitutable-compositions" msc on
msc."id" = d2."composition-master-id"
left join "prod2-generico"."sales-agg" ms on
ms."bill-id" = pso."bill-id"
and ms."drug-id" = pso."drug-id"
inner join "prod2-generico"."stores-master" msm on
pso."store-id" = msm.id
left join "prod2-generico"."store-group-drug-price" sgdp on
msm."store-group-id" = sgdp."store-group-id"
and pso."drug-id" = sgdp."drug-id" and sgdp."cluster-id" is null
where
prm."updated-at" < pso."updated-at"
or prm."updated-at" < sb."updated-at"
or prm."updated-at" < msc."updated-at"
or prm."updated-at" < msda."updated-at"
or prm."updated-at" < d2."updated-at"
) as s
where
t.id = s.id;
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/queries/patient_request/__init__.py | __init__.py |
sales_query = """
select
a."bill-id",
f."store-id",
c."id" as "drug-id",
c."drug-name",
c."type",
c."category",
c."company",
a."created-at",
a."quantity",
b."id" as "inventory-id",
b."mrp",
b."ptr" AS "zp-ptr",
b."ptr" AS "final-ptr",
b."purchase-rate" AS "wc-ptr",
a."rate",
a."cgst-rate",
a."sgst-rate",
a."igst-rate",
f."payment-method",
f."net-payable",
f."patient-id",
(a."rate" * a."quantity") as "value",
d."franchisee-id" ,
ii."franchisee-invoice"
from
{schema}."bills-1{suffix_to_table}" f
join {schema}."bill-items-1{suffix_to_table}" a on
f."id" = a."bill-id"
left join {schema}."inventory-1{suffix_to_table}" b on
a."inventory-id" = b."id"
left join {schema}."drugs{suffix_to_table}" c on
c."id" = b."drug-id"
left join {schema}."stores{suffix_to_table}" d on
d."id"= b."store-id"
left join {schema}."invoices-1{suffix_to_table}" ii
on b."franchisee-invoice-id" = ii.id
where
a."created-at" >= '{analysis_start_time}'
and a."created-at" <= '{analysis_end_time}'
-- and f."store-id" = 2
"""
customer_returns_query = """
SELECT
b."return-id",
b."bill-id",
b."bill-id" as "returned-bill-id",
a."store-id",
a."patient-id",
a."total-items",
a."return-value",
b."inventory-id",
b."returned-quantity",
b."cgst-rate",
b."sgst-rate",
b."igst-rate",
b."rate",
b."return-value",
b."billed-at",
b."returned-at",
c."drug-id",
c."drug-id" as "returned-drug-id",
d."type",
d."category",
d."company",
c."mrp",
c."ptr" AS "zp-ptr",
c."ptr" AS "final-ptr",
c."purchase-rate" AS "wc-ptr",
e."payment-method",
s."franchisee-id" ,
ii."franchisee-invoice"
FROM
{schema}."customer-returns-1{suffix_to_table}" a
JOIN {schema}."customer-return-items-1{suffix_to_table}" b ON
a."id" = b."return-id"
LEFT JOIN {schema}."inventory-1{suffix_to_table}" c ON
c."id" = b."inventory-id"
LEFT JOIN {schema}."drugs{suffix_to_table}" d ON
d."id" = c."drug-id"
LEFT JOIN {schema}."bills-1{suffix_to_table}" e ON
e."id" = b."bill-id"
left join {schema}."invoices-1{suffix_to_table}" ii
on c."franchisee-invoice-id" = ii.id
left join {schema}."stores{suffix_to_table}" s
on a."store-id" = s.id
WHERE
b."returned-at" >='{analysis_start_time}' AND
b."returned-at" <= '{analysis_end_time}'
-- AND a."store-id" = 2
"""
order_source_query = """
select
pso."bill-id" as "zeno_bill_id",
"order-source"
from
{schema}."patients-store-orders{suffix_to_table}" pso
where
"order-source" = 'zeno'
and
pso."bill-id" is not null
group by
pso."bill-id",
"order-source"
"""
store_list_query = """
SELECT
"id" AS "store-id",
"name" AS "store-name",
"franchisee-id"
FROM
{schema}."stores{suffix_to_table}"
"""
inventory_query = """
select
a."store-id",
a."drug-id",
(a."quantity"+
a."locked-for-check" +
a."locked-for-audit" +
a."locked-for-return" +
a."locked-for-transfer") as "quantity",
a."ptr" AS "final-ptr",
a."expiry",
b."drug-name",
b."type",
b."category",
b."company",
a."created-at",
c."vat",
s."franchisee-id" ,
ii."franchisee-invoice"
from
{schema}."inventory-1{suffix_to_table}" a
left join {schema}."drugs{suffix_to_table}" b on
a."drug-id" = b."id"
left join {schema}."invoice-items-1{suffix_to_table}" c on
a."invoice-item-id" = c."id"
left join {schema}."invoices-1{suffix_to_table}" ii
on a."franchisee-invoice-id" = ii.id
left join {schema}."stores{suffix_to_table}" s
on a."store-id" = s.id
where
(a."quantity"> 0
OR a."locked-for-check" > 0
OR a."locked-for-audit" > 0
OR a."locked-for-return" > 0
OR a."locked-for-transfer" > 0 )
"""
customers_initial_bill_date = """
select
f."patient-id",
f."store-id",
min (f."created-at") as "created-at"
from
{schema}."bills-1{suffix_to_table}" f
group by
f."patient-id" ,
f."store-id"
"""
purchase_from_wc_query = """
select
i."franchisee-invoice-item-id",
b."franchisee-invoice-id",
b."invoice-id",
a."invoice-date",
a."created-at",
a."store-id",
b."drug-id",
c."type",
c."category",
c."company",
b."actual-quantity",
b."net-value" as "zp_received_net_value",
b."vat" as "zp_vat",
i."net-value" ,
i."actual-quantity" as "1_actual_quantity",
b."actual-quantity" as "2_actual_quantity",
i."vat" as "wc_vat",
s."franchisee-id" ,
a."franchisee-invoice" ,
case
when s."opened-at" is NULL then 'launch_stock'
when date(s."opened-at") = '0101-01-01' then 'launch_stock'
when (inv."invoice-date") < (s."opened-at") then 'launch_stock'
else 'normal'
end as "launch_flag"
from
{schema}."invoices-1{suffix_to_table}" a
join
{schema}."invoice-items-1{suffix_to_table}" b on
a."id" = b."franchisee-invoice-id"
left join
{schema}."drugs{suffix_to_table}" c on
c."id" = b."drug-id"
join
{schema}."invoice-items{suffix_to_table}" i on
i."id" = b."invoice-item-reference"
left join {schema}."invoices{suffix_to_table}" inv on
inv."id" = a."invoice-reference"
left join {schema}."stores{suffix_to_table}" s
on a."store-id" = s.id
where
a."invoice-date" >= '{analysis_start_time}'
and a."invoice-date"<= '{analysis_end_time}'
and a.status not in ('live' , 'inbox')
-- and a."franchisee-invoice" = 0
-- and a."store-id" = 2
"""
zippin_return_data_query = """
select
*
from
(
select
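-- row_number over the return-item id, together with the outer "row" = 1 filter, keeps a single row per return item in case the debit-note-items join fans out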
row_number() over(partition by r.id
order by
r.id desc) as "row",
date(d."dispatched-at") as "dispatch-date",
date(r."settled-at") as "settled-date",
e."name",
d."serial" as "debit-note",
d."credit-note-reference",
d."status",
d."store-id",
s."name" as "cost-centre",
e."gstn",
r."id" as "return-item-id",
r.taxable as "old-taxable-value",
r.net /( (1 + r.gst / 100)) as "taxable-value",
r."gst" as "tax-rate",
r."gst-amount" as "tax-value",
r."net" as "net-value",
e."id" as "distributor-id",
d.id as "debit-note-id",
y."drug-id",
p."type",
p."category",
p."company",
y."purchase-rate" ,
y."purchase-rate" * r."returned-quantity" as "cogs",
ii2."vat",
s."franchisee-id" ,
ii."franchisee-invoice"
from
{schema}."return-items-1{suffix_to_table}" r
left join {schema}."inventory-1{suffix_to_table}" y on
y."id" = r."inventory-id"
left join {schema}."drugs{suffix_to_table}" p on
p."id" = y."drug-id"
left join {schema}."debit-note-items-1{suffix_to_table}" dni
on
r.id = dni."item-id"
left join {schema}."debit-notes-1{suffix_to_table}" d on
dni."debit-note-id" = d."id"
join {schema}."distributors{suffix_to_table}" e on
d."dist-id" = e."id"
join {schema}."stores{suffix_to_table}" s on
d."store-id" = s."id"
left join {schema}."invoices-1{suffix_to_table}" ii
on
y."franchisee-invoice-id" = ii.id
left join {schema}."invoice-items-1{suffix_to_table}" ii2
on
y."invoice-item-id" = ii2.id
where
r.status = 'settled'
and d."is-internal-debit-note" = 0
and dni."is-active" = 1
and r."settled-at" >= '{analysis_start_time}'
and r."settled-at" <= '{analysis_end_time}'
and d."dist-id" != 64)a
where a."row" = 1
"""
zippin_return_data_query_revised_1 = """
select
date(d."dispatched-at") as "dispatch-date",
date(r."settled-at") as "settled-date",
e."name",
d."serial" as "debit-note",
d."credit-note-reference",
d."status",
d."store-id",
s."name" as "cost-centre",
e."gstn",
r."id" as "return-item-id",
r.taxable as "old-taxable-value",
r.net/( (1 + r.gst / 100)) as "taxable-value",
r."gst" as "tax-rate",
r."gst-amount" as "tax-value",
r."net" as "net-value",
e."id" as "distributor-id",
d.id as "debit-note-id",
y."drug-id",
p."type",
p."category",
p."company",
y."purchase-rate" ,
y."purchase-rate" * r."returned-quantity" as "cogs",
ii2."vat",
s."franchisee-id" ,
ii."franchisee-invoice"
from
{schema}."return-items-1{suffix_to_table}" r
left join {schema}."inventory-1{suffix_to_table}" y on
y."id" = r."inventory-id"
left join {schema}."drugs{suffix_to_table}" p on
p."id" = y."drug-id"
left join {schema}."debit-notes-1{suffix_to_table}" d on
r."debit-note-reference" = d."id"
join {schema}."distributors{suffix_to_table}" e on
d."dist-id" = e."id"
join {schema}."stores{suffix_to_table}" s on
d."store-id" = s."id"
left join {schema}."invoices-1{suffix_to_table}" ii
on y."franchisee-invoice-id" = ii.id
left join {schema}."invoice-items-1{suffix_to_table}" ii2
on
y."invoice-item-id" = ii2.id
where
r.status = 'settled'
and r."settled-at" >='{analysis_start_time}'
and r."settled-at" <= '{analysis_end_time}'
and d."dist-id" != 64
"""
mysql_old_db_zippin_return_data_query = """
select
date(d.`dispatched-at`) as `dispatch-date`,
date(r.`settled-at`) as `settled-date`,
e.`name`,
d.`serial` as `debit-note`,
d.`credit-note-reference`,
d.`status`,
d.`store-id`,
s.`name` as `cost-centre`,
e.`gstn`,
r.`id` as `return-item-id`,
r.taxable as `old-taxable-value`,
r.net/( (1 + r.gst / 100)) as `taxable-value`,
r.`gst` as `tax-rate`,
r.`gst-amount` as `tax-value`,
r.`net` as `net-value`,
e.`id` as `distributor-id`,
d.id as `debit-note-id`,
y.`drug-id`,
p.`type`,
p.`category`,
p.`company`,
y.`purchase-rate` ,
y.`purchase-rate` * r.`returned-quantity` as `cogs`,
ii2.`vat`,
s.`franchisee-id` ,
ii.`franchisee-invoice`
from
`return-items-1` r
left join `inventory-1` y on
y.`id` = r.`inventory-id`
left join `drugs` p on
p.`id` = y.`drug-id`
left join `debit-notes-1` d on
r.`debit-note-reference` = d.`id`
join `distributors` e on
d.`dist-id` = e.`id`
join `stores` s on
d.`store-id` = s.`id`
left join `invoices-1` ii
on y.`franchisee-invoice-id` = ii.id
left join `invoice-items-1` ii2
on
y.`invoice-item-id` = ii2.id
where
r.status = 'settled'
and r.`settled-at` >='{analysis_start_time}'
and r.`settled-at` <= '{analysis_end_time}'
and d.`dist-id` != 64
"""
old_donotuse_zippin_return_data_query = """
select
date(d."dispatched-at") as "dispatch-date",
e."name",
d."serial" as "debit-note",
d."credit-note-reference",
d."status",
d."store-id",
s."name" as "cost-centre",
e."gstn",
r."id" as "return-item-id",
r."taxable" as "taxable-value",
r."gst" as "tax-rate",
r."gst-amount" as "tax-value",
r."net" as "net-value",
e."id" as "distributor-id",
d.id as "debit-note-id",
y."drug-id",
p."type",
p."category",
p."company",
y."purchase-rate" ,
y."purchase-rate" * r."returned-quantity" as "cogs",
ii2."vat",
s."franchisee-id" ,
ii."franchisee-invoice"
from
{schema}."return-items-1{suffix_to_table}" r
left join {schema}."inventory-1{suffix_to_table}" y on
y."id" = r."inventory-id"
left join {schema}."drugs{suffix_to_table}" p on
p."id" = y."drug-id"
join {schema}."debit-notes-1{suffix_to_table}" d on
r."debit-note-reference" = d."id"
join {schema}."distributors{suffix_to_table}" e on
d."dist-id" = e."id"
join {schema}."stores{suffix_to_table}" s on
d."store-id" = s."id"
left join {schema}."invoices-1{suffix_to_table}" ii
on y."franchisee-invoice-id" = ii.id
left join {schema}."invoice-items-1{suffix_to_table}" ii2
on
y."invoice-item-id" = ii2.id
where
d."dispatched-at">='{analysis_start_time}'
and d."dispatched-at"<= '{analysis_end_time}'
and d."dist-id" != 64
"""
workcell_return_data_query = """
select
date(d."dispatched-at") as "dispatch-date",
d."created-at" as "created-date",
e."name",
d."serial" as "debit-note",
d."credit-note-reference",
d."status",
d."store-id",
s."name" as "cost-centre",
e."gstn",
r."id" as "return-item-id",
r."taxable" as "taxable-value",
r."gst" as "tax-rate",
r."gst-amount" as "tax-value",
r."net" as "net-value",
y."drug-id",
o."type",
o."category",
o."company",
y."purchase-rate" ,
y."purchase-rate" * r."returned-quantity" as "cogs",
ii2."vat",
s."franchisee-id" ,
ii1."franchisee-invoice"
from
{schema}."return-items{suffix_to_table}" as r
left join {schema}."inventory{suffix_to_table}" y on
r."inventory-id"= y."id"
left join {schema}."drugs{suffix_to_table}" o on
o."id" = y."drug-id"
join {schema}."debit-notes{suffix_to_table}" as d on
r."debit-note-reference" = d."id"
join {schema}."distributors{suffix_to_table}" as e on
d."dist-id" = e."id"
join {schema}."stores{suffix_to_table}" as s on
d."store-id" = s."id"
left join {schema}."invoices{suffix_to_table}" ii
on y."invoice-id" = ii.id
left join {schema}."invoices-1{suffix_to_table}" ii1
on ii1."invoice-reference" = ii.id
left join {schema}."invoice-items{suffix_to_table}" ii2
on
y."invoice-item-id" = ii2.id
where
d."dispatched-at">='{analysis_start_time}'
and d."dispatched-at"<='{analysis_end_time}'
"""
local_purchase_data_query = """
select
a."id" as "inventory-id",
"invoice-reference",
b."franchisee-invoice-id",
c."distributor-id",
x."name",
a."store-id",
s."name" as "store-name",
a."drug-id",
d."drug-name",
d."type",
d."category",
d."company",
"vat",
b."actual-quantity",
b."net-value",
a."ptr",
a."purchase-rate",
a."created-at",
c."dispatch-status",
s."franchisee-id" ,
c."franchisee-invoice"
from
{schema}."inventory-1{suffix_to_table}" a
join
{schema}."invoice-items-1{suffix_to_table}" b on
a."invoice-item-id" = b."id"
left join
{schema}."invoices-1{suffix_to_table}" c on
a."franchisee-invoice-id" = c."id"
left join
{schema}."drugs{suffix_to_table}" d on
a."drug-id" = d."id"
left join
{schema}."stores{suffix_to_table}" s on
s."id" = a."store-id"
left join
{schema}."distributors{suffix_to_table}" x on
x."id" = c."distributor-id"
where
((s."franchisee-id" = 1 and "invoice-reference" is null)
or (s."franchisee-id" != 1 and c."distributor-id" = 76 ))
and c."invoice-date" >= '{analysis_start_time}'
and c."invoice-date" <= '{analysis_end_time}'
"""
# Note: Local Purchase data was filtered on inventory created-at until Nov 2022; from Dec 2022 it is filtered on invoice-date.
generic_composition_count_query = """
select
count(distinct t."composition") as "count"
from
(
select
"id",
"drug-name",
"type",
"composition"
from
{schema}."drugs{suffix_to_table}"
where
"type" = 'generic'
and "composition" != ''
) t
"""
ethical_margin_query = """
select
sum(a."actual-quantity" * a."mrp") as "value1",
sum(a."net-value") as "net-value"
from
{schema}."invoice-items{suffix_to_table}" a
join {schema}."invoices{suffix_to_table}" b on
a."invoice-id" = b."id"
join {schema}."distributors{suffix_to_table}" c on
c."id" = b."distributor-id"
join {schema}."drugs{suffix_to_table}" d on
d."id" = a."drug-id"
where
c."credit-period">0
and d."type" = 'ethical'
and a."created-at" >= '{analysis_start_time}'
and a."created-at" <= '{analysis_end_time}'
group by
date_part(year, a."created-at"),
date_part (month,a."created-at")
"""
ethical_margin_fofo_query = """
select
sum(a."actual-quantity" * a."mrp") as "value1",
sum(a."net-value") as "net-value"
from
{schema}."invoice-items{suffix_to_table}" a
join {schema}."invoices{suffix_to_table}" b on
a."invoice-id" = b."id"
left join {schema}."invoices-1{suffix_to_table}" ii on
ii."invoice-reference" = b."id"
join {schema}."distributors{suffix_to_table}" c on
c."id" = b."distributor-id"
join {schema}."drugs{suffix_to_table}" d on
d."id" = a."drug-id"
left join {schema}."stores{suffix_to_table}" s on
s."id" = b."store-id"
where
c."credit-period">0
and d."type" = 'ethical'
and a."created-at" >= '{analysis_start_time}'
and a."created-at" <= '{analysis_end_time}'
and s."franchisee-id" != 1
and ii."franchisee-invoice" {equality_symbol} 0
group by
date_part(year, a."created-at"),
date_part (month,a."created-at")
"""
home_delivery_data_query = """
select
pso."order-number",
pso.id as "patient-store-order-id",
pso."patient-request-id",
pso."zeno-order-id" ,
pso."patient-id" ,
pso."order-source" as "order-source-pso" ,
pso."order-type" ,
pso."status" as "pso-status",
pso."created-at" as "pso-created-at",
pso."store-id" ,
s."name" as "store-name",
s."franchisee-id",
pso."drug-id" ,
pso."drug-name" ,
pso."requested-quantity",
pso."inventory-quantity" as "inventory-at-creation",
pr."required-quantity",
pr."quantity-to-order",
pso."bill-id",
b."created-at" as "bill-date",
dt."delivered-at",
ss."type" as "slot-type"
from
{schema}."patients-store-orders{suffix_to_table}" pso
left join {schema}."patient-requests{suffix_to_table}" pr on
pso."patient-request-id" = pr.id
join {schema}."stores{suffix_to_table}" s on
s."id" = pso."store-id"
left join {schema}."bills-1{suffix_to_table}" b on
b."id" = pso."bill-id"
left join {schema}."delivery-tracking{suffix_to_table}" dt
on
dt."patient-store-order-id" = pso."id"
left join {schema}."store-slots{suffix_to_table}" ss
on
pso."slot-id" = ss.id
where
dt."delivered-at" >= '{analysis_start_time}'
and dt."delivered-at"<= '{analysis_end_time}'
and pso."order-type" = 'delivery'
and pso."bill-id" is not null
order by
pso."created-at" desc
"""
delivery_bill_ids_query = """
select
"bill-id"
from
{schema}."patients-store-orders{suffix_to_table}" pso
where
pso."order-type" = 'delivery'
group by
"bill-id"
"""
cumulative_consumers_data_query = """
select
f."store-id" ,
count(distinct "patient-id") as "total-cons",
count(distinct case
when c.company != 'GOODAID' and c."type" in ('generic', 'high-value-generic') then "patient-id"
end) as "generic-without-gaid-cons",
count(distinct case
when c."type" in ('generic', 'high-value-generic') then "patient-id"
end) as "generic-cons",
count(distinct case
when c.company in ('GOODAID') then "patient-id"
end) as "total-gaid-cons",
count(distinct case
when c.category in ('chronic') then "patient-id"
end) as "total-chronic-cons"
from
{schema}."bills-1{suffix_to_table}" f
join {schema}."bill-items-1{suffix_to_table}" a on
f."id" = a."bill-id"
left join {schema}."inventory-1{suffix_to_table}" b on
a."inventory-id" = b."id"
left join {schema}."drugs{suffix_to_table}" c on
c."id" = b."drug-id"
-- where
-- f."store-id" = 2
group by
f."store-id"
"""
cumulative_consumers_fofo_data_query = """
select
f."store-id" ,
count(distinct "patient-id") as "total-cons",
count(distinct case
when c.company != 'GOODAID' and c."type" in ('generic', 'high-value-generic') then "patient-id"
end) as "generic-without-gaid-cons",
count(distinct case
when c."type" in ('generic', 'high-value-generic') then "patient-id"
end) as "generic-cons",
count(distinct case
when c.company in ('GOODAID') then "patient-id"
end) as "total-gaid-cons",
count(distinct case
when c.category in ('chronic') then "patient-id"
end) as "total-chronic-cons"
from
{schema}."bills-1{suffix_to_table}" f
join {schema}."bill-items-1{suffix_to_table}" a on
f."id" = a."bill-id"
left join {schema}."inventory-1{suffix_to_table}" b on
a."inventory-id" = b."id"
left join {schema}."drugs{suffix_to_table}" c on
c."id" = b."drug-id"
left join {schema}."invoices-1{suffix_to_table}" ii on
b."franchisee-invoice-id" = ii.id
left join {schema}."stores{suffix_to_table}" s on
s."id" = f."store-id"
where
s."franchisee-id" != 1
and ii."franchisee-invoice" {equality_symbol} 0
group by
f."store-id"
"""
other_files_ethical_margin_query = """
select
date_part (year,
a."created-at") as "year",
date_part(month, a."created-at") as "month",
sum(a."actual-quantity" * a."mrp") as "actual-quantity * mrp",
sum(a."net-value") as "net-value"
from
{schema}."invoice-items{suffix_to_table}" a
join {schema}."invoices{suffix_to_table}" b on
a."invoice-id" = b."id"
join {schema}."distributors{suffix_to_table}" c on
c."id" = b."distributor-id"
join {schema}."drugs{suffix_to_table}" d on
d."id" = a."drug-id"
where
c."credit-period">0
and d."type" = 'ethical'
and date(a."created-at") >= '2021-08-01'
group by
date_part(year, a."created-at"),
date_part(month, a."created-at")
"""
other_files_distributor_margin_query = """
select
date_part(year,a."created-at") as "year",
date_part(month,a."created-at") as "month",
c."name",
sum(a."actual-quantity"* a."mrp") as "actual-quantity * mrp",
sum(a."net-value") as "net-value"
from
{schema}."invoice-items{suffix_to_table}" a
join {schema}."invoices{suffix_to_table}" b on
a."invoice-id"= b."id"
join {schema}."distributors{suffix_to_table}" c on
c."id"= b."distributor-id"
join {schema}."drugs{suffix_to_table}" d on
d."id"= a."drug-id"
where
c."credit-period">0
and date_part (year ,a."created-at") = {choose_year}
and date_part (month ,a."created-at") = {choose_month}
group by
date_part (year,a."created-at"),
date_part (month,a."created-at"),
c."name"
"""
other_files_inventory_at_dc_near_expiry_data_query = """
select
a."invoice-number",
b."invoice-id",
b."vat",
a."invoice-date",
a."store-id",
b."drug-id",
a."net-payable",
b."net-value",
case
when b."actual-quantity" = 0 then 0
else
(b."net-value"*1.0 / b."actual-quantity"*1.0)
end as "final-ptr",
b."actual-quantity",
a."created-at",
a."received-at",
"status",
"dispatch-status",
c."type",
c."category",
b."expiry"
from
{schema}."invoices{suffix_to_table}" a
join {schema}."invoice-items{suffix_to_table}" b on
a."id" = b."invoice-id"
join {schema}."drugs{suffix_to_table}" c on
b."drug-id" = c."id"
where
"status" = 'approved'
and "dispatch-status" in ('dispatch-status-na')
"""
goodaid_store_sales_query = """
select
date_part (year,
b."created-AT") as "YEAR",
date_part (month,
b."created-AT") as "MONTH",
b."store-id",
max(s."name") as "store-name",
SUM(c."mrp" * a."quantity") as "gross_mrp",
SUM((c."mrp" * a."quantity") /
(1 + ((a."cgst-rate" + a."sgst-rate")/ 100))) as "gross_mrp_taxable",
SUM(a."rate" * a."quantity") as "gross_revenue",
SUM((a."rate" * a."quantity") /
(1 + ((a."cgst-rate" + a."sgst-rate")/ 100))) as "gross_revenue_taxable",
SUM(c."purchase-rate" * a."quantity") as "gross_cogs",
SUM((c."purchase-rate" * a."quantity") /
(1 + ((a."cgst-rate" + a."sgst-rate")/ 100))) as "gross_cogs_taxable",
sum(a."quantity") as "gross_quantity"
from
{schema}."bill-items-1{suffix_to_table}" a
left join {schema}."bills-1{suffix_to_table}" b on
b."id" = a."bill-id"
left join {schema}."inventory-1{suffix_to_table}" c on
c."id" = a."inventory-id"
left join {schema}."stores{suffix_to_table}" s on
s."id" = b."store-id"
left join {schema}."drugs{suffix_to_table}" d on
d."id" = c."drug-id"
where
date_part (year,
a."created-AT") = {choose_year}
and
date_part (month,
a."created-AT") ={choose_month}
and
d."company" = 'GOODAID'
group by
date_part(year, b."created-AT"),
date_part(month, b."created-AT"),
b."store-id"
"""
goodaid_store_returns_query = """
select
date_part (year,b."returned-at") as "year",
date_part (month,b."returned-at") as "month",
b."store-id",
max(s."name") as "store-name",
(SUM(c."mrp" * a."returned-quantity") * -1) as "returns_mrp",
(SUM((c."mrp" * a."returned-quantity") /
(1 + ((a."cgst-rate" + a."sgst-rate")/ 100))) * -1) as "returns_mrp_taxable",
(SUM(a."rate" * a."returned-quantity") * - 1) as "returns",
(SUM((a."rate" * a."returned-quantity") /
(1 + ((a."cgst-rate" + a."sgst-rate")/ 100))) * -1) as "returns_taxable",
(SUM(c."purchase-rate" * a."returned-quantity") * -1) as "returns_cogs",
(SUM((c."purchase-rate" * a."returned-quantity") /
(1 + ((a."cgst-rate" + a."sgst-rate")/ 100))) * -1) as "returns_cogs_taxable",
(sum(a."returned-quantity") * -1) as "returned_quantity"
from
{schema}."customer-return-items-1{suffix_to_table}" a
left join {schema}."customer-returns-1{suffix_to_table}" b on
b."id" = a."return-id"
left join {schema}."inventory-1{suffix_to_table}" c on
c."id" = a."inventory-id"
left join {schema}."stores{suffix_to_table}" s on
s."id" = b."store-id"
left join {schema}."drugs{suffix_to_table}" d on
d."id" = c."drug-id"
where
date_part(year,a."returned-at") = {choose_year}
and
date_part(month,a."returned-at") = {choose_month}
and
d."company" = 'GOODAID'
group by
date_part(year,b."returned-at"),
date_part(month,b."returned-at"),
b."store-id"
"""
goodaid_zippin_inventory_query = """
select
a."store-id",
s."name" as "store-name",
a."drug-id",
b."drug-name",
b."type",
b."category",
a."expiry",
c."vat",
((a."quantity"+ a."locked-for-check" + a."locked-for-audit" +
a."locked-for-return" + a."locked-for-transfer")) as "quantity",
((a."quantity"+ a."locked-for-check" + a."locked-for-audit" +
a."locked-for-return" + a."locked-for-transfer") * a."ptr") as "value",
a."ptr",
a."created-at"
from
{schema}."inventory-1{suffix_to_table}" a
join {schema}."invoice-items-1{suffix_to_table}" c on
a."invoice-item-id" = c."id"
left join {schema}."drugs{suffix_to_table}" b on
a."drug-id" = b."id"
left join {schema}."stores{suffix_to_table}" s on
s."id" = a."store-id"
where
(a."quantity"> 0
or
a."locked-for-check" > 0
or
a."locked-for-audit" > 0
or
a."locked-for-return" > 0
or
a."locked-for-transfer" > 0)
and b."company" = 'GOODAID'
"""
goodaid_dc_inventory_query = """
select
m."dc-id" as store_id,
(
select
"name"
from
{schema}."stores{suffix_to_table}"
where
id = m."dc-id"
limit 1) as store_name,
vat,
sum("post_tax") post_tax,
sum(taxable_amount) as taxable_amount
from
(
select
a."store-id",
(
select
"forward-dc-id"
from
{schema}."store-dc-mapping{suffix_to_table}"
where
max(dgs.type) = "drug-type"
and "store-id" = a."store-id"
limit 1) as "dc-id",
round(sum( coalesce (a."locked-quantity" * "purchase-rate", 0) )) "post_tax",
b.vat,
round(sum(coalesce (a."locked-quantity" * a."purchase-rate", 0) / (1 + b.vat / 100) )) as "taxable_amount"
from
{schema}."inventory{suffix_to_table}" a
join {schema}."invoice-items{suffix_to_table}" b on
a."invoice-item-id" = b.id
join {schema}."drugs{suffix_to_table}" dgs on
dgs.id = a."drug-id"
where
"dgs"."company" = 'GOODAID'
group by
a."store-id",
b.vat
union
select
a."store-id",
(
select
"forward-dc-id"
from
{schema}."store-dc-mapping{suffix_to_table}"
where
max(dgs."type") = "drug-type"
and "store-id" = a."store-id"
limit 1) as "dc-id",
round(sum( coalesce (a."locked-quantity" * "purchase-rate", 0) )) "post_tax",
b.vat,
round(sum( coalesce (a."locked-quantity" * a."purchase-rate", 0) / (1 + b.vat / 100) )) as "taxable_amount"
from
{schema}."inventory-1{suffix_to_table}" a
join {schema}."invoice-items-1{suffix_to_table}" b on
a."invoice-item-id" = b.id
join {schema}."drugs{suffix_to_table}" dgs on
dgs.id = a."drug-id"
where
"dgs"."company" = 'GOODAID'
group by
a."store-id",
b.vat
union
select
b."store-id",
(
select
"return-dc-id"
from
{schema}."store-dc-mapping{suffix_to_table}"
where
max(dgs.type) = "drug-type"
and "store-id" = b."store-id"
limit 1) as "dc-id",
round(sum( coalesce (a."returned-quantity" * c."purchase-rate", 0))) "post_tax",
d.vat,
round(sum( coalesce (a."returned-quantity" * c."purchase-rate", 0) / (1 + d.vat / 100) )) as "taxable_amount"
from
{schema}."return-items{suffix_to_table}" a
join {schema}."returns-to-dc{suffix_to_table}" b on
a."return-id" = b.id
join {schema}."inventory{suffix_to_table}" c on
a."inventory-id" = c.id
join {schema}."drugs{suffix_to_table}" dgs on
dgs.id = c."drug-id"
join {schema}."invoice-items{suffix_to_table}" d on
c."invoice-item-id" = d.id
where
a."status" in ('saved', 'approved')
and "dgs"."company" = 'GOODAID'
group by
b."store-id",
d.vat
) m
group by
"dc-id",
vat;
"""
goodaid_wh_inventory_query = """
select
"drug-id",
"drug-name",
sum("balance-quantity") as wh_qty,
sum("balance-value") as wh_value
from
"prod2-generico"."wh-inventory-ss" wis
where
date("created-at") = '{date}'
group by
"drug-id",
"drug-name"
"""
goodaid_drugs_query = """
select
"id" as "drug_id",
"company"
from
{schema}."drugs{suffix_to_table}"
where
"company" = 'GOODAID'
"""
store_info_query = """
select
sm.id as "store-id",
sm."name" as "store-name",
case
when sm."franchisee-id" = 1 then 'COCO'
else 'FOFO'
end as "franchise-flag",
zc."name" as "city-name",
sg."name" as "store-group-name",
zis."name" as "state-name"
from
{schema}."stores{suffix_to_table}" sm
left join "prod2-generico"."zeno-city" zc
on
sm."city-id" = zc.id
left join "prod2-generico"."store-groups" sg
on
sm."store-group-id" = sg.id
left join "prod2-generico"."zeno-indian-states" zis
on
zc."indian-state-id" = zis.id
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/queries/mis/mis_queries.py | mis_queries.py |
import decimal
import os
# from zeno_etl_libs.queries.mis import mis_queries
#
# from zeno_etl_libs.helper.aws.s3 import S3
# from zeno_etl_libs.db.db import DB
# from zeno_etl_libs.helper import helper
import json
import datetime
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
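# Illustrative usage (a sketch, not part of the library): assumes a connected Redshift
# `DB` object and a standard logger are available, and that `mis_queries` is the module
# of query templates above. Parameter values below are placeholders.
#
#   from zeno_etl_libs.queries.mis import mis_queries
#   mis = Mis(analysis_start_time='2023-01-01 00:00:00',
#             analysis_end_time='2023-01-31 23:59:59',
#             suffix_to_table='', schema_to_select='"prod2-generico"',
#             choose_year=2023, choose_month=1,
#             rs_db=rs_db, logger=logger, mis_queries=mis_queries)
#   sales_df = mis.sales()
#   stores = mis.store_list()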
class Mis:
def __init__(self,analysis_start_time,analysis_end_time,suffix_to_table,schema_to_select,choose_year,choose_month,rs_db=None,logger=None,mis_queries=None):
self.analysis_start_time = analysis_start_time
self.analysis_end_time = analysis_end_time
self.suffix_to_table = suffix_to_table
self.schema_to_select = schema_to_select
self.choose_year = choose_year
self.choose_month = choose_month
self.rs_db = rs_db
self.logger = logger
self.logger.info('Instantiated Mis class')
self.mis_queries = mis_queries
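# Each query wrapper below formats one template from mis_queries with the analysis
# window / schema / table suffix, runs it on Redshift via rs_db.get_df() and returns a
# DataFrame with hyphens in column names replaced by underscores.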
def sales(self):
sales_query = self.mis_queries.sales_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table,
analysis_start_time=self.analysis_start_time, analysis_end_time=self.analysis_end_time)
df = self.rs_db.get_df(sales_query)
df.columns = [c.replace('-', '_') for c in df.columns]
return df
def customer_returns(self):
customer_returns_query = self.mis_queries.customer_returns_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table, analysis_start_time=self.analysis_start_time,
analysis_end_time=self.analysis_end_time)
df = self.rs_db.get_df(customer_returns_query)
df.columns = [c.replace('-', '_') for c in df.columns]
return df
def order_source(self):
order_source_query = self.mis_queries.order_source_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table, analysis_start_time=self.analysis_start_time,
analysis_end_time=self.analysis_end_time)
order_source = self.rs_db.get_df(order_source_query)
order_source.columns = [c.replace('-', '_') for c in order_source.columns]
return order_source
def store_list(self):
store_list_query = self.mis_queries.store_list_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table)
store_list = self.rs_db.get_df(store_list_query)
store_list.columns = [c.replace('-', '_') for c in store_list.columns]
return store_list
def inventory(self):
inventory_query = self.mis_queries.inventory_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table)
inventory = self.rs_db.get_df(inventory_query)
inventory.columns = [c.replace('-', '_') for c in inventory.columns]
return inventory
def cumulative_consumers_data(self):
cumulative_consumers_data_query = self.mis_queries.cumulative_consumers_data_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table)
cumulative_consumers_data = self.rs_db.get_df(cumulative_consumers_data_query)
cumulative_consumers_data.columns = [c.replace('-', '_') for c in cumulative_consumers_data.columns]
return cumulative_consumers_data
def cumulative_consumers_fofo_data(self):
workcell_cumulative_consumers_fofo_data_query = self.mis_queries.cumulative_consumers_fofo_data_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table,equality_symbol='=')
workcell_cumulative_consumers_fofo_data = self.rs_db.get_df(workcell_cumulative_consumers_fofo_data_query)
workcell_cumulative_consumers_fofo_data.columns = [c.replace('-', '_') for c in workcell_cumulative_consumers_fofo_data.columns]
others_cumulative_consumers_fofo_data_query = self.mis_queries.cumulative_consumers_fofo_data_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table,equality_symbol='!=')
others_cumulative_consumers_fofo_data = self.rs_db.get_df(others_cumulative_consumers_fofo_data_query)
others_cumulative_consumers_fofo_data.columns = [c.replace('-', '_') for c in others_cumulative_consumers_fofo_data.columns]
return workcell_cumulative_consumers_fofo_data,others_cumulative_consumers_fofo_data
def purchase_from_wc_data(self):
purchase_from_wc_query = self.mis_queries.purchase_from_wc_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table,
analysis_start_time=self.analysis_start_time, analysis_end_time=self.analysis_end_time)
df = self.rs_db.get_df(purchase_from_wc_query)
df.columns = [c.replace('-', '_') for c in df.columns]
df['wc_purchase_net_value'] = (df['net_value'] / df['1_actual_quantity']) * df['2_actual_quantity']
return df
def zippin_return_data(self):
zippin_return_data_query = self.mis_queries.zippin_return_data_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table,
analysis_start_time=self.analysis_start_time, analysis_end_time=self.analysis_end_time)
df = self.rs_db.get_df(zippin_return_data_query)
df.columns = [c.replace('-', '_') for c in df.columns]
return df
def workcell_return_data(self):
workcell_return_data_query = self.mis_queries.workcell_return_data_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table,
analysis_start_time=self.analysis_start_time, analysis_end_time=self.analysis_end_time)
df = self.rs_db.get_df(workcell_return_data_query)
df.columns = [c.replace('-', '_') for c in df.columns]
return df
def cons_initial_bill_date(self):
customers_initial_bill_date_query = self.mis_queries.customers_initial_bill_date.format(schema=self.schema_to_select,
suffix_to_table=self.suffix_to_table)
customers_initial_bill_date = self.rs_db.get_df(customers_initial_bill_date_query)
customers_initial_bill_date.columns = [c.replace('-', '_') for c in customers_initial_bill_date.columns]
return customers_initial_bill_date
def local_purchase_data(self):
local_purchase_data_query = self.mis_queries.local_purchase_data_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table ,
analysis_start_time=self.analysis_start_time, analysis_end_time=self.analysis_end_time)
local_purchase_data = self.rs_db.get_df(local_purchase_data_query )
local_purchase_data.columns = [c.replace('-', '_') for c in local_purchase_data.columns]
return local_purchase_data
def home_delivery_data(self):
home_delivery_data_query = self.mis_queries.home_delivery_data_query.format(schema=self.schema_to_select,
suffix_to_table=self.suffix_to_table,
analysis_start_time=self.analysis_start_time,
analysis_end_time=self.analysis_end_time)
df = self.rs_db.get_df(home_delivery_data_query)
df.columns = [c.replace('-', '_') for c in df.columns]
return df
def delivery_bill_ids(self):
delivery_bill_ids_query = self.mis_queries.delivery_bill_ids_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table)
delivery_bill_ids = self.rs_db.get_df(delivery_bill_ids_query)
delivery_bill_ids.columns = [c.replace('-', '_') for c in delivery_bill_ids.columns]
return delivery_bill_ids
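# order_type_tag: buckets a drug for MIS reporting. Under the 'breakup' tag, GOODAID
# company drugs are reported as their own bucket; under 'unified' they stay within
# their drug type. Both tags fold high-value-ethical into 'ethical' and
# high-value-generic into 'generic'; everything else is 'others'.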
def order_type_tag(self,company, type_, tag):
if tag == 'breakup':
if company == 'GOODAID':
return 'GOODAID'
# elif type_ in ('ethical'):
# return 'ethical'
# elif type_ in ('generic'):
# return 'generic'
# else:
# return 'others'
elif type_ in ('ethical', 'high-value-ethical'):
return 'ethical'
elif type_ in ('generic', 'high-value-generic'):
return 'generic'
else:
return 'others'
elif tag == 'unified':
# if type_ in ('ethical'):
# return 'ethical'
# elif type_ in ('generic'):
# return 'generic'
# else:
# return 'others'
if type_ in ('ethical', 'high-value-ethical'):
return 'ethical'
elif type_ in ('generic', 'high-value-generic'):
return 'generic'
else:
return 'others'
else:
self.logger.info('please provide valid tag')
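# taxable_value strips GST from a line value:
#   taxable = quantity * rate / (1 + (cgst + sgst + igst) / 100)
# e.g. 10 units at rate 12 with 6% CGST + 6% SGST -> 120 / 1.12 ≈ 107.14 taxable.
# The *_vat_based variants below do the same using a single VAT rate instead of GST splits.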
def taxable_value(self,quantity,rate,cgst,sgst,igst):
# igst = 0
return quantity*rate/(1 + ((cgst+sgst+igst)/100))
def taxable_value_vat_based(self,quantity,rate,vat):
quantity = float(quantity)
rate = float(rate)
vat = float(vat)
taxable = (quantity*rate)/(1 + ((vat)/100))
taxable = float(taxable)
return taxable
def taxable_value_vat_based_2(self,value,vat):
value = float(value)
vat = float(vat)
taxable = (value)/(1 + ((vat)/100))
taxable = float(taxable)
return taxable
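# fofo_final_distributor: resolves the supplying bucket for FOFO reporting. For COCO
# stores (franchisee-id 1) a null or 0 franchisee-invoice flag means 'workcell'; for
# franchisee stores only a 0 flag counts as 'workcell', everything else is 'other'.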
def fofo_final_distributor(self,franchisee_id,franchisee_invoice):
if franchisee_id ==1:
if franchisee_invoice == 0 or franchisee_invoice is None:
return 'workcell'
else:
return 'other'
if franchisee_id != 1:
if franchisee_invoice == 0:
return 'workcell'
else:
return 'other'
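# fofo_distributor_bifurcation: converts per-distributor metrics into exclusive buckets.
# 'combined' rows carry the metric over both distributors together, so the overlap is
#   both = workcell + other - combined,
#   only_workcell = workcell - both,  only_other = other - both.
# Zeros are turned back into NaN at the end.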
def fofo_distributor_bifurcation(self,df):
columns = [x for x in df.columns if x not in ['tag_flag', 'fofo_distributor', 'order_source','type1','category']]
workcell = df.loc[df['fofo_distributor'] == 'workcell'][columns].reset_index()
other = df.loc[df['fofo_distributor'] == 'other'][columns].reset_index()
combined = df.loc[df['fofo_distributor'] == 'combined'][columns].reset_index()
if other.empty:
other.loc[0] = 0
if workcell.empty:
workcell.loc[0] = 0
if combined.empty:
combined.loc[0] = 0
both = workcell + other - combined
both['fofo_distributor'] = 'both'
both = both.loc[both['fofo_distributor'] == 'both'].reset_index()
only_workcell = workcell - both
only_workcell['fofo_distributor'] = 'only_workcell'
only_other = other - both
only_other['fofo_distributor'] = 'only_other'
fofo_distributor_bifurcation_ = pd.concat([only_workcell, only_other, both], sort=True)
fofo_distributor_bifurcation_.replace(0, np.nan, inplace=True)
return fofo_distributor_bifurcation_
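# Helper that tags the overall frame as 'combined', stacks it with the FOFO frame and
# applies the bifurcation above per group, preserving the original tag_flag order.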
def fofo_distributor_bifurcation_next_calculation_steps(self,df, df_fofo, groupbylist):
df['fofo_distributor'] = 'combined'
df_1 = df[df_fofo.columns]
df_fofo = pd.concat([df_fofo, df_1], sort=True)
df_fofo.fillna(0, inplace=True)
order = df_fofo['tag_flag'].drop_duplicates(keep='first').to_frame()
df_fofo = df_fofo.reset_index(drop=True)
df_fofo = df_fofo.groupby(groupbylist).apply(lambda x: self.fofo_distributor_bifurcation(x)).reset_index()[
[x for x in df_fofo.columns if x not in ['level_1', 'level_2', 'index']]]
df_fofo = order.merge(df_fofo, on='tag_flag', how='left')
return df_fofo
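# gmv_gross_payment: GMV (quantity * MRP), gross revenue (quantity * rate) and
# payment-mode splits per store, pivoted with store names as columns. With
# fofo_tag='yes' only franchisee stores are kept and a fofo_distributor axis is added.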
def gmv_gross_payment(self, Gross, stores, fofo_tag = 'no'):
gross = Gross.copy(deep = True)
gross['GMV_sale'] = gross['quantity'] * gross['mrp']
gross['gross_sale'] = gross['quantity'] * gross['rate']
if fofo_tag=='no':
gross_sale_summary = gross.groupby(['store_id', 'type1', 'category',
'payment_method', 'order_source'],
as_index=False).agg({
'quantity': ['sum'],
'GMV_sale': ['sum'],
'gross_sale': ['sum']
}).reset_index(drop=True)
gross_sale_summary.columns = ["_".join(x) for x in gross_sale_summary.columns.ravel()]
gross_sale_summary.fillna(0, inplace=True)
gross_sale = pd.merge(left=gross_sale_summary, right=stores,
how='left',
left_on=['store_id_'],
right_on=['store_id'])
gross_sale.rename(columns={'type1_': 'type1',
'category_': 'category',
'payment_method_': 'payment_method',
'order_source_': 'order_source',
'quantity_sum': 'quantity',
'GMV_sale_sum': 'GMV_sales',
'gross_sale_sum': 'gross_sales'}, inplace=True)
gross_sale[['GMV_sales', 'gross_sales']] = gross_sale[['GMV_sales', 'gross_sales']].astype(float)
gross_sale.fillna(0, inplace=True)
# #GMV
df_gross_returns2a = gross_sale.groupby(['store_id', 'store_name',
'type1', 'order_source'])[['GMV_sales']].sum().reset_index()
df_gross_returns2 = pd.pivot_table(df_gross_returns2a,
values='GMV_sales',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
df_gross_returns2['tag_flag'] = 'gmv'
# GROSS
df_gross_returns3a = gross_sale.groupby(['store_id', 'store_name',
'type1', 'order_source'])[['gross_sales']].sum().reset_index()
df_gross_returns3 = pd.pivot_table(df_gross_returns3a,
values='gross_sales',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
df_gross_returns3['tag_flag'] = 'gross'
# Payment
gross_sale['payment'] = np.where(gross_sale['payment_method'].isin(['cash', 'card']),
gross_sale['payment_method'], 'upi')
df_gross_returns4a = gross_sale.groupby(['store_id', 'store_name',
'payment', 'order_source'])[['gross_sales']].sum().reset_index()
df_gross_returns4 = pd.pivot_table(df_gross_returns4a,
values='gross_sales',
index=['payment', 'order_source'],
columns=['store_name']).reset_index()
df_gross_returns4['tag_flag'] = 'payment'
gmv_gross_payment = pd.concat([df_gross_returns2,
df_gross_returns3,
df_gross_returns4], sort=True)
cols_to_move = ['tag_flag', 'type1', 'payment', 'order_source']
gmv_gross_payment = gmv_gross_payment[cols_to_move +
[col for col in gmv_gross_payment.columns
if col not in cols_to_move]]
return gmv_gross_payment
elif fofo_tag == 'yes':
gross = gross[gross['franchisee_id']!=1]
gross_sale_summary = gross.groupby(['store_id', 'type1', 'category',
'payment_method', 'order_source','fofo_distributor'],
as_index=False).agg({
'quantity': ['sum'],
'GMV_sale': ['sum'],
'gross_sale': ['sum']
}).reset_index(drop=True)
gross_sale_summary.columns = ["_".join(x) for x in gross_sale_summary.columns.ravel()]
gross_sale_summary.fillna(0, inplace=True)
gross_sale = pd.merge(left=gross_sale_summary, right=stores,
how='left',
left_on=['store_id_'],
right_on=['store_id'])
gross_sale.rename(columns={'type1_': 'type1',
'category_': 'category',
'payment_method_': 'payment_method',
'order_source_': 'order_source',
'fofo_distributor_':'fofo_distributor',
'quantity_sum': 'quantity',
'GMV_sale_sum': 'GMV_sales',
'gross_sale_sum': 'gross_sales'}, inplace=True)
gross_sale[['GMV_sales', 'gross_sales']] = gross_sale[['GMV_sales', 'gross_sales']].astype(float)
gross_sale.fillna(0, inplace=True)
# #GMV
df_gross_returns2a = gross_sale.groupby(['store_id', 'store_name',
'type1', 'order_source','fofo_distributor'])[['GMV_sales']].sum().reset_index()
df_gross_returns2 = pd.pivot_table(df_gross_returns2a,
values='GMV_sales',
index=['type1', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
df_gross_returns2['tag_flag'] = 'gmv'
# GROSS
df_gross_returns3a = gross_sale.groupby(['store_id', 'store_name',
'type1', 'order_source','fofo_distributor'])[['gross_sales']].sum().reset_index()
df_gross_returns3 = pd.pivot_table(df_gross_returns3a,
values='gross_sales',
index=['type1', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
df_gross_returns3['tag_flag'] = 'gross'
# Payment
gross_sale['payment'] = np.where(gross_sale['payment_method'].isin(['cash', 'card']),
gross_sale['payment_method'], 'upi')
df_gross_returns4a = gross_sale.groupby(['store_id', 'store_name',
'payment', 'order_source','fofo_distributor'])[['gross_sales']].sum().reset_index()
df_gross_returns4 = pd.pivot_table(df_gross_returns4a,
values='gross_sales',
index=['payment', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
df_gross_returns4['tag_flag'] = 'payment'
gmv_gross_payment = pd.concat([df_gross_returns2,
df_gross_returns3,
df_gross_returns4], sort=True)
cols_to_move = ['tag_flag', 'type1', 'payment', 'order_source','fofo_distributor']
gmv_gross_payment = gmv_gross_payment[cols_to_move +
[col for col in gmv_gross_payment.columns
if col not in cols_to_move]]
return gmv_gross_payment
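# netsale_tax_cogs: net sale, taxable net sale and taxable net COGS per store, drug type
# and order source -- gross bill lines minus customer returns, GST stripped via
# taxable_value(). The fofo_tag='yes' branch repeats this per fofo_distributor for
# franchisee stores only.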
def netsale_tax_cogs(self,Gross,Returns,stores,fofo_tag = 'no'):
gross = Gross.copy(deep = True)
returns = Returns.copy(deep = True)
if fofo_tag=='no':
gross['gross_sale'] = gross['quantity'] * gross['rate']
gross['gross_COGS'] = gross['quantity'] * gross['wc_ptr']
gross['gross_sale_taxable'] = np.vectorize(self.taxable_value)(gross['quantity'], gross['rate'],
gross['cgst_rate'], gross['sgst_rate'],
gross['igst_rate'])
gross['gross_COGS_taxable'] = np.vectorize(self.taxable_value)(gross['quantity'], gross['wc_ptr'],
gross['cgst_rate'],
gross['sgst_rate'], gross['igst_rate'])
gross_sale_summary = gross.groupby(['store_id', 'type1', 'order_source'],
as_index=False).agg({
'quantity': ['sum'],
'gross_sale': ['sum'],
'gross_COGS': ['sum'],
'gross_sale_taxable':['sum'],
'gross_COGS_taxable':['sum']
}).reset_index(drop=True)
gross_sale_summary.columns = ["_".join(x) for x in gross_sale_summary.columns.ravel()]
returns['gross_returns'] = returns['rate'] * returns['returned_quantity']
returns['returns_COGS'] = returns['wc_ptr'] * returns['returned_quantity']
returns['gross_returns_taxable'] = np.vectorize(self.taxable_value)(returns['returned_quantity'], returns['rate'],
returns['cgst_rate'], returns['sgst_rate'],
returns['igst_rate'])
returns['returns_COGS_taxable'] = np.vectorize(self.taxable_value)(returns['returned_quantity'], returns['wc_ptr'],
returns['cgst_rate'],
returns['sgst_rate'], returns['igst_rate'])
returns_summary = returns.groupby(['store_id', 'type1', 'order_source'],
as_index=False).agg({
'returned_quantity':['sum'],
'gross_returns':['sum'],
'returns_COGS':['sum'],
'gross_returns_taxable': ['sum'],
'returns_COGS_taxable': ['sum']}).reset_index(drop=True)
returns_summary.columns = ["_".join(x) for x in returns_summary.columns.ravel()]
gross_returns = pd.merge(left=gross_sale_summary, right=returns_summary,
how='outer', on=['store_id_', 'type1_', 'order_source_'])
gross_returns.fillna(0, inplace=True)
gross_returns['net_sale'] = gross_returns['gross_sale_sum'] - gross_returns['gross_returns_sum']
gross_returns['net_sale_taxable'] = gross_returns['gross_sale_taxable_sum'] - gross_returns['gross_returns_taxable_sum']
gross_returns['net_COGS'] = gross_returns['gross_COGS_sum'] - gross_returns['returns_COGS_sum']
gross_returns['net_COGS_taxable'] = gross_returns['gross_COGS_taxable_sum'] - gross_returns[
'returns_COGS_taxable_sum']
gross_returns1 = pd.merge(left=gross_returns, right=stores,
how='left',
left_on=['store_id_'],
right_on=['store_id'])
gross_returns1.rename(columns={ 'type1_': 'type1',
'order_source_':'order_source',
'quantity_sum':'quantity',
'gross_sale_sum': 'gross_sales',
'gross_COGS_sum': 'gross_COGS',
'gross_sale_taxable_sum': 'gross_sale_taxable',
'gross_COGS_taxable_sum': 'gross_COGS_taxable',
'returned_quantity_sum': 'returned_quantity',
'gross_returns_sum': 'gross_returns',
'returns_COGS_sum': 'returns_COGS',
'gross_returns_taxable_sum': 'gross_returns_taxable',
'returns_COGS_taxable_sum': 'returns_COGS_taxable'}, inplace=True)
gross_returns1[['net_sale','net_sale_taxable','net_COGS_taxable']] = gross_returns1[['net_sale','net_sale_taxable','net_COGS_taxable']].astype(float)
gross_returns2 = pd.pivot_table(gross_returns1,
values='net_sale',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
gross_returns2['tag_flag'] = 'net_sale'
gross_returns3 = pd.pivot_table(gross_returns1,
values='net_sale_taxable',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
gross_returns3['tag_flag'] = 'net_sale_taxable'
gross_returns4 = pd.pivot_table(gross_returns1,
values='net_COGS_taxable',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
gross_returns4['tag_flag'] = 'net_COGS_taxable'
net_sale_taxes_cogs = pd.concat([gross_returns2,
gross_returns3,
gross_returns4])
cols_to_move = ['tag_flag', 'type1', 'order_source']
net_sale_taxes_cogs = net_sale_taxes_cogs[cols_to_move +
[col for col in net_sale_taxes_cogs.columns
if col not in cols_to_move]]
return net_sale_taxes_cogs
if fofo_tag == 'yes':
gross = gross[gross['franchisee_id'] != 1]
returns = returns[returns['franchisee_id'] != 1]
gross['gross_sale'] = gross['quantity'] * gross['rate']
gross['gross_COGS'] = gross['quantity'] * gross['wc_ptr']
gross['gross_sale_taxable'] = np.vectorize(self.taxable_value)(gross['quantity'], gross['rate'],
gross['cgst_rate'], gross['sgst_rate'],
gross['igst_rate'])
gross['gross_COGS_taxable'] = np.vectorize(self.taxable_value)(gross['quantity'], gross['wc_ptr'],
gross['cgst_rate'],
gross['sgst_rate'], gross['igst_rate'])
gross_sale_summary = gross.groupby(['store_id', 'type1', 'order_source','fofo_distributor'],
as_index=False).agg({
'quantity': ['sum'],
'gross_sale': ['sum'],
'gross_COGS': ['sum'],
'gross_sale_taxable': ['sum'],
'gross_COGS_taxable': ['sum']
}).reset_index(drop=True)
gross_sale_summary.columns = ["_".join(x) for x in gross_sale_summary.columns.ravel()]
returns['gross_returns'] = returns['rate'] * returns['returned_quantity']
returns['returns_COGS'] = returns['wc_ptr'] * returns['returned_quantity']
returns['gross_returns_taxable'] = np.vectorize(self.taxable_value)(returns['returned_quantity'],
returns['rate'],
returns['cgst_rate'],
returns['sgst_rate'],
returns['igst_rate'])
returns['returns_COGS_taxable'] = np.vectorize(self.taxable_value)(returns['returned_quantity'],
returns['wc_ptr'],
returns['cgst_rate'],
returns['sgst_rate'],
returns['igst_rate'])
returns_summary = returns.groupby(['store_id', 'type1', 'order_source','fofo_distributor'],
as_index=False).agg({
'returned_quantity': ['sum'],
'gross_returns': ['sum'],
'returns_COGS': ['sum'],
'gross_returns_taxable': ['sum'],
'returns_COGS_taxable': ['sum']}).reset_index(drop=True)
returns_summary.columns = ["_".join(x) for x in returns_summary.columns.ravel()]
gross_returns = pd.merge(left=gross_sale_summary, right=returns_summary,
how='outer', on=['store_id_', 'type1_', 'order_source_','fofo_distributor_'])
gross_returns.fillna(0, inplace=True)
gross_returns['net_sale'] = gross_returns['gross_sale_sum'] - gross_returns['gross_returns_sum']
gross_returns['net_sale_taxable'] = gross_returns['gross_sale_taxable_sum'] - gross_returns[
'gross_returns_taxable_sum']
gross_returns['net_COGS'] = gross_returns['gross_COGS_sum'] - gross_returns['returns_COGS_sum']
gross_returns['net_COGS_taxable'] = gross_returns['gross_COGS_taxable_sum'] - gross_returns[
'returns_COGS_taxable_sum']
gross_returns1 = pd.merge(left=gross_returns, right=stores,
how='left',
left_on=['store_id_'],
right_on=['store_id'])
gross_returns1.rename(columns={'type1_': 'type1',
'order_source_': 'order_source',
'fofo_distributor_':'fofo_distributor',
'quantity_sum': 'quantity',
'gross_sale_sum': 'gross_sales',
'gross_COGS_sum': 'gross_COGS',
'gross_sale_taxable_sum': 'gross_sale_taxable',
'gross_COGS_taxable_sum': 'gross_COGS_taxable',
'returned_quantity_sum': 'returned_quantity',
'gross_returns_sum': 'gross_returns',
'returns_COGS_sum': 'returns_COGS',
'gross_returns_taxable_sum': 'gross_returns_taxable',
'returns_COGS_taxable_sum': 'returns_COGS_taxable'}, inplace=True)
gross_returns1[['net_sale', 'net_sale_taxable', 'net_COGS_taxable']] = gross_returns1[
['net_sale', 'net_sale_taxable', 'net_COGS_taxable']].astype(float)
gross_returns2 = pd.pivot_table(gross_returns1,
values='net_sale',
index=['type1', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
gross_returns2['tag_flag'] = 'net_sale'
gross_returns3 = pd.pivot_table(gross_returns1,
values='net_sale_taxable',
index=['type1', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
gross_returns3['tag_flag'] = 'net_sale_taxable'
gross_returns4 = pd.pivot_table(gross_returns1,
values='net_COGS_taxable',
index=['type1', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
gross_returns4['tag_flag'] = 'net_COGS_taxable'
net_sale_taxes_cogs = pd.concat([gross_returns2,
gross_returns3,
gross_returns4])
cols_to_move = ['tag_flag', 'type1', 'order_source','fofo_distributor']
net_sale_taxes_cogs = net_sale_taxes_cogs[cols_to_move +
[col for col in net_sale_taxes_cogs.columns
if col not in cols_to_move]]
return net_sale_taxes_cogs
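# inventory_ageing: buckets store inventory by age since creation (0-30 / 31-60 /
# 61-90 / 90+ days as of the analysis end date) and pivots the VAT-stripped taxable
# value per drug type (and per fofo_distributor when fofo_tag='yes').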
def inventory_ageing(self, Inventory, stores, mis_tag = 'breakup',fofo_tag = 'no'):
inventory_data = Inventory.copy(deep = True)
inventory_data['value'] = inventory_data['quantity'] * inventory_data['final_ptr']
inventory_data['days'] = (pd.to_datetime(self.analysis_end_time) - inventory_data['created_at']).dt.days
conditions = [
(inventory_data['days'] >= 0) & (inventory_data['days'] <= 30),
(inventory_data['days'] >= 31) & (inventory_data['days'] <= 60),
(inventory_data['days'] >= 61) & (inventory_data['days'] <= 90),
(inventory_data['days'] >= 91)]
choices = ['0_30', '31_60', '61_90', '90+']
inventory_data['age_bracket'] = np.select(conditions, choices)
inventory_data['vat'] = inventory_data['vat'].fillna(0)
inventory_data['vat'] = inventory_data['vat'].astype(float)
inventory_data['taxable'] = np.vectorize(self.taxable_value_vat_based)(inventory_data['quantity'],
inventory_data['final_ptr'],
inventory_data['vat'])
if fofo_tag == 'no':
df_ageing = inventory_data.groupby(['store_id', 'type1', 'category', 'age_bracket'],
as_index=False).agg({
'drug_id': pd.Series.nunique,
'value': ['sum'],
'taxable': ['sum']}).reset_index(drop=True)
df_ageing.columns = ["_".join(x) for x in df_ageing.columns.ravel()]
df_ageing = pd.merge(left=df_ageing, right=stores,
how='left', left_on=['store_id_'], right_on=['store_id'])
df_ageing_grp = df_ageing.groupby(['store_id_', 'store_name',
'type1_', 'age_bracket_'])[['taxable_sum']].sum().reset_index()
# generic
df_ageing_generic = df_ageing_grp[df_ageing_grp['type1_'] == 'generic']
df_ageing_generic1 = pd.pivot_table(df_ageing_generic,
values='taxable_sum',
index=['type1_', 'age_bracket_'],
columns=['store_name']).reset_index()
# ethical
df_ageing_ethical = df_ageing_grp[df_ageing_grp['type1_'] == 'ethical']
df_ageing_ethical1 = pd.pivot_table(df_ageing_ethical,
values='taxable_sum',
index=['type1_', 'age_bracket_'],
columns=['store_name']).reset_index()
# others
df_ageing_grp_others = df_ageing.groupby(['store_id_', 'store_name',
'type1_'])[['taxable_sum']].sum().reset_index()
df_ageing_others = df_ageing_grp_others[df_ageing_grp_others['type1_'] == 'others']
df_ageing_others1 = pd.pivot_table(df_ageing_others,
values='taxable_sum',
index=['type1_'],
columns=['store_name']).reset_index()
if mis_tag == 'breakup':
# GOODAID
df_ageing_goodaid = df_ageing_grp[df_ageing_grp['type1_'] == 'GOODAID']
df_ageing_goodaid1 = pd.pivot_table(df_ageing_goodaid,
values='taxable_sum',
index=['type1_', 'age_bracket_'],
columns=['store_name']).reset_index()
inventory_ageing = pd.concat([df_ageing_generic1,
df_ageing_ethical1,
df_ageing_others1,
df_ageing_goodaid1],sort=True)
elif mis_tag=='unified':
inventory_ageing = pd.concat([df_ageing_generic1,
df_ageing_ethical1,
df_ageing_others1],sort=True)
else:
self.logger.info('please pass correct mis_tag')
return None
inventory_ageing['tag_flag'] = 'inventory_ageing'
cols_to_move = ['tag_flag', 'type1_', 'age_bracket_']
inventory_ageing = inventory_ageing[cols_to_move +
[col for col in inventory_ageing.columns
if col not in cols_to_move]]
inventory_ageing.rename(columns={'type1_': 'type1'}, inplace=True)
return inventory_ageing
elif fofo_tag == 'yes':
inventory_data = inventory_data[inventory_data['franchisee_id'] != 1]
df_ageing = inventory_data.groupby(['store_id', 'type1', 'category','fofo_distributor', 'age_bracket'],
as_index=False).agg({
'drug_id': pd.Series.nunique,
'value': ['sum'],
'taxable': ['sum']}).reset_index(drop=True)
df_ageing.columns = ["_".join(x) for x in df_ageing.columns.ravel()]
df_ageing = pd.merge(left=df_ageing, right=stores,
how='left', left_on=['store_id_'], right_on=['store_id'])
df_ageing_grp = df_ageing.groupby(['store_id_', 'store_name',
'type1_','fofo_distributor_' ,'age_bracket_'])[['taxable_sum']].sum().reset_index()
# generic
df_ageing_generic = df_ageing_grp[df_ageing_grp['type1_'] == 'generic']
df_ageing_generic1 = pd.pivot_table(df_ageing_generic,
values='taxable_sum',
index=['type1_', 'age_bracket_','fofo_distributor_'],
columns=['store_name']).reset_index()
# ethical
df_ageing_ethical = df_ageing_grp[df_ageing_grp['type1_'] == 'ethical']
df_ageing_ethical1 = pd.pivot_table(df_ageing_ethical,
values='taxable_sum',
index=['type1_', 'age_bracket_','fofo_distributor_'],
columns=['store_name']).reset_index()
# others
df_ageing_grp_others = df_ageing.groupby(['store_id_', 'store_name',
'type1_','fofo_distributor_'])[['taxable_sum']].sum().reset_index()
df_ageing_others = df_ageing_grp_others[df_ageing_grp_others['type1_'] == 'others']
df_ageing_others1 = pd.pivot_table(df_ageing_others,
values='taxable_sum',
index=['type1_','fofo_distributor_'],
columns=['store_name']).reset_index()
if mis_tag == 'breakup':
# GOODAID
df_ageing_goodaid = df_ageing_grp[df_ageing_grp['type1_'] == 'GOODAID']
df_ageing_goodaid1 = pd.pivot_table(df_ageing_goodaid,
values='taxable_sum',
index=['type1_','fofo_distributor_' ,'age_bracket_'],
columns=['store_name']).reset_index()
inventory_ageing = pd.concat([df_ageing_generic1,
df_ageing_ethical1,
df_ageing_others1,
df_ageing_goodaid1], sort=True)
elif mis_tag == 'unified':
inventory_ageing = pd.concat([df_ageing_generic1,
df_ageing_ethical1,
df_ageing_others1], sort=True)
else:
self.logger.info('please pass correct mis_tag')
return None
inventory_ageing['tag_flag'] = 'inventory_ageing'
cols_to_move = ['tag_flag', 'type1_', 'fofo_distributor_','age_bracket_']
inventory_ageing = inventory_ageing[cols_to_move +
[col for col in inventory_ageing.columns
if col not in cols_to_move]]
inventory_ageing.rename(columns={'type1_': 'type1',
'fofo_distributor_':'fofo_distributor'}, inplace=True)
return inventory_ageing
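# near_expiry: taxable value of stock expiring within 90 days after the analysis end
# date (days_to_expiry between -90 and 0), pivoted per drug type and store; a NaN
# placeholder row is emitted when a breakup MIS has no GOODAID stock near expiry.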
def near_expiry(self,Inventory,stores,mis_tag = 'breakup',fofo_tag = 'no'):
inventory_data = Inventory.copy(deep=True)
inventory_data['value'] = inventory_data['quantity'] * inventory_data['final_ptr']
inventory_data['expiry_date'] = pd.to_datetime(inventory_data['expiry'], format='%Y-%m-%d %H:%M:%S',
errors='coerce')
inventory_data['days_to_expiry'] = (pd.to_datetime(self.analysis_end_time) - inventory_data['expiry_date']).dt.days
inventory_data1 = inventory_data[
(inventory_data['days_to_expiry'] < 0) & (inventory_data['days_to_expiry'] > -90)]
inventory_data1['taxable'] = np.vectorize(self.taxable_value_vat_based)(inventory_data1['quantity'], inventory_data1['final_ptr'],inventory_data1['vat'])
if fofo_tag == 'no':
near_expiry = inventory_data1.groupby(['store_id', 'type1'],
as_index=False).agg({
'value': ['sum'],
'taxable': ['sum']}).reset_index(drop=True)
near_expiry.columns = ["_".join(x) for x in near_expiry.columns.ravel()]
near_expiry = pd.merge(left=near_expiry, right=stores,
how='left', left_on=['store_id_'],
right_on=['store_id'])
# generic
near_expiry_generic = near_expiry[near_expiry['type1_'] == 'generic']
near_expiry_generic1 = pd.pivot_table(near_expiry_generic,
values='taxable_sum',
index=['type1_'],
columns=['store_name']).reset_index()
# ethical
near_expiry_ethical = near_expiry[near_expiry['type1_'] == 'ethical']
near_expiry_ethical1 = pd.pivot_table(near_expiry_ethical,
values='taxable_sum',
index=['type1_'],
columns=['store_name']).reset_index()
# others
near_expiry_others = near_expiry[near_expiry['type1_'] == 'others']
near_expiry_others1 = pd.pivot_table(near_expiry_others,
values='taxable_sum',
index=['type1_'],
columns=['store_name']).reset_index()
if mis_tag == 'breakup':
near_expiry_goodaid = near_expiry[near_expiry['type1_'] == 'GOODAID']
# If there are no GOODAID items near expiry, fall back to an all-NaN placeholder row
if len(near_expiry_goodaid) != 0:
near_expiry_goodaid1 = pd.pivot_table(near_expiry_goodaid,
values='taxable_sum',
index=['type1_'],
columns=['store_name']).reset_index()
else:
near_expiry_goodaid1 = near_expiry_ethical1.copy(deep=True)
near_expiry_goodaid1.loc[:] = np.nan
near_expiry_goodaid1.loc[0, 'type1_'] = 'GOODAID'
near_expiry = pd.concat([near_expiry_generic1,
near_expiry_ethical1,
near_expiry_others1,
near_expiry_goodaid1],sort=True)
elif mis_tag=='unified':
near_expiry = pd.concat([near_expiry_generic1,
near_expiry_ethical1,
near_expiry_others1],sort=True)
else:
self.logger.info('please pass correct mis_tag')
return None
near_expiry['tag_flag'] = 'near_expiry'
cols_to_move = ['tag_flag', 'type1_']
near_expiry = near_expiry[cols_to_move +
[col for col in near_expiry.columns
if col not in cols_to_move]]
near_expiry.rename(columns={'type1_': 'type1'}, inplace=True)
return near_expiry
elif fofo_tag == 'yes':
inventory_data1 = inventory_data1[inventory_data1['franchisee_id']!=1]
near_expiry = inventory_data1.groupby(['store_id', 'type1','fofo_distributor'],
as_index=False).agg({
'value': ['sum'],
'taxable': ['sum']}).reset_index(drop=True)
near_expiry.columns = ["_".join(x) for x in near_expiry.columns.ravel()]
near_expiry = pd.merge(left=near_expiry, right=stores,
how='left', left_on=['store_id_'],
right_on=['store_id'])
# generic
near_expiry_generic = near_expiry[near_expiry['type1_'] == 'generic']
near_expiry_generic1 = pd.pivot_table(near_expiry_generic,
values='taxable_sum',
index=['type1_','fofo_distributor_'],
columns=['store_name']).reset_index()
# ethical
near_expiry_ethical = near_expiry[near_expiry['type1_'] == 'ethical']
near_expiry_ethical1 = pd.pivot_table(near_expiry_ethical,
values='taxable_sum',
index=['type1_','fofo_distributor_'],
columns=['store_name']).reset_index()
# others
near_expiry_others = near_expiry[near_expiry['type1_'] == 'others']
near_expiry_others1 = pd.pivot_table(near_expiry_others,
values='taxable_sum',
index=['type1_','fofo_distributor_'],
columns=['store_name']).reset_index()
if mis_tag == 'breakup':
near_expiry_goodaid = near_expiry[near_expiry['type1_'] == 'GOODAID']
# If there are no GOODAID items near expiry, fall back to an all-NaN placeholder row
if len(near_expiry_goodaid) != 0:
near_expiry_goodaid1 = pd.pivot_table(near_expiry_goodaid,
values='taxable_sum',
index=['type1_','fofo_distributor_'],
columns=['store_name']).reset_index()
else:
near_expiry_goodaid1 = near_expiry_ethical1.copy(deep=True)
near_expiry_goodaid1.loc[:] = np.nan
near_expiry_goodaid1.loc[0, 'type1_'] = 'GOODAID'
near_expiry = pd.concat([near_expiry_generic1,
near_expiry_ethical1,
near_expiry_others1,
near_expiry_goodaid1], sort=True)
elif mis_tag == 'unified':
near_expiry = pd.concat([near_expiry_generic1,
near_expiry_ethical1,
near_expiry_others1], sort=True)
else:
self.logger.info('please pass correct mis_tag')
return None
near_expiry['tag_flag'] = 'near_expiry'
cols_to_move = ['tag_flag', 'type1_','fofo_distributor_']
near_expiry = near_expiry[cols_to_move +
[col for col in near_expiry.columns
if col not in cols_to_move]]
near_expiry.rename(columns={'type1_': 'type1',
'fofo_distributor_':'fofo_distributor'}, inplace=True)
return near_expiry
def sales_by_volume(self,Sales,stores):
sales = Sales.copy(deep = True)
generic_volume = sales.groupby(['store_id',
'type1', 'order_source'])[['quantity']].sum().reset_index().rename(
columns={'quantity': "generic_volume"})
generic_volume = pd.merge(left=generic_volume, right=stores,
how='left', left_on=['store_id'],
right_on=['store_id'])
generic_volume = pd.pivot_table(generic_volume,
values='generic_volume',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
generic_volume['tag_flag'] = 'sales_by_volume'
return generic_volume
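# gross_rev_chronic_acute: net revenue and net quantity (bills minus customer returns)
# split into chronic and acute categories, pivoted per drug type, order source and store.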
def gross_rev_chronic_acute(self,Sales,Returns,stores):
sales = Sales.copy(deep = True)
returns = Returns.copy(deep = True)
sales['COGS'] = sales['quantity'] * sales['final_ptr']
df_a1a = sales.groupby(['store_id', 'type1', 'category', 'order_source'],
as_index=False).agg({
'value': ['sum'],
'quantity': ['sum'],
'COGS': ['sum'],
'bill_id': pd.Series.nunique,
'drug_id': pd.Series.nunique}).reset_index(drop=True)
df_a1a.columns = ["_".join(x) for x in df_a1a.columns.ravel()]
returns['returned_value'] = returns['returned_quantity'] * returns['rate']
returns['returned_COGS'] = returns['returned_quantity'] * returns['final_ptr']
df_b2 = returns.groupby(['store_id', 'type1', 'category', 'order_source'],
as_index=False).agg({
'returned_value': ['sum'],
'returned_quantity': ['sum'],
'returned_COGS': ['sum'],
'returned_bill_id': pd.Series.nunique,
'returned_drug_id': pd.Series.nunique}).reset_index(drop=True)
df_b2.columns = ["_".join(x) for x in df_b2.columns.ravel()]
df_a_b = pd.merge(left=df_a1a, right=df_b2,
how='outer', on=['store_id_', 'type1_',
'category_', 'order_source_'])
df_a_b.fillna(0, inplace=True)
df_a_b['net_sale'] = df_a_b['value_sum'] - df_a_b['returned_value_sum']
df_a_b['net_COGS'] = df_a_b['COGS_sum'] - df_a_b['returned_COGS_sum']
df_a_b['net_quantity'] = df_a_b['quantity_sum'] - df_a_b['returned_quantity_sum']
df_a4 = df_a_b.groupby(['store_id_', 'category_',
'type1_', 'order_source_'],
as_index=False).agg({
'net_sale': ['sum'],
'net_quantity': ['sum'],
'net_COGS': ['sum'],
'bill_id_nunique': ['sum'],
'drug_id_nunique': ['sum']}).reset_index(drop=True)
df_a4.columns = ["_".join(x) for x in df_a4.columns.ravel()]
df_a4 = pd.merge(left=df_a4, right=stores,
how='left', left_on=['store_id__'],
right_on=['store_id'])
df_a5 = df_a4[df_a4['category__'] == 'chronic']
df_a5_sale = df_a5.groupby(['store_id__', 'store_name',
'category__', 'type1__', 'order_source__'])[['net_sale_sum']].sum().reset_index()
df_a5_qty = df_a5.groupby(['store_id__', 'store_name',
'category__', 'type1__', 'order_source__'])[['net_quantity_sum']].sum().reset_index()
df_a5_sale['net_sale_sum'] = df_a5_sale['net_sale_sum'].astype(float)
gross_rev_chronic_sale = pd.pivot_table(df_a5_sale,
values='net_sale_sum',
index=['category__', 'type1__', 'order_source__'],
columns=['store_name']).reset_index()
df_a5_qty['net_quantity_sum'] = df_a5_qty['net_quantity_sum'].astype(float)
gross_rev_chronic_vol = pd.pivot_table(df_a5_qty,
values='net_quantity_sum',
index=['category__', 'type1__', 'order_source__'],
columns=['store_name']).reset_index()
df_a6 = df_a4[df_a4['category__'] == 'acute']
df_a6_sale = df_a6.groupby(['store_id__', 'store_name',
'category__', 'type1__', 'order_source__'])[['net_sale_sum']].sum().reset_index()
df_a6_qty = df_a6.groupby(['store_id__', 'store_name',
'category__', 'type1__', 'order_source__'])[['net_quantity_sum']].sum().reset_index()
df_a6_sale['net_sale_sum'] = df_a6_sale['net_sale_sum'].astype(float)
gross_rev_acute_sale = pd.pivot_table(df_a6_sale,
values='net_sale_sum',
index=['category__', 'type1__', 'order_source__'],
columns=['store_name']).reset_index()
df_a6_qty['net_quantity_sum'] = df_a6_qty['net_quantity_sum'].astype(float)
gross_rev_acute_vol = pd.pivot_table(df_a6_qty,
values='net_quantity_sum',
index=['category__', 'type1__', 'order_source__'],
columns=['store_name']).reset_index()
gross_rev_chronic_sale_vol = pd.concat([gross_rev_chronic_sale,
gross_rev_chronic_vol])
gross_rev_chronic_sale_vol['tag_flag'] = 'gross_rev_chronic_sale_vol'
gross_rev_chronic_sale_vol.rename(columns={'type1__': 'type1'}, inplace=True)
gross_rev_chronic_sale_vol.rename(columns={'category__': 'category'}, inplace=True)
gross_rev_chronic_sale_vol.rename(columns={'order_source__': 'order_source'}, inplace=True)
gross_rev_acute_sale_vol = pd.concat([gross_rev_acute_sale,
gross_rev_acute_vol])
gross_rev_acute_sale_vol['tag_flag'] = 'gross_rev_acute_sale_vol'
gross_rev_acute_sale_vol.rename(columns={'type1__': 'type1'}, inplace=True)
gross_rev_acute_sale_vol.rename(columns={'category__': 'category'}, inplace=True)
gross_rev_acute_sale_vol.rename(columns={'order_source__': 'order_source'}, inplace=True)
return gross_rev_chronic_sale_vol, gross_rev_acute_sale_vol
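# cummulative_cons: pivots lifetime consumer counts (total, generic, GOODAID, chronic,
# acute) with store names as columns; under 'breakup' the generic count excludes
# GOODAID consumers.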
def cummulative_cons(self, Cumulative_consumers_data, mis_tag):
cumulative_consumers_data = Cumulative_consumers_data.copy(deep=True)
all_time_cons1 = pd.pivot_table(cumulative_consumers_data,
values='total_cons',
columns=['store_name']).reset_index()
if mis_tag == 'breakup':
cumulative_consumers_data.rename(columns={'generic_without_gaid_cons': 'total_generic_cons'}, inplace=True)
all_time_generic_cons1 = pd.pivot_table(cumulative_consumers_data,
values='total_generic_cons',
columns=['store_name']).reset_index()
all_time_gaid_cons1 = pd.pivot_table(cumulative_consumers_data,
values='total_gaid_cons',
columns=['store_name']).reset_index()
else:
cumulative_consumers_data.rename(columns={'generic_cons': 'total_generic_cons'}, inplace=True)
all_time_generic_cons1 = pd.pivot_table(cumulative_consumers_data,
values='total_generic_cons',
columns=['store_name']).reset_index()
all_time_chronic_cons1 = pd.pivot_table(cumulative_consumers_data,
values='total_chronic_cons',
columns=['store_name']).reset_index()
cumulative_consumers_data['total_acute_cons'] = cumulative_consumers_data['total_cons'] - \
cumulative_consumers_data['total_chronic_cons']
all_time_acute_cons1 = pd.pivot_table(cumulative_consumers_data,
values='total_acute_cons',
columns=['store_name']).reset_index()
if mis_tag == 'breakup':
cummulative_cons = pd.concat([all_time_cons1, all_time_generic_cons1,
all_time_gaid_cons1,
all_time_chronic_cons1, all_time_acute_cons1], sort=True)
else:
cummulative_cons = pd.concat([all_time_cons1, all_time_generic_cons1,
all_time_chronic_cons1, all_time_acute_cons1], sort=True)
cummulative_cons.rename(columns={'index': 'tag_flag'}, inplace=True)
return cummulative_cons
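# cummulative_cons_fofo: same pivot as above, built separately from the Workcell-supplied
# and other-distributor consumer extracts and stacked with a fofo_distributor label.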
def cummulative_cons_fofo(self, Workcell_cumulative_consumers_fofo_data,Others_cumulative_consumers_fofo_data, mis_tag):
workcell_cumulative_consumers_fofo_data = Workcell_cumulative_consumers_fofo_data.copy(deep=True)
others_cumulative_consumers_data = Others_cumulative_consumers_fofo_data.copy(deep=True)
workcell_all_time_cons1 = pd.pivot_table(workcell_cumulative_consumers_fofo_data,
values='total_cons',
columns=['store_name']).reset_index()
others_all_time_cons1 = pd.pivot_table(others_cumulative_consumers_data,
values='total_cons',
columns=['store_name']).reset_index()
if mis_tag == 'breakup':
workcell_cumulative_consumers_fofo_data.rename(columns={'generic_without_gaid_cons': 'total_generic_cons'}, inplace=True)
workcell_all_time_generic_cons1 = pd.pivot_table(workcell_cumulative_consumers_fofo_data,
values='total_generic_cons',
columns=['store_name']).reset_index()
workcell_all_time_gaid_cons1 = pd.pivot_table(workcell_cumulative_consumers_fofo_data,
values='total_gaid_cons',
columns=['store_name']).reset_index()
others_cumulative_consumers_data.rename(columns={'generic_without_gaid_cons': 'total_generic_cons'}, inplace=True)
others_all_time_generic_cons1 = pd.pivot_table(others_cumulative_consumers_data,
values='total_generic_cons',
columns=['store_name']).reset_index()
others_all_time_gaid_cons1 = pd.pivot_table(others_cumulative_consumers_data,
values='total_gaid_cons',
columns=['store_name']).reset_index()
else:
workcell_cumulative_consumers_fofo_data.rename(columns={'generic_cons': 'total_generic_cons'}, inplace=True)
workcell_all_time_generic_cons1 = pd.pivot_table(workcell_cumulative_consumers_fofo_data,
values='total_generic_cons',
columns=['store_name']).reset_index()
others_cumulative_consumers_data.rename(columns={'generic_cons': 'total_generic_cons'}, inplace=True)
others_all_time_generic_cons1 = pd.pivot_table(others_cumulative_consumers_data,
values='total_generic_cons',
columns=['store_name']).reset_index()
workcell_all_time_chronic_cons1 = pd.pivot_table(workcell_cumulative_consumers_fofo_data,
values='total_chronic_cons',
columns=['store_name']).reset_index()
others_all_time_chronic_cons1 = pd.pivot_table(others_cumulative_consumers_data,
values='total_chronic_cons',
columns=['store_name']).reset_index()
workcell_cumulative_consumers_fofo_data['total_acute_cons'] = workcell_cumulative_consumers_fofo_data['total_cons'] - \
workcell_cumulative_consumers_fofo_data['total_chronic_cons']
others_cumulative_consumers_data['total_acute_cons'] = others_cumulative_consumers_data['total_cons'] - \
others_cumulative_consumers_data['total_chronic_cons']
workcell_all_time_acute_cons1 = pd.pivot_table(workcell_cumulative_consumers_fofo_data,
values='total_acute_cons',
columns=['store_name']).reset_index()
others_all_time_acute_cons1 = pd.pivot_table(others_cumulative_consumers_data,
values='total_acute_cons',
columns=['store_name']).reset_index()
workcell_all_time_cons1['fofo_distributor'] = 'workcell'
workcell_all_time_generic_cons1['fofo_distributor'] = 'workcell'
workcell_all_time_chronic_cons1['fofo_distributor'] = 'workcell'
workcell_all_time_acute_cons1['fofo_distributor'] = 'workcell'
others_all_time_cons1['fofo_distributor'] = 'other'
others_all_time_generic_cons1['fofo_distributor'] = 'other'
others_all_time_chronic_cons1['fofo_distributor'] = 'other'
others_all_time_acute_cons1['fofo_distributor'] = 'other'
if mis_tag == 'breakup':
workcell_all_time_gaid_cons1['fofo_distributor'] = 'workcell'
others_all_time_gaid_cons1['fofo_distributor'] = 'other'
cummulative_cons_fofo = pd.concat([workcell_all_time_cons1, others_all_time_cons1, workcell_all_time_generic_cons1, others_all_time_generic_cons1,
workcell_all_time_gaid_cons1, others_all_time_gaid_cons1,
workcell_all_time_chronic_cons1, others_all_time_chronic_cons1, workcell_all_time_acute_cons1, others_all_time_acute_cons1], sort=True)
else:
cummulative_cons_fofo = pd.concat([workcell_all_time_cons1, others_all_time_cons1, workcell_all_time_generic_cons1, others_all_time_generic_cons1,
workcell_all_time_chronic_cons1, others_all_time_chronic_cons1, workcell_all_time_acute_cons1, others_all_time_acute_cons1], sort=True)
cummulative_cons_fofo.rename(columns={'index': 'tag_flag'}, inplace=True)
return cummulative_cons_fofo
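# Unique consumers billed in the MIS month, per store and order source; fofo_tag='yes'
# additionally drops franchisee_id 1 and splits the counts by fofo_distributor.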
def total_cons_mis_month(self,Sales, Stores,fofo_tag = 'no'):
sales = Sales.copy(deep = True)
stores = Stores.copy(deep = True)
sales = pd.merge(left=sales, right=stores,
how='left', on=['store_id'])
if fofo_tag == 'yes':
sales = sales[sales['franchisee_id'] != 1]
if fofo_tag == 'no':
total_cons = sales.groupby(['store_id',
'store_name', 'order_source'])[['patient_id']].nunique().reset_index().rename(
columns={'patient_id': "total_consumers_MIS_month"})
total_cons['tag_flag'] = 'total_cons_mis_month'
total_cons_mis_month = pd.pivot_table(total_cons,
values='total_consumers_MIS_month',
index=['tag_flag', 'order_source'],
columns=['store_name']).reset_index()
elif fofo_tag =='yes':
total_cons = sales.groupby(['store_id',
'store_name', 'fofo_distributor' ,'order_source'])[['patient_id']].nunique().reset_index().rename(
columns={'patient_id': "total_consumers_MIS_month"})
total_cons['tag_flag'] = 'total_cons_mis_month'
total_cons_mis_month = pd.pivot_table(total_cons,
values='total_consumers_MIS_month',
index=['tag_flag','fofo_distributor' , 'order_source'],
columns=['store_name']).reset_index()
# total_cons_mis_month.rename(columns={'index': 'tag_flag'}, inplace=True)
return total_cons_mis_month
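# Unique consumers per store split by category (chronic vs acute) and order source,
# pivoted to stores-as-columns and concatenated into a single frame.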
def category_wise_customer_type_count(self,Sales,Store):
sales = Sales.copy(deep = True)
stores = Store.copy(deep = True)
sales = pd.merge(left=sales, right=stores,
how='left', on=['store_id'])
sales['flag'] = np.where(sales['category'] == "chronic", 1, 0)
sales_chronic = sales[sales['category'] == 'chronic']
sales_chronic.loc[:,'tag_flag'] = "customer_type_chronic"
df49 = sales_chronic.groupby(['store_id', 'store_name',
'category', 'order_source', 'tag_flag'])[
['patient_id']].nunique().reset_index().rename(
columns={'patient_id': "customer_type_chronic"})
customer_type_chronic = pd.pivot_table(df49,
values='customer_type_chronic',
index=['tag_flag', 'category', 'order_source'],
columns=['store_name']).reset_index()
sales_acute = sales[sales['category'] == 'acute']
sales_acute.loc[:,'tag_flag'] = "customer_type_acute"
df50 = sales_acute.groupby(['store_id', 'store_name',
'category', 'order_source', 'tag_flag'])[
['patient_id']].nunique().reset_index().rename(
columns={'patient_id': "customer_type_acute"})
customer_type_acute = pd.pivot_table(df50,
values='customer_type_acute',
index=['tag_flag', 'category', 'order_source'],
columns=['store_name']).reset_index()
category_wise_customer_type = pd.concat([customer_type_chronic, customer_type_acute])
# customer_type.rename(columns={'index': 'tag_flag'}, inplace=True)
return category_wise_customer_type
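# New consumers: patients whose first-ever bill date matches their earliest bill in the
# MIS-month sales, plus the chronic subset of those new consumers.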
def new_customers(self,Sales,All_cons_initial_bill_date,Stores):
sales = Sales.copy(deep = True)
all_cons_initial_bill_date = All_cons_initial_bill_date.copy(deep = True)
stores = Stores.copy(deep= True)
mis_month_cons_min_bill_date = sales.groupby(['store_id',
'order_source', 'patient_id'])[['created_at']].min().reset_index()
new_cons_1 = pd.merge(left=all_cons_initial_bill_date, right=mis_month_cons_min_bill_date,
how='inner', on=['store_id', 'patient_id', 'created_at'])
new_cons_1['flag'] = "new_cons"
new_cons = new_cons_1.groupby(['store_id', 'order_source'])[['patient_id']].nunique().reset_index().rename(
columns={'patient_id': "new_consumers"})
new_cons = pd.merge(left=new_cons, right=stores,
how='left',
on=['store_id'])
new_cons['tag_flag'] = 'new_consumers'
new_cons_total = pd.pivot_table(new_cons,
values='new_consumers',
index=['tag_flag', 'order_source'],
columns=['store_name']).reset_index()
# new chronic consumers
df_fe = pd.merge(left=new_cons_1[['store_id', 'order_source',
'patient_id', 'flag']],
right=sales,
how='left',
on=['store_id', 'order_source', 'patient_id'])
new_cons_chronic = df_fe.groupby(['store_id',
'category', 'order_source'])[['patient_id']].nunique().reset_index().rename(
columns={'patient_id': "new_chronic_consumers"})
new_cons_chronic1 = new_cons_chronic[new_cons_chronic['category'].isin(['chronic'])]
new_cons_chronic1 = pd.merge(left=new_cons_chronic1, right=stores,
how='left',
on=['store_id'])
new_cons_chronic1['tag_flag'] = 'new_chronic_consumers'
new_cons_chronic2 = pd.pivot_table(new_cons_chronic1,
values='new_chronic_consumers',
index=['tag_flag', 'order_source'],
columns=['store_name']).reset_index()
new_customers = pd.concat([new_cons_total, new_cons_chronic2],sort=True)
return new_customers
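# Repeat consumers = total unique consumers in the month minus new consumers
# (those whose first-ever bill falls in this month), per store and order source.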
def tot_repeat_consumers(self, Sales,All_cons_initial_bill_date,Stores ):
sales = Sales.copy(deep=True)
all_cons_initial_bill_date = All_cons_initial_bill_date.copy(deep=True)
stores = Stores.copy(deep=True)
total_cons = sales.groupby(['store_id',
'order_source'])[['patient_id']].nunique().reset_index().rename(
columns={'patient_id': "total_consumers"})
aug_cons_min = sales.groupby(['store_id', 'order_source', 'patient_id'])[['created_at']].min().reset_index()
aug_new_cons = pd.merge(left=all_cons_initial_bill_date, right=aug_cons_min,
how='inner', on=['store_id', 'patient_id', 'created_at'])
new_cons = aug_new_cons.groupby(['store_id', 'order_source'])[['patient_id']].nunique().reset_index().rename(
columns={'patient_id': "new_consumers"})
repeat_cons = pd.merge(left=total_cons, right=new_cons,
how='left', on=['store_id', 'order_source'])
repeat_cons['repeat_consumers'] = repeat_cons['total_consumers'] - repeat_cons['new_consumers']
repeat_cons = pd.merge(left=repeat_cons, right=stores,
how='left',
on=['store_id'])
repeat_cons['tag_flag'] = 'repeat_consumers'
repeat_cons1 = pd.pivot_table(repeat_cons,
values='repeat_consumers',
index=['tag_flag', 'order_source'],
columns=['store_name']).reset_index()
# repeat_cons1.rename(columns={'index': 'tag_flag'}, inplace=True)
return repeat_cons1
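# Sale value and quantity attributable to new consumers (first bill in the month),
# pivoted per store; tag_flag rows 'new_consumer_value' and 'new_consumer_qty'.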
def new_cons_vol_qty(self, Sales, All_cons_initial_bill_date, Stores):
sales = Sales.copy(deep=True)
all_cons_initial_bill_date = All_cons_initial_bill_date.copy(deep=True)
stores = Stores.copy(deep=True)
aug_cons_min = sales.groupby(['store_id', 'order_source', 'patient_id'])[['created_at']].min().reset_index()
aug_new_cons = pd.merge(left=all_cons_initial_bill_date, right=aug_cons_min,
how='inner', on=['store_id', 'patient_id', 'created_at'])
new_cons = aug_new_cons.groupby(['store_id', 'order_source'])[['patient_id']].nunique().reset_index().rename(
columns={'patient_id': "new_consumers"})
aug_value_volumne = sales.groupby(['store_id', 'order_source', 'patient_id'],
as_index=False).agg({
'drug_id': pd.Series.nunique,
'quantity': ['sum'],
'value': ['sum']}).reset_index(drop=True)
aug_value_volumne.columns = ["_".join(x) for x in aug_value_volumne.columns.ravel()]
aug_value_volumne.rename(columns={'store_id_': "store_id",
'patient_id_': "patient_id",
'order_source_': "order_source"}, inplace=True)
new_cons_value_vol = pd.merge(left=aug_new_cons, right=aug_value_volumne,
how='left', on=['store_id', 'order_source', 'patient_id'])
new_cons_value_vol1 = new_cons_value_vol.groupby(['store_id', 'order_source'],
as_index=False).agg({
'quantity_sum': ['sum'],
'value_sum': ['sum']}).reset_index(drop=True)
new_cons_value_vol1.columns = ["_".join(x) for x in new_cons_value_vol1.columns.ravel()]
new_cons_value_vol2 = pd.merge(left=new_cons_value_vol1, right=stores,
how='left',
left_on=['store_id_'],
right_on=['store_id'])
new_cons_value_vol2['value_sum_sum'] = new_cons_value_vol2['value_sum_sum'].astype(float)
new_cons_volume = pd.pivot_table(new_cons_value_vol2,
values='value_sum_sum',
index=['order_source_'],
columns=['store_name']).reset_index()
new_cons_volume['tag_flag'] = 'new_consumer_value'
new_cons_value_vol2['quantity_sum_sum'] = new_cons_value_vol2['quantity_sum_sum'].astype(float)
new_cons_qty = pd.pivot_table(new_cons_value_vol2,
values='quantity_sum_sum',
index=['order_source_'],
columns=['store_name']).reset_index()
new_cons_qty['tag_flag'] = 'new_consumer_qty'
new_cons_vol_qty = pd.concat([new_cons_volume, new_cons_qty], sort=True)
new_cons_vol_qty.rename(columns={'order_source_': "order_source"}, inplace=True)
# new_cons_vol_qty.rename(columns={'index': 'tag_flag'}, inplace=True)
# new_cons_vol_qty.loc[new_cons_vol_qty.tag_flag == 'quantity_sum_sum', 'tag_flag'] = 'new_consumer_qty'
# new_cons_vol_qty.loc[new_cons_vol_qty.tag_flag == 'value_sum_sum', 'tag_flag'] = 'new_consumer_value'
return new_cons_vol_qty
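# Bill counts split between new consumers (first bill in the chosen year/month) and repeat
# consumers (first bill outside it), per store and order source (plus distributor for fofo).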
def total_bills_new_repeat(self, Sales, All_cons_initial_bill_date, Stores,choose_year,choose_month,fofo_tag = 'no'):
sales = Sales.copy(deep=True)
df1 = All_cons_initial_bill_date.copy(deep=True)
stores = Stores.copy(deep=True)
if fofo_tag == 'yes':
sales = sales[sales['franchisee_id'] != 1]
df1['year'] = df1['created_at'].dt.year
df1["month"] = df1['created_at'].dt.month
df1 = df1[(df1['year'] == int(choose_year)) & (df1['month'] == int(choose_month))]
sales['flag'] = np.where(sales['category'] == "chronic", 1, 0)
if fofo_tag=='no':
df2 = sales.groupby(['store_id', 'order_source', 'patient_id'])[['flag']].sum().reset_index()
elif fofo_tag=='yes':
df2 = sales.groupby(['store_id', 'order_source', 'fofo_distributor' ,'patient_id'])[['flag']].sum().reset_index()
df2['check'] = np.where(df2['flag'] > 0, "chronic", "acute")
df3 = pd.merge(left=df1, right=df2,
how='left', on=['store_id', 'patient_id'])
df5 = pd.merge(left=df2, right=df1,
how='left', on=['store_id', 'patient_id'])
df6 = df5[df5['year'].isnull()]
if fofo_tag == 'no':
df9 = sales.groupby(['store_id', 'order_source', 'patient_id'])[['bill_id']].nunique().reset_index()
df10 = pd.merge(left=df3, right=df9,
how='left', on=['store_id', 'order_source', 'patient_id'])
df11 = df10.groupby(['store_id', 'order_source'])[['bill_id']].sum().reset_index().rename(columns={
"bill_id": "new_consumers_bills_count"})
df12 = pd.merge(left=df6, right=df9,
how='left', on=['store_id', 'order_source', 'patient_id'])
df13 = df12.groupby(['store_id', 'order_source'])[['bill_id']].sum().reset_index().rename(columns={
"bill_id": "repeat_consumers_bills_count"})
df14 = pd.merge(left=df11, right=df13,
how='left', on=['store_id', 'order_source'])
elif fofo_tag == 'yes':
df9 = sales.groupby(['store_id', 'order_source', 'fofo_distributor' , 'patient_id'])[['bill_id']].nunique().reset_index()
df10 = pd.merge(left=df3, right=df9,
how='left', on=['store_id', 'order_source', 'fofo_distributor' , 'patient_id'])
df11 = df10.groupby(['store_id', 'fofo_distributor' , 'order_source'])[['bill_id']].sum().reset_index().rename(columns={
"bill_id": "new_consumers_bills_count"})
df12 = pd.merge(left=df6, right=df9,
how='left', on=['store_id', 'fofo_distributor' , 'order_source', 'patient_id'])
df13 = df12.groupby(['store_id', 'fofo_distributor' , 'order_source'])[['bill_id']].sum().reset_index().rename(columns={
"bill_id": "repeat_consumers_bills_count"})
df14 = pd.merge(left=df11, right=df13,
how='left', on=['store_id', 'fofo_distributor' , 'order_source'])
df14 = pd.merge(left=df14, right=stores, how='left', on=['store_id'])
if fofo_tag == 'no':
total_bills_new = pd.pivot_table(df14,
values='new_consumers_bills_count',
index='order_source',
columns=['store_name']).reset_index()
total_bills_new['tag_flag'] = 'new_consumers_bills_count'
total_bills_repeat = pd.pivot_table(df14,
values='repeat_consumers_bills_count',
index='order_source',
columns=['store_name']).reset_index()
total_bills_repeat['tag_flag'] = 'repeat_consumers_bills_count'
elif fofo_tag == 'yes':
total_bills_new = pd.pivot_table(df14,
values='new_consumers_bills_count',
index=['order_source','fofo_distributor'],
columns=['store_name']).reset_index()
total_bills_new['tag_flag'] = 'new_consumers_bills_count'
total_bills_repeat = pd.pivot_table(df14,
values='repeat_consumers_bills_count',
index=['order_source','fofo_distributor'],
columns=['store_name']).reset_index()
total_bills_repeat['tag_flag'] = 'repeat_consumers_bills_count'
total_bills_new_repeat = pd.concat([total_bills_new,
total_bills_repeat], sort=True)
return total_bills_new_repeat
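# Net bill counts by chronic/acute category and drug type1 after netting customer returns
# against sales; fofo_tag='yes' drops franchisee_id 1 and adds a fofo_distributor split.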
def total_bills_chronic_acute(self, Sales, Customer_returns, Stores, fofo_tag = 'no'):
sales = Sales.copy(deep=True)
customer_returns = Customer_returns.copy(deep=True)
stores = Stores.copy(deep=True)
sales['value'] = sales['quantity'] * sales['rate']
sales['COGS'] = sales['quantity'] * sales['final_ptr']
customer_returns['returned_value'] = customer_returns['returned_quantity'] * customer_returns['rate']
customer_returns['returned_COGS'] = customer_returns['returned_quantity'] * customer_returns['final_ptr']
if fofo_tag=='no':
df_a1a = sales.groupby(['store_id', 'order_source', 'type1', 'category'],
as_index=False).agg({
'value': ['sum'],
'quantity': ['sum'],
'COGS': ['sum'],
'bill_id': pd.Series.nunique,
'drug_id': pd.Series.nunique}).reset_index(drop=True)
df_a1a.columns = ["_".join(x) for x in df_a1a.columns.ravel()]
df_b2 = customer_returns.groupby(['store_id', 'order_source', 'type1', 'category'],
as_index=False).agg({
'returned_value': ['sum'],
'returned_quantity': ['sum'],
'returned_COGS': ['sum'],
'returned_bill_id': pd.Series.nunique,
'returned_drug_id': pd.Series.nunique}).reset_index(drop=True)
df_b2.columns = ["_".join(x) for x in df_b2.columns.ravel()]
df_a_b = pd.merge(left=df_a1a, right=df_b2,
how='outer', on=['store_id_', 'order_source_', 'type1_', 'category_'])
df_a_b.fillna(0, inplace=True)
df_a_b['net_sale'] = df_a_b['value_sum'] - df_a_b['returned_value_sum']
df_a_b['net_COGS'] = df_a_b['COGS_sum'] - df_a_b['returned_COGS_sum']
df_a_b['net_quantity'] = df_a_b['quantity_sum'] - df_a_b['returned_quantity_sum']
df_a4 = df_a_b.groupby(['store_id_', 'order_source_', 'category_', 'type1_'],
as_index=False).agg({
'net_sale': ['sum'],
'net_quantity': ['sum'],
'net_COGS': ['sum'],
'bill_id_nunique': ['sum'],
'drug_id_nunique': ['sum']}).reset_index(drop=True)
df_a4.columns = ["_".join(x) for x in df_a4.columns.ravel()]
df_a4 = pd.merge(left=df_a4, right=stores,
how='left', left_on=['store_id__'],
right_on=['store_id'])
df_a5 = df_a4[df_a4['category__'] == 'chronic']
total_bills_chronic = pd.pivot_table(df_a5,
values='bill_id_nunique_sum',
index=['order_source__', 'category__', 'type1__'],
columns=['store_name']).reset_index()
df_a6 = df_a4[df_a4['category__'] == 'acute']
total_bills_acute = pd.pivot_table(df_a6,
values='bill_id_nunique_sum',
index=['order_source__', 'category__', 'type1__'],
columns=['store_name']).reset_index()
total_bills_chronic_acute = pd.concat([total_bills_chronic, total_bills_acute])
total_bills_chronic_acute['tag_flag'] = 'total_bills'
total_bills_chronic_acute.rename(columns={'type1__': 'type1',
'category__': 'category',
'order_source__': 'order_source'}, inplace=True)
elif fofo_tag == 'yes':
sales = sales[sales['franchisee_id'] != 1]
customer_returns = customer_returns[customer_returns['franchisee_id'] != 1]
df_a1a = sales.groupby(['store_id', 'order_source', 'fofo_distributor' ,'type1', 'category'],
as_index=False).agg({
'value': ['sum'],
'quantity': ['sum'],
'COGS': ['sum'],
'bill_id': pd.Series.nunique,
'drug_id': pd.Series.nunique}).reset_index(drop=True)
df_a1a.columns = ["_".join(x) for x in df_a1a.columns.ravel()]
df_b2 = customer_returns.groupby(['store_id', 'order_source', 'fofo_distributor', 'type1', 'category'],
as_index=False).agg({
'returned_value': ['sum'],
'returned_quantity': ['sum'],
'returned_COGS': ['sum'],
'returned_bill_id': pd.Series.nunique,
'returned_drug_id': pd.Series.nunique}).reset_index(drop=True)
df_b2.columns = ["_".join(x) for x in df_b2.columns.ravel()]
df_a_b = pd.merge(left=df_a1a, right=df_b2,
how='outer', on=['store_id_', 'order_source_', 'fofo_distributor_', 'type1_', 'category_'])
df_a_b.fillna(0, inplace=True)
df_a_b['net_sale'] = df_a_b['value_sum'] - df_a_b['returned_value_sum']
df_a_b['net_COGS'] = df_a_b['COGS_sum'] - df_a_b['returned_COGS_sum']
df_a_b['net_quantity'] = df_a_b['quantity_sum'] - df_a_b['returned_quantity_sum']
df_a4 = df_a_b.groupby(['store_id_', 'order_source_', 'fofo_distributor_', 'category_', 'type1_'],
as_index=False).agg({
'net_sale': ['sum'],
'net_quantity': ['sum'],
'net_COGS': ['sum'],
'bill_id_nunique': ['sum'],
'drug_id_nunique': ['sum']}).reset_index(drop=True)
df_a4.columns = ["_".join(x) for x in df_a4.columns.ravel()]
df_a4 = pd.merge(left=df_a4, right=stores,
how='left', left_on=['store_id__'],
right_on=['store_id'])
df_a5 = df_a4[df_a4['category__'] == 'chronic']
total_bills_chronic = pd.pivot_table(df_a5,
values='bill_id_nunique_sum',
index=['order_source__', 'fofo_distributor__', 'category__', 'type1__'],
columns=['store_name']).reset_index()
df_a6 = df_a4[df_a4['category__'] == 'acute']
total_bills_acute = pd.pivot_table(df_a6,
values='bill_id_nunique_sum',
index=['order_source__', 'fofo_distributor__', 'category__', 'type1__'],
columns=['store_name']).reset_index()
total_bills_chronic_acute = pd.concat([total_bills_chronic, total_bills_acute])
total_bills_chronic_acute['tag_flag'] = 'total_bills'
total_bills_chronic_acute.rename(columns={'type1__': 'type1',
'category__': 'category',
'order_source__': 'order_source',
'fofo_distributor__': 'fofo_distributor'}, inplace=True)
return total_bills_chronic_acute
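# Average number of bills per consumer, computed separately for new and repeat consumers.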
def bills_per_cons_new_repeat(self, Sales, All_cons_initial_bill_date ,Stores, choose_year,choose_month):
sales = Sales.copy(deep=True)
df1 = All_cons_initial_bill_date.copy(deep=True)
stores = Stores.copy(deep=True)
df1['year'] = df1['created_at'].dt.year
df1["month"] = df1['created_at'].dt.month
df1 = df1[(df1['year'] == int(choose_year)) & (df1['month'] == int(choose_month))]
sales['flag'] = np.where(sales['category'] == "chronic", 1, 0)
df2 = sales.groupby(['store_id', 'order_source',
'patient_id'])[['flag']].sum().reset_index()
df2['check'] = np.where(df2['flag'] > 0, "chronic", "acute")
df3 = pd.merge(left=df1, right=df2,
how='left', on=['store_id', 'patient_id'])
df5 = pd.merge(left=df2, right=df1,
how='left', on=['store_id', 'patient_id'])
df6 = df5[df5['year'].isnull()]
df30 = sales.groupby(['store_id', 'order_source',
'patient_id'])[['bill_id']].nunique().reset_index().rename(columns={
"bill_id": "no_of_bills"})
# new consumers
df31 = pd.merge(left=df3, right=df30,
how='left', on=['store_id', 'order_source', 'patient_id'])
df32 = df31.groupby(['store_id', 'order_source'])[['no_of_bills']].mean().reset_index().rename(columns={
"no_of_bills": "new_consumers_avg_no_of_bills"})
# repeat consumers
df33 = pd.merge(left=df6, right=df30,
how='left', on=['store_id', 'order_source', 'patient_id'])
df34 = df33.groupby(['store_id', 'order_source'])[['no_of_bills']].mean().reset_index().rename(columns={
"no_of_bills": "repeat_consumers_avg_no_of_bills"})
df35 = pd.merge(left=df32, right=df34,
how='left', on=['store_id', 'order_source'])
df35 = pd.merge(left=df35, right=stores, how='left', on=['store_id'])
bills_per_cons_new = pd.pivot_table(df35,
values='new_consumers_avg_no_of_bills',
index='order_source',
columns=['store_name']).reset_index()
bills_per_cons_new['tag_flag'] = 'new_consumers_avg_no_of_bills'
bills_per_cons_repeat = pd.pivot_table(df35,
values='repeat_consumers_avg_no_of_bills',
index='order_source',
columns=['store_name']).reset_index()
bills_per_cons_repeat['tag_flag'] = 'repeat_consumers_avg_no_of_bills'
bills_per_cons_new_repeat = pd.concat([bills_per_cons_new,
bills_per_cons_repeat])
# bills_per_cons_new_repeat.rename(columns={'index': 'tag_flag'}, inplace=True)
return bills_per_cons_new_repeat
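# Average bill value (ABV) for new, repeat and chronic consumers; value = quantity * rate.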
def abv_new_repeat_chronic(self, Sales, All_cons_initial_bill_date ,Stores, choose_year,choose_month ):
sales = Sales.copy(deep=True)
df1 = All_cons_initial_bill_date.copy(deep=True)
stores = Stores.copy(deep=True)
df1['year'] = df1['created_at'].dt.year
df1["month"] = df1['created_at'].dt.month
df1 = df1[(df1['year'] == int(choose_year)) & (df1['month'] == int(choose_month))]
sales['flag'] = np.where(sales['category'] == "chronic", 1, 0)
df2 = sales.groupby(['store_id',
'order_source', 'patient_id'])[['flag']].sum().reset_index()
df2['check'] = np.where(df2['flag'] > 0, "chronic", "acute")
df3 = pd.merge(left=df1, right=df2,
how='left', on=['store_id', 'patient_id'])
df5 = pd.merge(left=df2, right=df1,
how='left', on=['store_id', 'patient_id'])
df6 = df5[df5['year'].isnull()]
sales['value'] = sales['quantity'] * sales['rate']
sales['value'] = sales['value'].astype(float)
df36 = sales.groupby(['store_id', 'order_source',
'patient_id', 'bill_id'])[['value']].sum().reset_index()
df37 = df36.groupby(['store_id', 'order_source',
'patient_id'])[['value']].mean().reset_index()
# new consumers
df38 = pd.merge(left=df3, right=df37,
how='left', on=['store_id', 'order_source', 'patient_id'])
df39 = df38.groupby(['store_id', 'order_source'])[['value']].mean().reset_index().rename(columns={
"value": "new_consumers_avg_bill_value"})
# repeat consumers
df40 = pd.merge(left=df6, right=df37,
how='left', on=['store_id', 'order_source', 'patient_id'])
df41 = df40.groupby(['store_id', 'order_source'])[['value']].mean().reset_index().rename(columns={
"value": "repeat_consumers_avg_bill_value"})
df42 = pd.merge(left=df39, right=df41,
how='left', on=['store_id', 'order_source'])
df42 = pd.merge(left=df42, right=stores, how='left', on=['store_id'])
df42['new_consumers_avg_bill_value'] = df42['new_consumers_avg_bill_value'].astype(float)
abv_new = pd.pivot_table(df42,
values='new_consumers_avg_bill_value',
index='order_source',
columns=['store_name']).reset_index()
abv_new['tag_flag'] = 'new_consumers_avg_bill_value'
df42['repeat_consumers_avg_bill_value'] = df42['repeat_consumers_avg_bill_value'].astype(float)
abv_repeat = pd.pivot_table(df42,
values='repeat_consumers_avg_bill_value',
index='order_source',
columns=['store_name']).reset_index()
abv_repeat['tag_flag'] = 'repeat_consumers_avg_bill_value'
df_a9 = sales.groupby(['store_id', 'order_source',
'bill_id', 'category'])[['value']].sum().reset_index()
df_a10 = df_a9.groupby(['store_id', 'order_source', 'category'])[['value']].mean().reset_index().rename(columns=
{
'value': "chronic_consumers_avg_bill_value"})
df_a11 = df_a10[df_a10['category'] == 'chronic']
df_a11 = pd.merge(left=df_a11, right=stores, how='left', on=['store_id'])
abv_chronic = pd.pivot_table(df_a11,
values='chronic_consumers_avg_bill_value',
index='order_source',
columns=['store_name']).reset_index()
abv_chronic['tag_flag'] = 'chronic_consumers_avg_bill_value'
abv_new_repeat_chronic = pd.concat([abv_new, abv_repeat, abv_chronic])
return abv_new_repeat_chronic
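# Average number of distinct drugs purchased per consumer, split into new vs repeat consumers.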
def items_per_cons_new_repeat(self,Sales, All_cons_initial_bill_date ,Stores, choose_year,choose_month ):
sales = Sales.copy(deep=True)
df1 = All_cons_initial_bill_date.copy(deep=True)
stores = Stores.copy(deep=True)
df1['year'] = df1['created_at'].dt.year
df1["month"] = df1['created_at'].dt.month
df1 = df1[(df1['year'] == int(choose_year)) & (df1['month'] == int(choose_month))]
sales['flag'] = np.where(sales['category'] == "chronic", 1, 0)
df2 = sales.groupby(['store_id',
'order_source', 'patient_id'])[['flag']].sum().reset_index()
df2['check'] = np.where(df2['flag'] > 0, "chronic", "acute")
df3 = pd.merge(left=df1, right=df2,
how='left', on=['store_id', 'patient_id'])
df5 = pd.merge(left=df2, right=df1,
how='left', on=['store_id', 'patient_id'])
df6 = df5[df5['year'].isnull()]
df43 = sales.groupby(['store_id',
'order_source', 'patient_id'])[['drug_id']].nunique().reset_index()
# new consumers
df44 = pd.merge(left=df3, right=df43,
how='left', on=['store_id', 'order_source', 'patient_id'])
df45 = df44.groupby(['store_id', 'order_source'])[['drug_id']].mean().reset_index().rename(columns={
"drug_id": "new_consumers_avg_items"})
# repeat consumers
df46 = pd.merge(left=df6, right=df43,
how='left', on=['store_id', 'order_source', 'patient_id'])
df47 = df46.groupby(['store_id', 'order_source'])[['drug_id']].mean().reset_index().rename(columns={
"drug_id": "repeat_consumers_avg_items"})
df48 = pd.merge(left=df45, right=df47,
how='left', on=['store_id', 'order_source'])
df48 = pd.merge(left=df48, right=stores, how='left', on=['store_id'])
items_per_cons_new = pd.pivot_table(df48,
values='new_consumers_avg_items',
index='order_source',
columns=['store_name']).reset_index()
items_per_cons_new['tag_flag'] = 'new_consumers_avg_items'
items_per_cons_repeat = pd.pivot_table(df48,
values='repeat_consumers_avg_items',
index='order_source',
columns=['store_name']).reset_index()
items_per_cons_repeat['tag_flag'] = 'repeat_consumers_avg_items'
items_per_cons_new_repeat = pd.concat([items_per_cons_new,
items_per_cons_repeat])
# items_per_cons_new_repeat.rename(columns={'index': 'tag_flag'}, inplace=True)
return items_per_cons_new_repeat
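# Total quantity sold to new vs repeat consumers, per store and order source.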
def tot_items_sold_new_repeat(self, Sales, All_cons_initial_bill_date, Stores, choose_year, choose_month):
sales = Sales.copy(deep=True)
df1 = All_cons_initial_bill_date.copy(deep=True)
stores = Stores.copy(deep=True)
df1['year'] = df1['created_at'].dt.year
df1["month"] = df1['created_at'].dt.month
df1 = df1[(df1['year'] == int(choose_year)) & (df1['month'] == int(choose_month))]
sales['flag'] = np.where(sales['category'] == "chronic", 1, 0)
df2 = sales.groupby(['store_id', 'order_source', 'patient_id'])[['flag']].sum().reset_index()
df2['check'] = np.where(df2['flag'] > 0, "chronic", "acute")
df3 = pd.merge(left=df1, right=df2,
how='left', on=['store_id', 'patient_id'])
df5 = pd.merge(left=df2, right=df1,
how='left', on=['store_id', 'patient_id'])
df6 = df5[df5['year'].isnull()]
df24 = sales.groupby(['store_id',
'order_source', 'patient_id'])[['quantity']].sum().reset_index()
# new consumers
df25 = pd.merge(left=df3, right=df24,
how='left', on=['store_id', 'order_source', 'patient_id'])
df26 = df25.groupby(['store_id', 'order_source'])[['quantity']].sum().reset_index().rename(columns={
"quantity": "new_consumers_qty"})
# repeat consumers
df27 = pd.merge(left=df6, right=df24,
how='left', on=['store_id', 'order_source', 'patient_id'])
df28 = df27.groupby(['store_id', 'order_source'])[['quantity']].sum().reset_index().rename(columns={
"quantity": "repeat_consumers_qty"})
df29 = pd.merge(left=df26, right=df28,
how='left', on=['store_id', 'order_source'])
df29 = pd.merge(left=df29, right=stores, how='left', on=['store_id'])
tot_items_sold_new = pd.pivot_table(df29,
values='new_consumers_qty',
index='order_source',
columns=['store_name']).reset_index()
tot_items_sold_new['tag_flag'] = 'new_consumers_qty'
tot_items_sold_repeat = pd.pivot_table(df29,
values='repeat_consumers_qty',
index='order_source',
columns=['store_name']).reset_index()
tot_items_sold_repeat['tag_flag'] = 'repeat_consumers_qty'
tot_items_sold_new_repeat = pd.concat([tot_items_sold_new,
tot_items_sold_repeat])
# tot_items_sold_new_repeat.rename(columns={'index': 'tag_flag'}, inplace=True)
return tot_items_sold_new_repeat
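# Consumers buying generic drugs (type 'generic' or 'high-value-generic'): overall count
# and the subset who are new consumers in the month.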
def generic_cons_overall_new(self, Sales, All_cons_initial_bill_date, Stores,fofo_tag = 'no'):
sales = Sales.copy(deep=True)
all_cons_first = All_cons_initial_bill_date.copy(deep=True)
stores = Stores.copy(deep=True)
if fofo_tag=='no':
generic_cons = sales.groupby(['store_id',
'order_source', 'type'])[['patient_id']].nunique().reset_index().rename(columns={
"patient_id": "generic_cons_overall"})
generic_cons = generic_cons[generic_cons['type'].isin(['generic', 'high-value-generic'])]
generic_cons = pd.merge(left=generic_cons, right=stores,
how='left', on=['store_id'])
generic_cons_overall = pd.pivot_table(generic_cons,
values='generic_cons_overall',
index='order_source',
columns=['store_name']).reset_index()
generic_cons_overall['tag_flag'] = 'generic_cons_overall'
aug_cons_min = sales.groupby(['store_id', 'order_source', 'patient_id'])[['created_at']].min().reset_index()
aug_new_cons = pd.merge(left=all_cons_first, right=aug_cons_min,
how='inner', on=['store_id', 'patient_id', 'created_at'])
aug_new_cons['flag'] = "aug_new"
new_cons_generic = pd.merge(left=aug_new_cons[['store_id', 'order_source', 'patient_id', 'flag']],
right=sales,
how='left', on=['store_id', 'order_source', 'patient_id'])
new_cons_generic1 = new_cons_generic.groupby(['store_id', 'order_source', 'type'])[
['patient_id']].nunique().reset_index().rename(columns={
"patient_id": "generic_cons_new"})
new_cons_generic1 = new_cons_generic1[new_cons_generic1['type'].isin(['generic', 'high-value-generic'])]
new_cons_generic1 = pd.merge(left=new_cons_generic1, right=stores,
how='left', on=['store_id'])
generic_cons_new = pd.pivot_table(new_cons_generic1,
values='generic_cons_new',
index='order_source',
columns=['store_name']).reset_index()
generic_cons_new['tag_flag'] = 'generic_cons_new'
generic_cons_overall_new = pd.concat([generic_cons_overall, generic_cons_new])
elif fofo_tag=='yes':
sales = sales[sales['franchisee_id'] != 1]
generic_cons = sales.groupby(['store_id',
'order_source','fofo_distributor', 'type'])[['patient_id']].nunique().reset_index().rename(
columns={
"patient_id": "generic_cons_overall"})
generic_cons = generic_cons[generic_cons['type'].isin(['generic', 'high-value-generic'])]
generic_cons = pd.merge(left=generic_cons, right=stores,
how='left', on=['store_id'])
generic_cons_overall = pd.pivot_table(generic_cons,
values='generic_cons_overall',
index=['order_source','fofo_distributor'],
columns=['store_name']).reset_index()
generic_cons_overall['tag_flag'] = 'generic_cons_overall'
aug_cons_min = sales.groupby(['store_id', 'order_source', 'patient_id'])[['created_at']].min().reset_index()
aug_new_cons = pd.merge(left=all_cons_first, right=aug_cons_min,
how='inner', on=['store_id', 'patient_id', 'created_at'])
aug_new_cons['flag'] = "aug_new"
new_cons_generic = pd.merge(left=aug_new_cons[['store_id', 'order_source', 'patient_id', 'flag']],
right=sales,
how='left', on=['store_id', 'order_source', 'patient_id'])
new_cons_generic1 = new_cons_generic.groupby(['store_id', 'order_source','fofo_distributor', 'type'])[
['patient_id']].nunique().reset_index().rename(columns={
"patient_id": "generic_cons_new"})
new_cons_generic1 = new_cons_generic1[new_cons_generic1['type'].isin(['generic', 'high-value-generic'])]
new_cons_generic1 = pd.merge(left=new_cons_generic1, right=stores,
how='left', on=['store_id'])
generic_cons_new = pd.pivot_table(new_cons_generic1,
values='generic_cons_new',
index=['order_source','fofo_distributor'],
columns=['store_name']).reset_index()
generic_cons_new['tag_flag'] = 'generic_cons_new'
generic_cons_overall_new = pd.concat([generic_cons_overall, generic_cons_new])
return generic_cons_overall_new
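# Power consumers: patients whose spend in the month exceeds power_consumer_value;
# reports the overall count and the count among new consumers.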
def power_cons_overall_new(self, Sales, All_cons_initial_bill_date, Stores, power_consumer_value):
sales = Sales.copy(deep=True)
all_cons_first = All_cons_initial_bill_date.copy(deep=True)
stores = Stores.copy(deep=True)
power_cons_aug = sales.groupby(['store_id', 'order_source', 'patient_id'])[['value']].sum().reset_index()
power_cons_aug1 = power_cons_aug[power_cons_aug['value'] > power_consumer_value]
power_cons = power_cons_aug1.groupby(['store_id', 'order_source'])[
['patient_id']].nunique().reset_index().rename(
columns={
"patient_id": "power_cons_overall"})
power_cons = pd.merge(left=power_cons, right=stores, how='left', on=['store_id'])
power_cons_overall = pd.pivot_table(power_cons,
values='power_cons_overall',
index='order_source',
columns=['store_name']).reset_index()
power_cons_overall['tag_flag'] = 'power_cons_overall'
aug_cons_min = sales.groupby(['store_id', 'order_source', 'patient_id'])[['created_at']].min().reset_index()
aug_new_cons = pd.merge(left=all_cons_first, right=aug_cons_min,
how='inner', on=['store_id', 'patient_id', 'created_at'])
aug_new_cons['flag'] = "aug_new"
df_fg = pd.merge(left=aug_new_cons[['store_id', 'order_source', 'patient_id', 'flag']], right=power_cons_aug1,
how='left', on=['store_id', 'order_source', 'patient_id'])
df_fg1 = df_fg[df_fg['value'].notnull()]
new_power_cons = df_fg1.groupby(['store_id', 'order_source'])[['patient_id']].nunique().reset_index().rename(
columns={
"patient_id": "power_cons_new"})
new_power_cons = pd.merge(left=new_power_cons, right=stores,
how='left', on=['store_id'])
power_cons_new = pd.pivot_table(new_power_cons,
values='power_cons_new',
index='order_source',
columns=['store_name']).reset_index()
power_cons_new['tag_flag'] = 'power_cons_new'
power_cons_overall_new = pd.concat([power_cons_overall, power_cons_new])
# power_cons_overall_new.rename(columns={'index': 'tag_flag'}, inplace=True)
return power_cons_overall_new
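# Sale value contributed by power consumers, broken up by drug type1 (ethical / generic /
# others, plus GOODAID when mis_type='breakup').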
def power_consumers_sale(self, Sales, Stores, power_consumer_value, mis_type, fofo_tag = 'no'):
sales = Sales.copy(deep=True)
stores = Stores.copy(deep=True)
if fofo_tag == 'no':
ad1 = sales.groupby(['store_id', 'order_source', 'patient_id'])[['value']].sum().reset_index()
ad2 = ad1[ad1['value'] > power_consumer_value]
ad3 = pd.merge(left=ad2[['store_id', 'order_source', 'patient_id']],
right=sales,
how='left', on=['store_id', 'order_source', 'patient_id'])
power_cons_sales = ad3.groupby(['store_id', 'order_source', 'type1'])[['value']].sum().reset_index()
power_cons_sales['value'] = power_cons_sales['value'].astype(float)
power_cons_sales = pd.merge(left=power_cons_sales, right=stores,
how='left', on=['store_id'])
power_cons_sales_ethical = power_cons_sales[power_cons_sales['type1'] == 'ethical']
power_cons_sales_ethical = pd.pivot_table(power_cons_sales_ethical,
values='value',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
power_cons_sales_generic = power_cons_sales[power_cons_sales['type1'] == 'generic']
power_cons_sales_generic = pd.pivot_table(power_cons_sales_generic,
values='value',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
power_cons_sales_others = power_cons_sales[power_cons_sales['type1'] == 'others']
power_cons_sales_others = pd.pivot_table(power_cons_sales_others,
values='value',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
power_cons_sales_gaid = power_cons_sales[power_cons_sales['type1'] == 'GOODAID']
power_cons_sales_gaid = pd.pivot_table(power_cons_sales_gaid,
values='value',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
power_cons_sales_overall = power_cons_sales.groupby(['store_id',
'store_name', 'order_source'])[
['value']].sum().reset_index().rename(columns={
"value": "total"})
power_cons_sales_overall = pd.pivot_table(power_cons_sales_overall,
values='total',
index=['order_source'],
columns=['store_name']).reset_index().rename(columns={
"index": "type1"})
if mis_type == 'breakup':
power_consumers_sale = pd.concat([power_cons_sales_overall,
power_cons_sales_ethical,
power_cons_sales_generic,
power_cons_sales_others,
power_cons_sales_gaid], sort=True)
elif mis_type == 'unified':
power_consumers_sale = pd.concat([power_cons_sales_overall,
power_cons_sales_ethical,
power_cons_sales_generic,
power_cons_sales_others], sort=True)
else:
self.logger.info('provide valid mis_type')
return None
power_consumers_sale['tag_flag'] = 'power_cons_sale'
return power_consumers_sale
elif fofo_tag == 'yes':
sales = sales[sales['franchisee_id']!=1]
ad1 = sales.groupby(['store_id', 'order_source','patient_id'])[['value']].sum().reset_index()
ad2 = ad1[ad1['value'] > power_consumer_value]
ad3 = pd.merge(left=ad2[['store_id', 'order_source' ,'patient_id']],
right=sales,
how='left', on=['store_id', 'order_source' ,'patient_id'])
power_cons_sales = ad3.groupby(['store_id', 'order_source', 'fofo_distributor' , 'type1'])[['value']].sum().reset_index()
power_cons_sales['value'] = power_cons_sales['value'].astype(float)
power_cons_sales = pd.merge(left=power_cons_sales, right=stores,
how='left', on=['store_id'])
power_cons_sales_ethical = power_cons_sales[power_cons_sales['type1'] == 'ethical']
power_cons_sales_ethical = pd.pivot_table(power_cons_sales_ethical,
values='value',
index=['type1', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
power_cons_sales_generic = power_cons_sales[power_cons_sales['type1'] == 'generic']
power_cons_sales_generic = pd.pivot_table(power_cons_sales_generic,
values='value',
index=['type1', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
power_cons_sales_others = power_cons_sales[power_cons_sales['type1'] == 'others']
power_cons_sales_others = pd.pivot_table(power_cons_sales_others,
values='value',
index=['type1', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
power_cons_sales_gaid = power_cons_sales[power_cons_sales['type1'] == 'GOODAID']
power_cons_sales_gaid = pd.pivot_table(power_cons_sales_gaid,
values='value',
index=['type1', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
power_cons_sales_overall = power_cons_sales.groupby(['store_id',
'store_name', 'order_source','fofo_distributor'])[
['value']].sum().reset_index().rename(columns={
"value": "total"})
power_cons_sales_overall = pd.pivot_table(power_cons_sales_overall,
values='total',
index=['order_source','fofo_distributor'],
columns=['store_name']).reset_index().rename(columns={
"index": "type1"})
if mis_type == 'breakup':
power_consumers_sale = pd.concat([power_cons_sales_overall,
power_cons_sales_ethical,
power_cons_sales_generic,
power_cons_sales_others,
power_cons_sales_gaid], sort=True)
elif mis_type == 'unified':
power_consumers_sale = pd.concat([power_cons_sales_overall,
power_cons_sales_ethical,
power_cons_sales_generic,
power_cons_sales_others], sort=True)
else:
self.logger.info('provide valid mis_type')
return None
power_consumers_sale['tag_flag'] = 'power_cons_sale'
return power_consumers_sale
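# Number of distinct bills raised by power consumers, per store and order source.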
def power_cons_bills(self, Sales, Stores, power_consumer_value,fofo_tag = 'no'):
sales = Sales.copy(deep=True)
stores = Stores.copy(deep=True)
if fofo_tag == 'no':
power_cons_aug = sales.groupby(['store_id', 'order_source', 'patient_id'])[['value']].sum().reset_index()
power_cons_aug1 = power_cons_aug[power_cons_aug['value'] > power_consumer_value]
df_lp = pd.merge(left=power_cons_aug1[['store_id', 'order_source', 'patient_id']],
right=sales,
how='left', on=['store_id', 'order_source', 'patient_id'])
power_cons_bills = df_lp.groupby(['store_id', 'order_source'])[['bill_id']].nunique().reset_index().rename(
columns={
"bill_id": "no_of_bills"})
power_cons_bills = pd.merge(left=power_cons_bills, right=stores,
how='left', on=['store_id'])
power_cons_bills = pd.pivot_table(power_cons_bills,
values='no_of_bills',
index='order_source',
columns=['store_name']).reset_index()
power_cons_bills['tag_flag'] = "no_of_bills"
# power_cons_bills.rename(columns={'index': 'tag_flag'}, inplace=True)
power_cons_bills.loc[power_cons_bills['tag_flag'] == 'no_of_bills',
'tag_flag'] = 'Power cons no_of_bills'
return power_cons_bills
elif fofo_tag == 'yes':
sales = sales[sales['franchisee_id'] != 1]
power_cons_aug = sales.groupby(['store_id', 'order_source' ,'patient_id'])[['value']].sum().reset_index()
power_cons_aug1 = power_cons_aug[power_cons_aug['value'] > power_consumer_value]
df_lp = pd.merge(left=power_cons_aug1[['store_id', 'order_source','patient_id']],
right=sales,
how='left', on=['store_id', 'order_source','patient_id'])
power_cons_bills = df_lp.groupby(['store_id', 'order_source','fofo_distributor'])[['bill_id']].nunique().reset_index().rename(
columns={
"bill_id": "no_of_bills"})
power_cons_bills = pd.merge(left=power_cons_bills, right=stores,
how='left', on=['store_id'])
power_cons_bills = pd.pivot_table(power_cons_bills,
values='no_of_bills',
index=['order_source','fofo_distributor'],
columns=['store_name']).reset_index()
power_cons_bills['tag_flag'] = "no_of_bills"
# power_cons_bills.rename(columns={'index': 'tag_flag'}, inplace=True)
power_cons_bills.loc[power_cons_bills['tag_flag'] == 'no_of_bills',
'tag_flag'] = 'Power cons no_of_bills'
return power_cons_bills
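# Home-delivery metrics: unique HD consumers, delivered-order count (patient-order-bill key)
# and HD sale value by type1, with customer returns netted out of sales.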
def home_delivery(self, Sales, Customer_returns, Home_delivery_data, Stores, delivery_bill_ids,mis_tag,fofo_tag = 'no' ):
sales = Sales.copy(deep=True)
customer_returns = Customer_returns.copy(deep=True)
home_delivery_data = Home_delivery_data.copy(deep=True)
stores = Stores.copy(deep=True)
delivery_bill_ids = delivery_bill_ids.copy(deep=True)
if fofo_tag == 'yes':
home_delivery_data = home_delivery_data[home_delivery_data['franchisee_id'] != 1]
sales = sales[sales['franchisee_id'] != 1]
customer_returns = customer_returns[customer_returns['franchisee_id'] != 1]
# Consumer Count
HD_cons = home_delivery_data.groupby(['store_id', 'order_source'])[
['patient_id']].nunique().reset_index().rename(columns={
"patient_id": "no_of_HD_consumers"})
HD_cons = pd.merge(left=HD_cons, right=stores,
how='left', on=['store_id'])
HD_cons_count = pd.pivot_table(HD_cons,
values='no_of_HD_consumers',
index='order_source',
columns=['store_name']).reset_index()
HD_cons_count['tag_flag'] = 'no_of_HD_consumers'
# Delivered count
home_delivery_data['key'] = home_delivery_data['patient_id'].astype(str) + '-' + home_delivery_data[
'order_number'].astype(str) + '-' + home_delivery_data['bill_id'].astype(str)
if mis_tag == 'breakup':
home_delivery_data['order_source2'] = np.where(home_delivery_data.order_source_pso.isin(['zeno']),
"ecomm", "store")
elif mis_tag == 'unified':
home_delivery_data['order_source2'] = 'all'
HD_count = home_delivery_data.groupby(['store_id', 'order_source2']).agg(
{'key': pd.Series.nunique}).reset_index()
HD_count = pd.merge(left=HD_count, right=stores,
how='left', on=['store_id'])
HD_count.rename(columns={'key': 'count_of_HD_delivered'}, inplace=True)
HD_count_delivered = pd.pivot_table(HD_count,
values='count_of_HD_delivered',
index='order_source2',
columns=['store_name']).reset_index()
HD_count_delivered.rename(columns={'order_source2': 'order_source'}, inplace =True)
HD_count_delivered['tag_flag'] = 'count_of_HD_delivered'
# HD sales
if fofo_tag == 'no':
sales = sales[['store_id', 'order_source', 'bill_id', 'rate', 'quantity', 'type1']]
customer_returns = customer_returns[
['store_id', 'order_source', 'bill_id', 'rate', 'returned_quantity', 'type1']]
elif fofo_tag=='yes':
sales = sales[['store_id', 'order_source', 'fofo_distributor', 'bill_id', 'rate', 'quantity', 'type1']]
customer_returns = customer_returns[
['store_id', 'order_source', 'fofo_distributor', 'bill_id', 'rate', 'returned_quantity', 'type1']]
customer_returns['returned_quantity'] = customer_returns['returned_quantity'] * (-1)
customer_returns.rename(columns={'returned_quantity': 'quantity'}, inplace=True)
sales = pd.concat([sales, customer_returns], sort=True)
sales['quantity'] = sales['quantity'].astype(float)
sales['rate'] = sales['rate'].astype(float)
sales['value'] = sales['rate'] * sales['quantity']
HD_bills = tuple(map(int, list(delivery_bill_ids[~delivery_bill_ids['bill_id'].isnull()]['bill_id'].unique())))
HD_sales = sales[sales['bill_id'].isin(HD_bills)]
if fofo_tag=='no':
HD_sales_by_type = HD_sales.groupby(['store_id', 'type1', 'order_source'])[["value"]].sum().reset_index()
elif fofo_tag == 'yes':
HD_sales_by_type = HD_sales.groupby(['store_id', 'type1', 'order_source', 'fofo_distributor'])[["value"]].sum().reset_index()
HD_sales_by_type = pd.merge(left=HD_sales_by_type, right=stores,
how='left', on=['store_id'])
HD_sales_by_type['value'] = HD_sales_by_type['value'].astype(float)
if fofo_tag == 'no':
HD_sales = pd.pivot_table(HD_sales_by_type,
values='value',
index=['type1', 'order_source'],
columns=['store_name']).reset_index().rename(columns={
"type1": "index"})
elif fofo_tag == 'yes':
HD_sales = pd.pivot_table(HD_sales_by_type,
values='value',
index=['type1', 'order_source', 'fofo_distributor'],
columns=['store_name']).reset_index().rename(columns={
"type1": "index"})
HD_sales.rename(columns={'index': 'tag_flag'}, inplace=True)
home_delivery = pd.concat([HD_cons_count, HD_count_delivered, HD_sales], sort=True)
home_delivery.loc[home_delivery['tag_flag'] == 'ethical',
'tag_flag'] = 'HD ethical sale'
home_delivery.loc[home_delivery['tag_flag'] == 'generic',
'tag_flag'] = 'HD generic sale'
home_delivery.loc[home_delivery['tag_flag'] == 'others',
'tag_flag'] = 'HD others sale'
if mis_tag == 'breakup':
home_delivery.loc[home_delivery['tag_flag'] == 'GOODAID',
'tag_flag'] = 'GOODAID sale'
return home_delivery
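# FOFO home-delivery consumer and delivered-order counts, computed separately for the
# 'workcell' and 'other' distributor slices.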
def home_delivery_fofo_consumers(self, Workcell_home_delivery_data_fofo, Other_home_delivery_data_fofo, Stores, mis_tag, fofo_tag = 'yes'):
workcell_home_delivery_data_fofo = Workcell_home_delivery_data_fofo.copy(deep=True)
other_home_delivery_data_fofo = Other_home_delivery_data_fofo.copy(deep=True)
stores = Stores.copy(deep=True)
# workcell Consumer Count
workcell_HD_cons = workcell_home_delivery_data_fofo.groupby(['store_id', 'order_source'])[
['patient_id']].nunique().reset_index().rename(columns={
"patient_id": "no_of_HD_consumers"})
workcell_HD_cons = pd.merge(left=workcell_HD_cons, right=stores,
how='left', on=['store_id'])
workcell_HD_cons_count = pd.pivot_table(workcell_HD_cons,
values='no_of_HD_consumers',
index='order_source',
columns=['store_name']).reset_index()
workcell_HD_cons_count['tag_flag'] = 'no_of_HD_consumers'
workcell_HD_cons_count['fofo_distributor'] = 'workcell'
# other Consumer Count
other_HD_cons = other_home_delivery_data_fofo.groupby(['store_id', 'order_source'])[
['patient_id']].nunique().reset_index().rename(columns={
"patient_id": "no_of_HD_consumers"})
other_HD_cons = pd.merge(left=other_HD_cons, right=stores,
how='left', on=['store_id'])
other_HD_cons_count = pd.pivot_table(other_HD_cons,
values='no_of_HD_consumers',
index='order_source',
columns=['store_name']).reset_index()
other_HD_cons_count['tag_flag'] = 'no_of_HD_consumers'
other_HD_cons_count['fofo_distributor'] = 'other'
# Delivered count
workcell_home_delivery_data_fofo['key'] = workcell_home_delivery_data_fofo['patient_id'].astype(str) + '-' + workcell_home_delivery_data_fofo[
'order_number'].astype(str) + '-' + workcell_home_delivery_data_fofo['bill_id'].astype(str)
other_home_delivery_data_fofo['key'] = other_home_delivery_data_fofo['patient_id'].astype(str) + '-' + other_home_delivery_data_fofo[
'order_number'].astype(str) + '-' + other_home_delivery_data_fofo['bill_id'].astype(str)
if mis_tag == 'breakup':
workcell_home_delivery_data_fofo['order_source2'] = np.where( workcell_home_delivery_data_fofo.order_source_pso.isin(['zeno']),"ecomm", "store")
other_home_delivery_data_fofo['order_source2'] = np.where(
other_home_delivery_data_fofo.order_source_pso.isin(['zeno']), "ecomm", "store")
elif mis_tag == 'unified':
workcell_home_delivery_data_fofo['order_source2'] = 'all'
other_home_delivery_data_fofo['order_source2'] = 'all'
workcell_HD_count = workcell_home_delivery_data_fofo.groupby(['store_id', 'order_source2']).agg(
{'key': pd.Series.nunique}).reset_index()
other_HD_count = other_home_delivery_data_fofo.groupby(['store_id', 'order_source2']).agg(
{'key': pd.Series.nunique}).reset_index()
workcell_HD_count = pd.merge(left=workcell_HD_count, right=stores,
how='left', on=['store_id'])
workcell_HD_count.rename(columns={'key': 'count_of_HD_delivered'}, inplace=True)
workcell_HD_count_delivered = pd.pivot_table(workcell_HD_count,
values='count_of_HD_delivered',
index='order_source2',
columns=['store_name']).reset_index()
workcell_HD_count_delivered.rename(columns={'order_source2': 'order_source'}, inplace=True)
workcell_HD_count_delivered['tag_flag'] = 'count_of_HD_delivered'
other_HD_count = pd.merge(left=other_HD_count, right=stores,
how='left', on=['store_id'])
other_HD_count.rename(columns={'key': 'count_of_HD_delivered'}, inplace=True)
other_HD_count_delivered = pd.pivot_table(other_HD_count,
values='count_of_HD_delivered',
index='order_source2',
columns=['store_name']).reset_index()
other_HD_count_delivered.rename(columns={'order_source2': 'order_source'}, inplace=True)
other_HD_count_delivered['tag_flag'] = 'count_of_HD_delivered'
workcell_HD_count_delivered['fofo_distributor'] = 'workcell'
other_HD_count_delivered['fofo_distributor'] = 'other'
home_delivery = pd.concat([workcell_HD_cons_count,other_HD_cons_count,workcell_HD_count_delivered,other_HD_count_delivered], sort=True)
return home_delivery
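# Purchases received from workcell: pivots the tax-adjusted received value (zp_received_tax,
# derived via taxable_value_vat_based_2) by drug type1 (and fofo_distributor for fofo stores);
# launch_flag='launch_stock' restricts to launch-stock rows and prefixes the tag_flag labels.
# (Method name spelling kept as-is; it may be referenced elsewhere.)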
def purchase_from_worckell(self, Purchase_from_workcell_data, Stores,mis_tag,fofo_tag = 'no', launch_flag = 'normal'):
purchase_from_wc_data = Purchase_from_workcell_data.copy(deep=True)
stores = Stores.copy(deep=True)
if launch_flag == 'launch_stock':
purchase_from_wc_data = purchase_from_wc_data[purchase_from_wc_data['launch_flag'] == 'launch_stock']
purchase_from_wc_data['zp_received_tax'] = np.vectorize(self.taxable_value_vat_based_2)(
purchase_from_wc_data['zp_received_net_value'], purchase_from_wc_data['zp_vat'])
purchase_from_wc_data['wc_purchased_tax'] = np.vectorize(self.taxable_value_vat_based_2)(
purchase_from_wc_data['wc_purchase_net_value'], purchase_from_wc_data['wc_vat'])
if fofo_tag=='no':
df_yy1 = purchase_from_wc_data.groupby(['store_id', 'type1', 'category'],
as_index=False).agg({
'zp_received_net_value': ['sum'],
'zp_received_tax': ['sum'],
'wc_purchase_net_value': ['sum'],
'wc_purchased_tax': ['sum']}).reset_index(drop=True)
df_yy1.columns = ["_".join(x) for x in df_yy1.columns.ravel()]
df_yy1.columns = df_yy1.columns.str.rstrip('_x')
df_yy2 = df_yy1.groupby(['store_id', 'type1'])[["zp_received_tax_sum"]].sum().reset_index()
df_yy2 = pd.merge(left=df_yy2, right=stores,
how='left', on=['store_id'])
purchase_from_wc = pd.pivot_table(df_yy2,
values='zp_received_tax_sum',
index='type1',
columns=['store_name']).reset_index().rename(columns={
"type1": "index"})
purchase_from_wc.rename(columns={'index': 'tag_flag'}, inplace=True)
if launch_flag == 'normal':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'ethical',
'tag_flag'] = 'purchase_from_wc_ethical'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'generic',
'tag_flag'] = 'purchase_from_wc_generic'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'others',
'tag_flag'] = 'purchase_from_wc_others'
if mis_tag == 'breakup':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'GOODAID',
'tag_flag'] = 'purchase_from_wc_GOODAID'
elif launch_flag == 'launch_stock':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'ethical',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_ethical'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'generic',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_generic'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'others',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_others'
if mis_tag == 'breakup':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'GOODAID',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_GOODAID'
return purchase_from_wc
elif fofo_tag =='yes':
purchase_from_wc_data = purchase_from_wc_data[purchase_from_wc_data['franchisee_id']!=1]
df_yy1 = purchase_from_wc_data.groupby(['store_id', 'type1','fofo_distributor', 'category'],
as_index=False).agg({
'zp_received_net_value': ['sum'],
'zp_received_tax': ['sum'],
'wc_purchase_net_value': ['sum'],
'wc_purchased_tax': ['sum']}).reset_index(drop=True)
df_yy1.columns = ["_".join(x) for x in df_yy1.columns.ravel()]
df_yy1.columns = df_yy1.columns.str.rstrip('_x')
df_yy2 = df_yy1.groupby(['store_id', 'type1','fofo_distributor'])[["zp_received_tax_sum"]].sum().reset_index()
df_yy2 = pd.merge(left=df_yy2, right=stores,
how='left', on=['store_id'])
purchase_from_wc = pd.pivot_table(df_yy2,
values='zp_received_tax_sum',
index=['type1','fofo_distributor'],
columns=['store_name']).reset_index().rename(columns={
"type1": "index"})
purchase_from_wc.rename(columns={'index': 'tag_flag'}, inplace=True)
if launch_flag == 'normal':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'ethical',
'tag_flag'] = 'purchase_from_wc_ethical'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'generic',
'tag_flag'] = 'purchase_from_wc_generic'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'others',
'tag_flag'] = 'purchase_from_wc_others'
if mis_tag == 'breakup':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'GOODAID',
'tag_flag'] = 'purchase_from_wc_GOODAID'
elif launch_flag == 'launch_stock':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'ethical',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_ethical'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'generic',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_generic'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'others',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_others'
if mis_tag == 'breakup':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'GOODAID',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_GOODAID'
return purchase_from_wc
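# Same flow as purchase_from_worckell, but aggregates zp_received_net_value (value including
# tax) instead of the tax-adjusted value; tag_flag labels carry an '_inc_tax' suffix.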
def purchase_from_worckell_including_tax(self, Purchase_from_workcell_data, Stores,mis_tag,fofo_tag = 'no', launch_flag = 'normal'):
purchase_from_wc_data = Purchase_from_workcell_data.copy(deep=True)
stores = Stores.copy(deep=True)
if launch_flag == 'launch_stock':
purchase_from_wc_data = purchase_from_wc_data[purchase_from_wc_data['launch_flag'] == 'launch_stock']
purchase_from_wc_data['zp_received_tax'] = np.vectorize(self.taxable_value_vat_based_2)(
purchase_from_wc_data['zp_received_net_value'], purchase_from_wc_data['zp_vat'])
purchase_from_wc_data['wc_purchased_tax'] = np.vectorize(self.taxable_value_vat_based_2)(
purchase_from_wc_data['wc_purchase_net_value'], purchase_from_wc_data['wc_vat'])
if fofo_tag=='no':
df_yy1 = purchase_from_wc_data.groupby(['store_id', 'type1', 'category'],
as_index=False).agg({
'zp_received_net_value': ['sum'],
'zp_received_tax': ['sum'],
'wc_purchase_net_value': ['sum'],
'wc_purchased_tax': ['sum']}).reset_index(drop=True)
df_yy1.columns = ["_".join(x) for x in df_yy1.columns.ravel()]
df_yy1.columns = df_yy1.columns.str.rstrip('_x')
df_yy2 = df_yy1.groupby(['store_id', 'type1'])[["zp_received_net_value_sum"]].sum().reset_index()
df_yy2 = pd.merge(left=df_yy2, right=stores,
how='left', on=['store_id'])
purchase_from_wc = pd.pivot_table(df_yy2,
values='zp_received_net_value_sum',
index='type1',
columns=['store_name']).reset_index().rename(columns={
"type1": "index"})
purchase_from_wc.rename(columns={'index': 'tag_flag'}, inplace=True)
if launch_flag == 'normal':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'ethical',
'tag_flag'] = 'purchase_from_wc_ethical_inc_tax'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'generic',
'tag_flag'] = 'purchase_from_wc_generic_inc_tax'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'others',
'tag_flag'] = 'purchase_from_wc_others_inc_tax'
if mis_tag == 'breakup':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'GOODAID',
'tag_flag'] = 'purchase_from_wc_GOODAID_inc_tax'
elif launch_flag == 'launch_stock':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'ethical',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_ethical_inc_tax'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'generic',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_generic_inc_tax'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'others',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_others_inc_tax'
if mis_tag == 'breakup':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'GOODAID',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_GOODAID_inc_tax'
return purchase_from_wc
elif fofo_tag =='yes':
purchase_from_wc_data = purchase_from_wc_data[purchase_from_wc_data['franchisee_id']!=1]
df_yy1 = purchase_from_wc_data.groupby(['store_id', 'type1','fofo_distributor', 'category'],
as_index=False).agg({
'zp_received_net_value': ['sum'],
'zp_received_tax': ['sum'],
'wc_purchase_net_value': ['sum'],
'wc_purchased_tax': ['sum']}).reset_index(drop=True)
df_yy1.columns = ["_".join(x) for x in df_yy1.columns.ravel()]
df_yy1.columns = df_yy1.columns.str.rstrip('_x')
df_yy2 = df_yy1.groupby(['store_id', 'type1','fofo_distributor'])[["zp_received_net_value_sum"]].sum().reset_index()
df_yy2 = pd.merge(left=df_yy2, right=stores,
how='left', on=['store_id'])
purchase_from_wc = pd.pivot_table(df_yy2,
values='zp_received_net_value_sum',
index=['type1','fofo_distributor'],
columns=['store_name']).reset_index().rename(columns={
"type1": "index"})
purchase_from_wc.rename(columns={'index': 'tag_flag'}, inplace=True)
if launch_flag == 'normal':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'ethical',
'tag_flag'] = 'purchase_from_wc_ethical_inc_tax'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'generic',
'tag_flag'] = 'purchase_from_wc_generic_inc_tax'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'others',
'tag_flag'] = 'purchase_from_wc_others_inc_tax'
if mis_tag == 'breakup':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'GOODAID',
'tag_flag'] = 'purchase_from_wc_GOODAID_inc_tax'
elif launch_flag == 'launch_stock':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'ethical',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_ethical_inc_tax'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'generic',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_generic_inc_tax'
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'others',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_others_inc_tax'
if mis_tag == 'breakup':
purchase_from_wc.loc[purchase_from_wc['tag_flag'] == 'GOODAID',
'tag_flag'] = 'launch_stock_in_purchase_from_wc_GOODAID_inc_tax'
return purchase_from_wc
def cogs_for_wc(self, Purchase_from_workcell_data, Stores, mis_tag,fofo_tag = 'no'):
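# COGS for Workcell: taxable value of WC purchases (wc_purchased_tax) summed per type1
# (plus fofo_distributor for FOFO stores, excluding franchisee_id 1) and pivoted across stores.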
purchase_from_wc_data = Purchase_from_workcell_data.copy(deep=True)
stores = Stores.copy(deep=True)
purchase_from_wc_data['zp_received_tax'] = np.vectorize(self.taxable_value_vat_based_2)(
purchase_from_wc_data['zp_received_net_value'], purchase_from_wc_data['zp_vat'])
purchase_from_wc_data['wc_purchased_tax'] = np.vectorize(self.taxable_value_vat_based_2)(
purchase_from_wc_data['wc_purchase_net_value'], purchase_from_wc_data['wc_vat'])
if fofo_tag == 'no':
df_yy1 = purchase_from_wc_data.groupby(['store_id', 'type1', 'category'],
as_index=False).agg({
'zp_received_net_value': ['sum'],
'zp_received_tax': ['sum'],
'wc_purchase_net_value': ['sum'],
'wc_purchased_tax': ['sum']}).reset_index(drop=True)
df_yy1.columns = ["_".join(x) for x in df_yy1.columns.ravel()]
df_yy1.columns = df_yy1.columns.str.rstrip('_x')
df_yy2 = df_yy1.groupby(['store_id', 'type1'])[["wc_purchased_tax_sum"]].sum().reset_index()
df_yy2 = pd.merge(left=df_yy2, right=stores,
how='left', on=['store_id'])
cogs_for_wc = pd.pivot_table(df_yy2,
values='wc_purchased_tax_sum',
index='type1',
columns=['store_name']).reset_index().rename(columns={
"type1": "index"})
cogs_for_wc.rename(columns={'index': 'tag_flag'}, inplace=True)
cogs_for_wc.loc[cogs_for_wc['tag_flag'] == 'ethical',
'tag_flag'] = 'cogs_for_wc_ethical'
cogs_for_wc.loc[cogs_for_wc['tag_flag'] == 'generic',
'tag_flag'] = 'cogs_for_wc_generic'
cogs_for_wc.loc[cogs_for_wc['tag_flag'] == 'others',
'tag_flag'] = 'cogs_for_wc_others'
if mis_tag == 'breakup':
cogs_for_wc.loc[cogs_for_wc['tag_flag'] == 'GOODAID',
'tag_flag'] = 'cogs_for_wc_GOODAID'
return cogs_for_wc
elif fofo_tag == 'yes':
purchase_from_wc_data = purchase_from_wc_data[purchase_from_wc_data['franchisee_id']!=1]
df_yy1 = purchase_from_wc_data.groupby(['store_id', 'type1','fofo_distributor', 'category'],
as_index=False).agg({
'zp_received_net_value': ['sum'],
'zp_received_tax': ['sum'],
'wc_purchase_net_value': ['sum'],
'wc_purchased_tax': ['sum']}).reset_index(drop=True)
df_yy1.columns = ["_".join(x) for x in df_yy1.columns.ravel()]
df_yy1.columns = df_yy1.columns.str.rstrip('_x')
df_yy2 = df_yy1.groupby(['store_id','fofo_distributor', 'type1'])[["wc_purchased_tax_sum"]].sum().reset_index()
df_yy2 = pd.merge(left=df_yy2, right=stores,
how='left', on=['store_id'])
cogs_for_wc = pd.pivot_table(df_yy2,
values='wc_purchased_tax_sum',
index=['type1','fofo_distributor'],
columns=['store_name']).reset_index().rename(columns={
"type1": "index"})
cogs_for_wc.rename(columns={'index': 'tag_flag'}, inplace=True)
cogs_for_wc.loc[cogs_for_wc['tag_flag'] == 'ethical',
'tag_flag'] = 'cogs_for_wc_ethical'
cogs_for_wc.loc[cogs_for_wc['tag_flag'] == 'generic',
'tag_flag'] = 'cogs_for_wc_generic'
cogs_for_wc.loc[cogs_for_wc['tag_flag'] == 'others',
'tag_flag'] = 'cogs_for_wc_others'
if mis_tag == 'breakup':
cogs_for_wc.loc[cogs_for_wc['tag_flag'] == 'GOODAID',
'tag_flag'] = 'cogs_for_wc_GOODAID'
return cogs_for_wc
def return_from_zippin(self, Zp_to_wc_return, mis_tag,fofo_tag = 'no'):
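# Returns from Zippin to Workcell: builds three pivots by cost centre and type1 (plus
# fofo_distributor for FOFO) - taxable value, return COGS taxable, and net value incl. tax -
# which are concatenated into the returned frame.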
zp_to_wc_return = Zp_to_wc_return.copy(deep=True)
zp_to_wc_return['return_cogs_taxable'] = np.vectorize(self.taxable_value_vat_based_2)(
zp_to_wc_return['cogs'], zp_to_wc_return['vat'])
if fofo_tag == 'no':
zp_to_wc_return1 = zp_to_wc_return.groupby(['cost_centre',
'type1'])[["taxable_value"]].sum().reset_index()
zp_to_wc_return1['taxable_value'] = zp_to_wc_return1['taxable_value'].astype(float)
return_from_zippin = pd.pivot_table(zp_to_wc_return1,
values='taxable_value',
index='type1',
columns=['cost_centre']).reset_index().rename(columns={
"type1": "index"})
return_from_zippin.rename(columns={'index': 'tag_flag'}, inplace=True)
return_from_zippin.loc[return_from_zippin['tag_flag'] == 'ethical',
'tag_flag'] = 'return_from_zp_ethical'
return_from_zippin.loc[return_from_zippin['tag_flag'] == 'generic',
'tag_flag'] = 'return_from_zp_generic'
return_from_zippin.loc[return_from_zippin['tag_flag'] == 'others',
'tag_flag'] = 'return_from_zp_others'
if mis_tag == 'breakup':
return_from_zippin.loc[return_from_zippin['tag_flag'] == 'GOODAID',
'tag_flag'] = 'return_from_zp_GOODAID'
# Return Cogs Taxable
zp_to_wc_return2 = zp_to_wc_return.groupby(['cost_centre',
'type1'])[["return_cogs_taxable"]].sum().reset_index()
zp_to_wc_return2['return_cogs_taxable'] = zp_to_wc_return2['return_cogs_taxable'].astype(float)
return_from_zippin_Cogs_taxable = pd.pivot_table(zp_to_wc_return2,
values='return_cogs_taxable',
index='type1',
columns=['cost_centre']).reset_index().rename(columns={
"type1": "index"})
return_from_zippin_Cogs_taxable.rename(columns={'index': 'tag_flag'}, inplace=True)
return_from_zippin_Cogs_taxable.loc[return_from_zippin_Cogs_taxable['tag_flag'] == 'ethical',
'tag_flag'] = 'return_from_zp_COGS_taxable_ethical'
return_from_zippin_Cogs_taxable.loc[return_from_zippin_Cogs_taxable['tag_flag'] == 'generic',
'tag_flag'] = 'return_from_zp_COGS_taxable_generic'
return_from_zippin_Cogs_taxable.loc[return_from_zippin_Cogs_taxable['tag_flag'] == 'others',
'tag_flag'] = 'return_from_zp_COGS_taxable_others'
if mis_tag == 'breakup':
return_from_zippin_Cogs_taxable.loc[return_from_zippin_Cogs_taxable['tag_flag'] == 'GOODAID',
'tag_flag'] = 'return_from_zp_COGS_taxable_GOODAID'
# Return net value
zp_to_wc_return3 = zp_to_wc_return.groupby(['cost_centre',
'type1'])[["net_value"]].sum().reset_index()
zp_to_wc_return3['net_value'] = zp_to_wc_return3['net_value'].astype(float)
return_from_zippin_net_value_inc_tax = pd.pivot_table(zp_to_wc_return3,
values='net_value',
index='type1',
columns=['cost_centre']).reset_index().rename(columns={
"type1": "index"})
return_from_zippin_net_value_inc_tax.rename(columns={'index': 'tag_flag'}, inplace=True)
return_from_zippin_net_value_inc_tax.loc[return_from_zippin_net_value_inc_tax['tag_flag'] == 'ethical',
'tag_flag'] = 'return_from_zp_inc_tax_ethical'
return_from_zippin_net_value_inc_tax.loc[return_from_zippin_net_value_inc_tax['tag_flag'] == 'generic',
'tag_flag'] = 'return_from_zp_inc_tax_generic'
return_from_zippin_net_value_inc_tax.loc[return_from_zippin_net_value_inc_tax['tag_flag'] == 'others',
'tag_flag'] = 'return_from_zp_inc_tax_others'
if mis_tag == 'breakup':
return_from_zippin_net_value_inc_tax.loc[return_from_zippin_net_value_inc_tax['tag_flag'] == 'GOODAID',
'tag_flag'] = 'return_from_zp_inc_tax_GOODAID'
elif fofo_tag == 'yes':
zp_to_wc_return = zp_to_wc_return[zp_to_wc_return['franchisee_id']!=1]
zp_to_wc_return1 = zp_to_wc_return.groupby(['cost_centre',
'type1','fofo_distributor'])[["taxable_value"]].sum().reset_index()
zp_to_wc_return1['taxable_value'] = zp_to_wc_return1['taxable_value'].astype(float)
return_from_zippin = pd.pivot_table(zp_to_wc_return1,
values='taxable_value',
index=['type1','fofo_distributor'],
columns=['cost_centre']).reset_index().rename(columns={
"type1": "index"})
return_from_zippin.rename(columns={'index': 'tag_flag'}, inplace=True)
return_from_zippin.loc[return_from_zippin['tag_flag'] == 'ethical',
'tag_flag'] = 'return_from_zp_ethical'
return_from_zippin.loc[return_from_zippin['tag_flag'] == 'generic',
'tag_flag'] = 'return_from_zp_generic'
return_from_zippin.loc[return_from_zippin['tag_flag'] == 'others',
'tag_flag'] = 'return_from_zp_others'
if mis_tag == 'breakup':
return_from_zippin.loc[return_from_zippin['tag_flag'] == 'GOODAID',
'tag_flag'] = 'return_from_zp_GOODAID'
# Return Cogs Taxable
zp_to_wc_return2 = zp_to_wc_return.groupby(['cost_centre',
'type1', 'fofo_distributor'])[
["return_cogs_taxable"]].sum().reset_index()
zp_to_wc_return2['return_cogs_taxable'] = zp_to_wc_return2['return_cogs_taxable'].astype(float)
return_from_zippin_Cogs_taxable = pd.pivot_table(zp_to_wc_return2,
values='return_cogs_taxable',
index=['type1', 'fofo_distributor'],
columns=['cost_centre']).reset_index().rename(columns={
"type1": "index"})
return_from_zippin_Cogs_taxable.rename(columns={'index': 'tag_flag'}, inplace=True)
return_from_zippin_Cogs_taxable.loc[return_from_zippin_Cogs_taxable['tag_flag'] == 'ethical',
'tag_flag'] = 'return_from_zp_COGS_taxable_ethical'
return_from_zippin_Cogs_taxable.loc[return_from_zippin_Cogs_taxable['tag_flag'] == 'generic',
'tag_flag'] = 'return_from_zp_COGS_taxable_generic'
return_from_zippin_Cogs_taxable.loc[return_from_zippin_Cogs_taxable['tag_flag'] == 'others',
'tag_flag'] = 'return_from_zp_COGS_taxable_others'
if mis_tag == 'breakup':
return_from_zippin_Cogs_taxable.loc[return_from_zippin_Cogs_taxable['tag_flag'] == 'GOODAID',
'tag_flag'] = 'return_from_zp_COGS_taxable_GOODAID'
# Return net value
zp_to_wc_return3 = zp_to_wc_return.groupby(['cost_centre',
'type1', 'fofo_distributor'])[["net_value"]].sum().reset_index()
zp_to_wc_return3['net_value'] = zp_to_wc_return3['net_value'].astype(float)
return_from_zippin_net_value_inc_tax = pd.pivot_table(zp_to_wc_return3,
values='net_value',
index=['type1', 'fofo_distributor'],
columns=['cost_centre']).reset_index().rename(
columns={
"type1": "index"})
return_from_zippin_net_value_inc_tax.rename(columns={'index': 'tag_flag'}, inplace=True)
return_from_zippin_net_value_inc_tax.loc[return_from_zippin_net_value_inc_tax['tag_flag'] == 'ethical',
'tag_flag'] = 'return_from_zp_inc_tax_ethical'
return_from_zippin_net_value_inc_tax.loc[return_from_zippin_net_value_inc_tax['tag_flag'] == 'generic',
'tag_flag'] = 'return_from_zp_inc_tax_generic'
return_from_zippin_net_value_inc_tax.loc[return_from_zippin_net_value_inc_tax['tag_flag'] == 'others',
'tag_flag'] = 'return_from_zp_inc_tax_others'
if mis_tag == 'breakup':
return_from_zippin_net_value_inc_tax.loc[return_from_zippin_net_value_inc_tax['tag_flag'] == 'GOODAID',
'tag_flag'] = 'return_from_zp_inc_tax_GOODAID'
return_from_zippin = pd.concat([return_from_zippin,return_from_zippin_Cogs_taxable,return_from_zippin_net_value_inc_tax],sort=True)
return return_from_zippin
def return_from_workcell(self, Wc_return, mis_tag, fofo_tag = 'no'):
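# Returns from Workcell: pivots return taxable value by cost centre and type1 (plus
# fofo_distributor for FOFO); the COGS-taxable pivot is computed but its concat is commented out.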
wc_return = Wc_return.copy(deep=True)
wc_return['return_cogs_taxable'] = np.vectorize(self.taxable_value_vat_based_2)(
wc_return['cogs'], wc_return['vat'])
if fofo_tag == 'no':
wc_return1 = wc_return.groupby(['cost_centre',
'type1'])[["taxable_value"]].sum().reset_index()
wc_return1['taxable_value'] = wc_return1['taxable_value'].astype(float)
return_from_workcell = pd.pivot_table(wc_return1,
values='taxable_value',
index='type1',
columns=['cost_centre']).reset_index().rename(columns={
"type1": "index"})
return_from_workcell.rename(columns={'index': 'tag_flag'}, inplace=True)
return_from_workcell.loc[return_from_workcell['tag_flag'] == 'ethical',
'tag_flag'] = 'return_from_wc_ethical'
return_from_workcell.loc[return_from_workcell['tag_flag'] == 'generic',
'tag_flag'] = 'return_from_wc_generic'
return_from_workcell.loc[return_from_workcell['tag_flag'] == 'others',
'tag_flag'] = 'return_from_wc_others'
return_from_workcell.loc[return_from_workcell['tag_flag'] == 'GOODAID',
'tag_flag'] = 'return_from_wc_GOODAID'
# Return Cogs Taxable
wc_return2 = wc_return.groupby(['cost_centre',
'type1'])[["return_cogs_taxable"]].sum().reset_index()
wc_return2['return_cogs_taxable'] = wc_return2['return_cogs_taxable'].astype(float)
return_from_workcell_Cogs_taxable = pd.pivot_table(wc_return2,
values='return_cogs_taxable',
index='type1',
columns=['cost_centre']).reset_index().rename(columns={
"type1": "index"})
return_from_workcell_Cogs_taxable.rename(columns={'index': 'tag_flag'}, inplace=True)
return_from_workcell_Cogs_taxable.loc[return_from_workcell_Cogs_taxable['tag_flag'] == 'ethical',
'tag_flag'] = 'return_from_wc_COGS_taxable_ethical'
return_from_workcell_Cogs_taxable.loc[return_from_workcell_Cogs_taxable['tag_flag'] == 'generic',
'tag_flag'] = 'return_from_wc_COGS_taxable_generic'
return_from_workcell_Cogs_taxable.loc[return_from_workcell_Cogs_taxable['tag_flag'] == 'others',
'tag_flag'] = 'return_from_wc_COGS_taxable_others'
return_from_workcell_Cogs_taxable.loc[return_from_workcell_Cogs_taxable['tag_flag'] == 'GOODAID',
'tag_flag'] = 'return_from_wc_COGS_taxable_GOODAID'
elif fofo_tag == 'yes':
wc_return = wc_return[wc_return['franchisee_id']!=1]
wc_return1 = wc_return.groupby(['cost_centre',
'type1','fofo_distributor'])[["taxable_value"]].sum().reset_index()
wc_return1['taxable_value'] = wc_return1['taxable_value'].astype(float)
return_from_workcell = pd.pivot_table(wc_return1,
values='taxable_value',
index=['type1','fofo_distributor'],
columns=['cost_centre']).reset_index().rename(columns={
"type1": "index"})
return_from_workcell.rename(columns={'index': 'tag_flag'}, inplace=True)
return_from_workcell.loc[return_from_workcell['tag_flag'] == 'ethical',
'tag_flag'] = 'return_from_wc_ethical'
return_from_workcell.loc[return_from_workcell['tag_flag'] == 'generic',
'tag_flag'] = 'return_from_wc_generic'
return_from_workcell.loc[return_from_workcell['tag_flag'] == 'others',
'tag_flag'] = 'return_from_wc_others'
return_from_workcell.loc[return_from_workcell['tag_flag'] == 'GOODAID',
'tag_flag'] = 'return_from_wc_GOODAID'
# Return COGS taxable
wc_return2 = wc_return.groupby(['cost_centre',
'type1', 'fofo_distributor'])[["return_cogs_taxable"]].sum().reset_index()
wc_return2['return_cogs_taxable'] = wc_return2['return_cogs_taxable'].astype(float)
return_from_workcell_Cogs_taxable = pd.pivot_table(wc_return2,
values='return_cogs_taxable',
index=['type1', 'fofo_distributor'],
columns=['cost_centre']).reset_index().rename(columns={
"type1": "index"})
return_from_workcell_Cogs_taxable.rename(columns={'index': 'tag_flag'}, inplace=True)
return_from_workcell_Cogs_taxable.loc[return_from_workcell_Cogs_taxable['tag_flag'] == 'ethical',
'tag_flag'] = 'return_from_wc_COGS_taxable_ethical'
return_from_workcell_Cogs_taxable.loc[return_from_workcell_Cogs_taxable['tag_flag'] == 'generic',
'tag_flag'] = 'return_from_wc_COGS_taxable_generic'
return_from_workcell_Cogs_taxable.loc[return_from_workcell_Cogs_taxable['tag_flag'] == 'others',
'tag_flag'] = 'return_from_wc_COGS_taxable_others'
return_from_workcell_Cogs_taxable.loc[return_from_workcell_Cogs_taxable['tag_flag'] == 'GOODAID',
'tag_flag'] = 'return_from_wc_COGS_taxable_GOODAID'
# return_from_workcell = pd.concat([return_from_workcell,return_from_workcell_Cogs_taxable],sort=True)
return return_from_workcell
def total_sku_instock(self, Inventory, mis_tag,fofo_tag = 'no'):
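# Total SKUs in stock: distinct drug_id count per type1 (and per fofo_distributor for FOFO stores).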
inventory = Inventory.copy(deep=True)
if fofo_tag == 'no':
total_sku_instock = inventory.groupby(['type1'])[["drug_id"]].nunique().reset_index()
elif fofo_tag == 'yes':
inventory = inventory[inventory['franchisee_id']!=1]
total_sku_instock = inventory.groupby(['type1','fofo_distributor'])[["drug_id"]].nunique().reset_index()
total_sku_instock.rename(columns={'type1': 'tag_flag'}, inplace=True)
total_sku_instock.loc[total_sku_instock['tag_flag'] == 'ethical',
'tag_flag'] = 'total_sku_instock_ethical'
total_sku_instock.loc[total_sku_instock['tag_flag'] == 'generic',
'tag_flag'] = 'total_sku_instock_generic'
total_sku_instock.loc[total_sku_instock['tag_flag'] == 'others',
'tag_flag'] = 'total_sku_instock_others'
if mis_tag == 'breakup':
total_sku_instock.loc[total_sku_instock['tag_flag'] == 'GOODAID',
'tag_flag'] = 'total_sku_instock_GOODAID'
total_sku_instock.rename(columns={'drug_id': 'count'}, inplace=True)
return total_sku_instock
def chronic_acute_qty(self, Inventory, Stores):
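# Chronic vs acute stock quantity: sums in-stock quantity per store and type1,
# pivoted across stores separately for the chronic and acute categories.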
total_SKU = Inventory.copy(deep=True)
stores = Stores.copy(deep=True)
total_SKU['value'] = total_SKU['quantity'] * total_SKU['final_ptr']
total_SKU_amount_qty = total_SKU.groupby(['store_id', 'type1', 'category'],
as_index=False).agg({
'drug_id': pd.Series.nunique,
'quantity': ['sum'],
'value': ['sum']}).reset_index(drop=True)
total_SKU_amount_qty.columns = ["_".join(x) for x in total_SKU_amount_qty.columns.ravel()]
total_SKU_amount_qty = pd.merge(left=total_SKU_amount_qty, right=stores,
how='left', left_on=['store_id_'], right_on=['store_id'])
total_SKU_amount_qty2 = total_SKU_amount_qty[total_SKU_amount_qty['category_'] == 'chronic']
chronic_qty = pd.pivot_table(total_SKU_amount_qty2,
values='quantity_sum',
index='type1_',
columns=['store_name']).reset_index()
chronic_qty['tag_flag'] = 'chronic_qty'
total_SKU_amount_qty3 = total_SKU_amount_qty[total_SKU_amount_qty['category_'] == 'acute']
acute_qty = pd.pivot_table(total_SKU_amount_qty3,
values='quantity_sum',
index='type1_',
columns=['store_name']).reset_index()
acute_qty['tag_flag'] = 'acute_qty'
chronic_acute_qty = pd.concat([chronic_qty, acute_qty], sort=True)
chronic_acute_qty.rename(columns={'type1_': 'type1'}, inplace=True)
return chronic_acute_qty
def lp_chronic_acute(self, Local_purchase_data, Sales, fofo_tag = 'no'):
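# Local purchase, chronic vs acute: joins sales to local purchases on inventory_id, sums
# local-purchase net_value per store and type1 (plus fofo_distributor for FOFO),
# then pivots the chronic and acute categories separately and concatenates them.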
local_purchase_data = Local_purchase_data.copy(deep=True)
sales = Sales.copy(deep=True)
if fofo_tag == 'yes':
local_purchase_data = local_purchase_data[local_purchase_data['franchisee_id']!=1]
sales = sales[sales['franchisee_id']!=1]
sold_local_purchase = sales.merge(local_purchase_data, on='inventory_id', how='inner')
sold_local_purchase['sold_quantity'] = sold_local_purchase['quantity']
sold_local_purchase['revenue'] = sold_local_purchase['quantity'] * sold_local_purchase['rate']
df1 = sold_local_purchase.groupby(['inventory_id'], as_index=False).agg({
'sold_quantity': ['sum'],
'revenue': ['sum']}).reset_index(drop=True)
df1.columns = ["_".join(x) for x in df1.columns.ravel()]
df1.columns = df1.columns.str.rstrip('_')
df2 = pd.merge(left=local_purchase_data, right=df1, how='left', on=['inventory_id'])
if fofo_tag == 'no':
lp1 = df2.groupby(['store_id', 'store_name',
'category', 'type1'])[['net_value']].sum().reset_index()
elif fofo_tag == 'yes':
lp1 = df2.groupby(['store_id', 'store_name',
'category', 'type1', 'fofo_distributor'])[['net_value']].sum().reset_index()
lp_chronic = lp1[lp1['category'] == 'chronic']
lp_chronic['net_value'] = lp_chronic['net_value'].astype(float)
if fofo_tag == 'no':
lp_chronic = pd.pivot_table(lp_chronic,
values='net_value',
index='type1',
columns=['store_name']).reset_index()
lp_chronic['tag_flag'] = 'local_purchase_chronic'
lp_acute = lp1[lp1['category'] == 'acute']
lp_acute['net_value'] = lp_acute['net_value'].astype(float)
lp_acute = pd.pivot_table(lp_acute,
values='net_value',
index='type1',
columns=['store_name']).reset_index()
lp_acute['tag_flag'] = 'local_purchase_acute'
elif fofo_tag == 'yes':
lp_chronic = pd.pivot_table(lp_chronic,
values='net_value',
index=['type1','fofo_distributor'],
columns=['store_name']).reset_index()
lp_chronic['tag_flag'] = 'local_purchase_chronic'
lp_acute = lp1[lp1['category'] == 'acute']
lp_acute['net_value'] = lp_acute['net_value'].astype(float)
lp_acute = pd.pivot_table(lp_acute,
values='net_value',
index=['type1','fofo_distributor'],
columns=['store_name']).reset_index()
lp_acute['tag_flag'] = 'local_purchase_acute'
lp_chronic_acute = pd.concat([lp_chronic, lp_acute], sort=True)
return lp_chronic_acute
def repeat_consumer_chronic_acute(self, Sales, All_cons_initial_bill_date, Stores,choose_year,choose_month):
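# Repeat-consumer sales: patients whose first bill does not fall in the chosen month are marked
# repeat; their chronic and acute sale value is pivoted per type1 and order_source across stores.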
sales = Sales.copy(deep=True)
df1 = All_cons_initial_bill_date.copy(deep=True)
stores = Stores.copy(deep=True)
df1['year'] = df1['created_at'].dt.year
df1["month"] = df1['created_at'].dt.month
df1 = df1[(df1['year'] == int(choose_year)) & (df1['month'] == int(choose_month))]
sales['flag'] = np.where(sales['category'] == "chronic", 1, 0)
df2 = sales.groupby(['store_id', 'order_source', 'patient_id'])[['flag']].sum().reset_index()
df2['check'] = np.where(df2['flag'] > 0, "chronic", "acute")
df5 = pd.merge(left=df2, right=df1,
how='left', on=['store_id', 'patient_id'])
df6 = df5[df5['year'].isnull()]
df6['repeat'] = "yes"
zx = pd.merge(left=sales, right=df6[['store_id', 'order_source', 'patient_id', 'repeat']],
how='left', on=['store_id', 'order_source', 'patient_id'])
zx1 = zx[zx['repeat'] == 'yes']
zx1['value'] = zx1['rate'] * zx1['quantity']
zx2 = zx1.groupby(['store_id', 'order_source',
'category', 'type1'])["value"].sum().reset_index()
zx2 = pd.merge(left=zx2, right=stores,
how='left', on=['store_id'])
repeat_chronic_sale = zx2[zx2['category'] == 'chronic']
repeat_chronic_sale['value'] = repeat_chronic_sale['value'].astype(float)
repeat_chronic_sale = pd.pivot_table(repeat_chronic_sale,
values='value',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
repeat_chronic_sale['tag_flag'] = 'repeat_consumer_chronic_sale'
repeat_acute_sale = zx2[zx2['category'] == 'acute']
repeat_acute_sale['value'] = repeat_acute_sale['value'].astype(float)
repeat_acute_sale = pd.pivot_table(repeat_acute_sale,
values='value',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
repeat_acute_sale['tag_flag'] = 'repeat_consumer_acute_sale'
repeat_consumer_chronic_acute = pd.concat([repeat_chronic_sale, repeat_acute_sale],sort=True)
return repeat_consumer_chronic_acute
def inventory_6to12months(self, Inventory, Stores, mis_tag = 'breakup'):
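# Inventory ageing: buckets stock by days since creation and pivots taxable value for the
# 6-12 month and 12+ month brackets per category and type1 across stores.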
df_dd = Inventory.copy(deep=True)
stores = Stores.copy(deep=True)
df_dd['value'] = df_dd['quantity'] * df_dd['final_ptr']
df_dd['days'] = (pd.to_datetime(self.analysis_end_time) - df_dd['created_at']).dt.days
conditions = [
(df_dd['days'] >= 180) & (df_dd['days'] <= 365),
(df_dd['days'] >= 365)]
choices = ['6_12', '12+']
df_dd['age_bracket'] = np.select(conditions, choices)
# collapse high-value variants into their base type; the second np.where falls back to type1
# so the ethical mapping from the first step is preserved before bucketing the rest as 'others'
df_dd['type1'] = np.where(df_dd['type'].isin(['ethical', 'high-value-ethical']),
"ethical", df_dd['type'])
df_dd['type1'] = np.where(df_dd['type'].isin(['generic', 'high-value-generic']),
"generic", df_dd['type1'])
df_dd['type1'] = np.where(~df_dd['type1'].isin(['ethical', 'generic']), "others", df_dd['type1'])
if mis_tag == 'breakup':
df_dd['type1'] = np.where(df_dd['company'].isin(['GOODAID']),
"GOODAID", df_dd['type1'])
df_dd['taxable'] = (df_dd['quantity'] * df_dd['final_ptr']) / (1 + ((df_dd['vat']) / 100))
df_ageing = df_dd.groupby(['store_id', 'category', 'type1', 'age_bracket'],
as_index=False).agg({
'drug_id': pd.Series.nunique,
'value': ['sum'],
'taxable': ['sum']}).reset_index(drop=True)
df_ageing.columns = ["_".join(x) for x in df_ageing.columns.ravel()]
df_ageing1 = df_ageing[df_ageing['age_bracket_'].isin(['6_12', '12+'])]
df_ageing1 = pd.merge(left=df_ageing1, right=stores,
how='left', left_on=['store_id_'], right_on=['store_id'])
df_ageing1['taxable_sum'] = df_ageing1['taxable_sum'].astype(float)
inventory_6to12months = pd.pivot_table(df_ageing1,
values='taxable_sum',
index=['age_bracket_', 'category_', 'type1_'],
columns=['store_name']).reset_index()
inventory_6to12months.rename(columns={'type1_': 'type1'}, inplace=True)
inventory_6to12months.rename(columns={'category_': 'category'}, inplace=True)
inventory_6to12months['tag_flag'] = 'inventory_6to12months'
return inventory_6to12months
def zippin_pl_cogs(self, Sales, Customer_returns, Stores,fofo_tag = 'no'):
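# Zippin P&L COGS: computes GMV / revenue / COGS (with and without tax) for sales and customer
# returns, nets the taxable COGS, and pivots net_cogs per type1 and order_source (plus
# fofo_distributor for FOFO stores, excluding franchisee_id 1) across stores.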
df_aa = Sales.copy(deep=True)
df_bb = Customer_returns.copy(deep=True)
stores = Stores.copy(deep=True)
df_aa['GMV'] = df_aa['quantity'] * df_aa['mrp']
df_aa['GMV_tax'] = np.vectorize(self.taxable_value)(df_aa['quantity'], df_aa['mrp'],
df_aa['cgst_rate'], df_aa['sgst_rate'],
df_aa['igst_rate'])
df_aa['REVENUE'] = df_aa['quantity'] * df_aa['rate']
df_aa['REVENUE_tax'] = np.vectorize(self.taxable_value)(df_aa['quantity'], df_aa['rate'],
df_aa['cgst_rate'], df_aa['sgst_rate'],
df_aa['igst_rate'])
df_aa['COGS'] = df_aa['quantity'] * df_aa['final_ptr']
df_aa['COGS_tax'] = np.vectorize(self.taxable_value)(df_aa['quantity'], df_aa['final_ptr'],
df_aa['cgst_rate'],
df_aa['sgst_rate'], df_aa['igst_rate'])
# df_aa['TAX'] = (df_aa['quantity'] * df_aa['final_ptr']) / (1 + ((df_aa['cgst_rate'] + df_aa['sgst_rate']) / 100))
df_aa[['GMV', 'GMV_tax', 'REVENUE', 'REVENUE_tax', 'COGS', 'COGS_tax']] = df_aa[
['GMV', 'GMV_tax', 'REVENUE', 'REVENUE_tax', 'COGS', 'COGS_tax']].astype(float)
if fofo_tag == 'no':
df_gross = df_aa.groupby(['store_id', 'type1', 'category', 'payment_method',
'order_source'],
as_index=False).agg({
'drug_id': pd.Series.nunique,
'quantity': ['sum'],
'GMV': ['sum'],
'GMV_tax': ['sum'],
'REVENUE': ['sum'],
'REVENUE_tax': ['sum'],
'COGS': ['sum'],
'COGS_tax': ['sum'],
'bill_id': pd.Series.nunique
# 'net_payable': ['mean']
}).reset_index(drop=True)
df_gross.columns = ["_".join(x) for x in df_gross.columns.ravel()]
df_gross.rename(columns={'store_id_': 'store_id',
'type1_': 'type1',
'category_': 'category',
'payment_method_': 'payment_method',
'order_source_': 'order_source'}, inplace=True)
ABV_temp = df_aa.drop_duplicates(subset=['store_id', 'type1', 'category',
'payment_method', 'bill_id',
'net_payable', 'order_source'])
ABV_temp['net_payable'] = ABV_temp['net_payable'].astype(float)
ABV = ABV_temp.groupby(['store_id', 'type1', 'category',
'payment_method', 'order_source'])["net_payable"].mean().reset_index()
df_gross_all = pd.merge(left=df_gross, right=ABV,
how='left', on=['store_id', 'type1', 'category',
'payment_method', 'order_source'])
df_bb['GMV'] = df_bb['returned_quantity'] * df_bb['mrp']
df_bb['GMV_tax'] = np.vectorize(self.taxable_value)(df_bb['returned_quantity'], df_bb['mrp'],
df_bb['cgst_rate'], df_bb['sgst_rate'],
df_bb['igst_rate'])
df_bb['REVENUE'] = df_bb['returned_quantity'] * df_bb['rate']
df_bb['REVENUE_tax'] = np.vectorize(self.taxable_value)(df_bb['returned_quantity'], df_bb['rate'],
df_bb['cgst_rate'], df_bb['sgst_rate'],
df_bb['igst_rate'])
df_bb['COGS'] = df_bb['returned_quantity'] * df_bb['final_ptr']
df_bb['COGS_tax'] = np.vectorize(self.taxable_value)(df_bb['returned_quantity'], df_bb['final_ptr'],
df_bb['cgst_rate'], df_bb['sgst_rate'],
df_bb['igst_rate'])
# df_bb['TAX'] = (df_bb['returned_quantity'] * df_bb['final_ptr']) / (1 + ((df_bb['cgst_rate'] + df_bb['sgst_rate']) / 100))
df_bb[['GMV', 'GMV_tax', 'REVENUE', 'REVENUE_tax', 'COGS', 'COGS_tax']] = df_bb[
['GMV', 'GMV_tax', 'REVENUE', 'REVENUE_tax', 'COGS', 'COGS_tax']].astype(float)
df_returns = df_bb.groupby(['store_id', 'type1', 'category',
'payment_method', 'order_source'],
as_index=False).agg({
'drug_id': pd.Series.nunique,
'returned_quantity': ['sum'],
'GMV': ['sum'],
'GMV_tax': ['sum'],
'REVENUE': ['sum'],
'REVENUE_tax': ['sum'],
'COGS': ['sum'],
'COGS_tax': ['sum']}).reset_index(drop=True)
df_returns.columns = ["_".join(x) for x in df_returns.columns.ravel()]
df_returns.rename(columns={'store_id_': 'store_id',
'type1_': 'type1',
'category_': 'category',
'payment_method_': 'payment_method',
'order_source_': 'order_source'}, inplace=True)
df_gross_returns = pd.merge(left=df_gross_all, right=df_returns,
how='outer', on=['store_id', 'type1', 'category',
'payment_method', 'order_source'])
df_gross_returns.rename(columns={'store_id_': 'store_id',
'type1_': 'type',
'category_': 'category',
'drug_id_nunique_x': 'no_of_drugs_sales',
'GMV_sum_x': 'GMV_sales',
'GMV_tax_sum_x': 'GMV_sales_tax',
'REVENUE_sum_x': 'REVENUE_sales',
'REVENUE_tax_sum_x': 'REVENUE_sales_tax',
'COGS_sum_x': 'COGS_sales',
'COGS_tax_sum_x': 'COGS_sales_tax',
'drug_id_nunique_y': 'no_of_drugs_returns',
'GMV_sum_y': 'GMV_returns',
'GMV_tax_sum_y': 'GMV_returns_tax',
'REVENUE_sum_y': 'REVENUE_returns',
'REVENUE_tax_sum_y': 'REVENUE_returns_tax',
'COGS_sum_y': 'COGS_returns',
'COGS_tax_sum_y': 'COGS_returns_tax'}, inplace=True)
df_gross_returns.fillna(0, inplace=True)
df_gross_returns['net_cogs'] = df_gross_returns['COGS_sales_tax'] - df_gross_returns['COGS_returns_tax']
df_gross_returns = pd.merge(left=df_gross_returns, right=stores,
how='left', on=['store_id'])
zp_pl_cogs = df_gross_returns.groupby(['store_id', 'store_name',
'type1', 'order_source'])[['net_cogs']].sum().reset_index()
zp_pl_cogs['net_cogs'] = zp_pl_cogs['net_cogs'].astype(float)
zp_pl_cogs1 = pd.pivot_table(zp_pl_cogs,
values='net_cogs',
index=['type1', 'order_source'],
columns=['store_name']).reset_index()
zp_pl_cogs1['tag_flag'] = 'zp_pl_cogs'
return zp_pl_cogs1
elif fofo_tag == 'yes':
df_aa = df_aa[df_aa['franchisee_id']!=1]
df_bb = df_bb[df_bb['franchisee_id'] != 1]
df_gross = df_aa.groupby(['store_id', 'type1', 'category', 'fofo_distributor' ,'payment_method',
'order_source'],
as_index=False).agg({
'drug_id': pd.Series.nunique,
'quantity': ['sum'],
'GMV': ['sum'],
'GMV_tax': ['sum'],
'REVENUE': ['sum'],
'REVENUE_tax': ['sum'],
'COGS': ['sum'],
'COGS_tax': ['sum'],
'bill_id': pd.Series.nunique
# 'net_payable': ['mean']
}).reset_index(drop=True)
df_gross.columns = ["_".join(x) for x in df_gross.columns.ravel()]
df_gross.rename(columns={'store_id_': 'store_id',
'type1_': 'type1',
'category_': 'category',
'payment_method_': 'payment_method',
'order_source_': 'order_source',
'fofo_distributor_':'fofo_distributor'}, inplace=True)
ABV_temp = df_aa.drop_duplicates(subset=['store_id', 'type1', 'category',
'payment_method', 'bill_id',
'net_payable', 'order_source','fofo_distributor'])
ABV_temp['net_payable'] = ABV_temp['net_payable'].astype(float)
ABV = ABV_temp.groupby(['store_id', 'type1', 'category',
'payment_method', 'order_source','fofo_distributor'])["net_payable"].mean().reset_index()
df_gross_all = pd.merge(left=df_gross, right=ABV,
how='left', on=['store_id', 'type1', 'category',
'payment_method', 'order_source','fofo_distributor'])
df_bb['GMV'] = df_bb['returned_quantity'] * df_bb['mrp']
df_bb['GMV_tax'] = np.vectorize(self.taxable_value)(df_bb['returned_quantity'], df_bb['mrp'],
df_bb['cgst_rate'], df_bb['sgst_rate'],
df_bb['igst_rate'])
df_bb['REVENUE'] = df_bb['returned_quantity'] * df_bb['rate']
df_bb['REVENUE_tax'] = np.vectorize(self.taxable_value)(df_bb['returned_quantity'], df_bb['rate'],
df_bb['cgst_rate'], df_bb['sgst_rate'],
df_bb['igst_rate'])
df_bb['COGS'] = df_bb['returned_quantity'] * df_bb['final_ptr']
df_bb['COGS_tax'] = np.vectorize(self.taxable_value)(df_bb['returned_quantity'], df_bb['final_ptr'],
df_bb['cgst_rate'], df_bb['sgst_rate'],
df_bb['igst_rate'])
# df_bb['TAX'] = (df_bb['returned_quantity'] * df_bb['final_ptr']) / (1 + ((df_bb['cgst_rate'] + df_bb['sgst_rate']) / 100))
df_bb[['GMV', 'GMV_tax', 'REVENUE', 'REVENUE_tax', 'COGS', 'COGS_tax']] = df_bb[
['GMV', 'GMV_tax', 'REVENUE', 'REVENUE_tax', 'COGS', 'COGS_tax']].astype(float)
df_returns = df_bb.groupby(['store_id', 'type1', 'category',
'payment_method', 'order_source','fofo_distributor'],
as_index=False).agg({
'drug_id': pd.Series.nunique,
'returned_quantity': ['sum'],
'GMV': ['sum'],
'GMV_tax': ['sum'],
'REVENUE': ['sum'],
'REVENUE_tax': ['sum'],
'COGS': ['sum'],
'COGS_tax': ['sum']}).reset_index(drop=True)
df_returns.columns = ["_".join(x) for x in df_returns.columns.ravel()]
df_returns.rename(columns={'store_id_': 'store_id',
'type1_': 'type1',
'category_': 'category',
'payment_method_': 'payment_method',
'order_source_': 'order_source',
'fofo_distributor_':'fofo_distributor'}, inplace=True)
df_gross_returns = pd.merge(left=df_gross_all, right=df_returns,
how='outer', on=['store_id', 'type1', 'category',
'payment_method', 'order_source','fofo_distributor'])
df_gross_returns.rename(columns={'store_id_': 'store_id',
'type1_': 'type',
'category_': 'category',
'fofo_distributor_':'fofo_distributor',
'drug_id_nunique_x': 'no_of_drugs_sales',
'GMV_sum_x': 'GMV_sales',
'GMV_tax_sum_x': 'GMV_sales_tax',
'REVENUE_sum_x': 'REVENUE_sales',
'REVENUE_tax_sum_x': 'REVENUE_sales_tax',
'COGS_sum_x': 'COGS_sales',
'COGS_tax_sum_x': 'COGS_sales_tax',
'drug_id_nunique_y': 'no_of_drugs_returns',
'GMV_sum_y': 'GMV_returns',
'GMV_tax_sum_y': 'GMV_returns_tax',
'REVENUE_sum_y': 'REVENUE_returns',
'REVENUE_tax_sum_y': 'REVENUE_returns_tax',
'COGS_sum_y': 'COGS_returns',
'COGS_tax_sum_y': 'COGS_returns_tax'}, inplace=True)
df_gross_returns.fillna(0, inplace=True)
df_gross_returns['net_cogs'] = df_gross_returns['COGS_sales_tax'] - df_gross_returns['COGS_returns_tax']
df_gross_returns = pd.merge(left=df_gross_returns, right=stores,
how='left', on=['store_id'])
zp_pl_cogs = df_gross_returns.groupby(['store_id', 'store_name',
'type1', 'order_source','fofo_distributor'])[['net_cogs']].sum().reset_index()
zp_pl_cogs['net_cogs'] = zp_pl_cogs['net_cogs'].astype(float)
zp_pl_cogs1 = pd.pivot_table(zp_pl_cogs,
values='net_cogs',
index=['type1', 'order_source','fofo_distributor'],
columns=['store_name']).reset_index()
zp_pl_cogs1['tag_flag'] = 'zp_pl_cogs'
return zp_pl_cogs1
def comp_count(self, Inventory, mis_tag):
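# Distinct drug count by type; GOODAID is reported as its own type when mis_tag == 'breakup'.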
inventory = Inventory.copy(deep=True)
if mis_tag == 'breakup':
conditions = [inventory['type1'] == 'GOODAID', inventory['type1'] != 'GOODAID']
choices = ['GOODAID', inventory['type']]
inventory['type'] = np.select(conditions, choices)
comp_count = inventory.groupby(['type'])['drug_id'].nunique().reset_index()
comp_count['tag_flag'] = 'drug_count_by_type'
comp_count.rename(columns = {'drug_id':'count',
'type':'type1'}, inplace = True)
return comp_count
def generic_composition_count(self):
generic_composition_count_query = self.mis_queries.generic_composition_count_query.format(
schema=self.schema_to_select,
suffix_to_table=self.suffix_to_table)
generic_composition_count = self.rs_db.get_df(generic_composition_count_query)
generic_composition_count.columns = [c.replace('-', '_') for c in generic_composition_count.columns]
generic_composition_count['tag_flag'] = 'generic_composition_count'
return generic_composition_count
def ethical_margin(self):
ethical_margin_query = self.mis_queries.ethical_margin_query.format(
schema=self.schema_to_select,
suffix_to_table=self.suffix_to_table,
analysis_start_time=self.analysis_start_time,
analysis_end_time=self.analysis_end_time)
ethical_margin = self.rs_db.get_df(ethical_margin_query)
ethical_margin.columns = [c.replace('-', '_') for c in ethical_margin.columns]
ethical_margin['margin'] = 1 - (ethical_margin['net_value']/ethical_margin['value1'])
ethical_margin = ethical_margin[['margin']]
ethical_margin['tag_flag'] = 'ethical_margin'
return ethical_margin
def ethical_margin_fofo(self):
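# FOFO ethical margin: runs the margin query once for the Workcell distributor and once for all
# other distributors (via the equality_symbol placeholder) and concatenates the two results.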
ethical_margin_query = self.mis_queries.ethical_margin_fofo_query.format(
schema=self.schema_to_select,
suffix_to_table=self.suffix_to_table,
analysis_start_time=self.analysis_start_time,
analysis_end_time=self.analysis_end_time,
equality_symbol = '=' )
ethical_margin = self.rs_db.get_df(ethical_margin_query)
ethical_margin.columns = [c.replace('-', '_') for c in ethical_margin.columns]
ethical_margin['margin'] = 1 - (ethical_margin['net_value'] / ethical_margin['value1'])
ethical_margin = ethical_margin[['margin']]
ethical_margin['tag_flag'] = 'ethical_margin'
ethical_margin['fofo_distributor'] = 'workcell'
other_ethical_margin_query = self.mis_queries.ethical_margin_fofo_query.format(
schema=self.schema_to_select,
suffix_to_table=self.suffix_to_table,
analysis_start_time=self.analysis_start_time,
analysis_end_time=self.analysis_end_time,
equality_symbol='!=')
other_ethical_margin = self.rs_db.get_df(other_ethical_margin_query)
other_ethical_margin.columns = [c.replace('-', '_') for c in other_ethical_margin.columns]
other_ethical_margin['margin'] = 1 - (other_ethical_margin['net_value'] / other_ethical_margin['value1'])
other_ethical_margin = other_ethical_margin[['margin']]
other_ethical_margin['tag_flag'] = 'ethical_margin'
other_ethical_margin['fofo_distributor'] = 'other'
ethical_margin = pd.concat([ethical_margin,other_ethical_margin],sort = True)
return ethical_margin
def chronic_generic_count(self, Sales, fofo_tag='no'):
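# Chronic customers buying generics: counts patients with at least one chronic-generic purchase;
# for FOFO the count is split by workcell/other distributor and reconciled against the combined figure.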
df_r = Sales.copy(deep=True)
if fofo_tag == 'yes':
df_r = df_r[df_r['franchisee_id'] != 1]
df_r['flag'] = np.where((df_r['category'] == 'chronic'), 1, 0)
df_r['flag2'] = np.where(((df_r['category'] == 'chronic') &
(df_r['type'] == 'generic')), 1, 0)
if fofo_tag == 'no':
df_r3 = df_r.groupby(['store_id',
'patient_id'])[['flag2']].sum().reset_index()
chronic_generic = df_r3[df_r3['flag2'] > 0].count()["flag2"]
total = df_r3['flag2'].count()
chronic_generic_percentage = chronic_generic / total
chronic_generic_count = pd.DataFrame({'tag_flag': pd.Series("Chronic customers buying generics",
dtype='str'),
'count': pd.Series(chronic_generic, dtype='float')})
elif fofo_tag == 'yes':
df_r3 = df_r.groupby(['store_id',
'patient_id', 'fofo_distributor'])[['flag2']].sum().reset_index()
chronic_generic = df_r3[df_r3['flag2'] > 0].count()["flag2"]
chronic_generic_workcell = df_r3[(df_r3['flag2'] > 0) & (df_r3['fofo_distributor'] == 'workcell')].count()[
"flag2"]
chronic_generic_other = df_r3[(df_r3['flag2'] > 0) & (df_r3['fofo_distributor'] == 'other')].count()[
"flag2"]
chronic_generic_count_combined = pd.DataFrame({'tag_flag': pd.Series("Chronic customers buying generics",
dtype='str'),
'count': pd.Series(chronic_generic, dtype='float'),
'fofo_distributor': pd.Series("combined",
dtype='str')})
chronic_generic_count_workcell = pd.DataFrame({'tag_flag': pd.Series("Chronic customers buying generics",
dtype='str'),
'count': pd.Series(chronic_generic_workcell, dtype='float'),
'fofo_distributor': pd.Series("workcell",
dtype='str')})
chronic_generic_count_other = pd.DataFrame({'tag_flag': pd.Series("Chronic customers buying generics",
dtype='str'),
'count': pd.Series(chronic_generic_other, dtype='float'),
'fofo_distributor': pd.Series("other",
dtype='str')})
chronic_generic_count = pd.concat([chronic_generic_count_workcell, chronic_generic_count_other], sort=True)
chronic_generic_count = self.fofo_distributor_bifurcation_next_calculation_steps(
chronic_generic_count_combined,
chronic_generic_count,
['tag_flag'])
return chronic_generic_count
def sales_data_for_repeat_customer(self,date1,date2):
sales_query = self.mis_queries.sales_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table,
analysis_start_time=date1, analysis_end_time=date2)
df = self.rs_db.get_df(sales_query)
df.columns = [c.replace('-', '_') for c in df.columns]
return df
def repeat_cons_other_def_curr_month(self, sales_data_for_repeat_customer, stores,choose_month):
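# Repeat consumers (current-month definition): patients billed in two or more distinct months;
# 'lost' consumers were billed in months (m-5..m-3) but not in (m-2..m), where m = choose_month.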
choose_month = int(choose_month)
df_aa = sales_data_for_repeat_customer.copy(deep=True)
df_aa['year'] = df_aa['created_at'].dt.year
df_aa['month'] = df_aa['created_at'].dt.month
# collapse high-value variants into their base type; fall back to type1 in the second step
# so 'high-value-ethical' is not re-bucketed into 'others'
df_aa['type1'] = np.where(df_aa['type'].isin(['ethical', 'high-value-ethical']),
"ethical", df_aa['type'])
df_aa['type1'] = np.where(df_aa['type'].isin(['generic', 'high-value-generic']),
"generic", df_aa['type1'])
df_aa['type1'] = np.where(~df_aa['type1'].isin(['ethical', 'generic']),
"others", df_aa['type1'])
df1 = df_aa.groupby(['store_id', 'patient_id', 'type1', 'category',
'bill_id', 'year', 'month'])[["value"]].sum().reset_index()
df2 = df1.groupby(['store_id', 'patient_id',
'type1', 'category'])[["month"]].nunique().reset_index().rename(columns={
'month': 'unique_months_billed'})
# =============================================================================
# Total repeat consumers
# =============================================================================
df3 = df2[df2['unique_months_billed'] >= 2]
df4 = df3.groupby(['store_id',
'type1', 'category'])[["patient_id"]].count().reset_index().rename(columns={
'patient_id': 'repeat_consumers_count'})
df4['tag_flag'] = 'repeat_cons_other_def_curr_month_count'
df4 = pd.merge(left=df4, right=stores,
how='left', on=['store_id'])
df4 = pd.pivot_table(df4,
values='repeat_consumers_count',
index=['tag_flag', 'type1', 'category'],
columns=['store_name']).reset_index()
# =============================================================================
# Repeat consumers lost
# =============================================================================
# df5 = df1[df1['month'].isin([9, 10, 11])]
def previous_months(month):
month = int(month)
if month<=0:
return month + 12
else:
return month
df5 = df1[df1['month'].isin([previous_months(choose_month-5), previous_months(choose_month-4), previous_months(choose_month-3)])]
df6 = df5.groupby(['store_id', 'patient_id',
'type1', 'category'])[["month"]].nunique().reset_index().rename(columns={
'month': 'unique_months_billed_till_July'})
# df7 = df1[df1['month'].isin([12, 1, 2])]
df7 = df1[df1['month'].isin([previous_months(choose_month-2), previous_months(choose_month-1), previous_months(choose_month)])]
df8 = df7.groupby(['store_id', 'patient_id',
'type1', 'category'])[["month"]].nunique().reset_index().rename(columns={
'month': 'unique_months_billed_after_July'})
df9 = pd.merge(left=df6, right=df8,
how='left', on=['store_id', 'patient_id', 'type1', 'category'])
df10 = df9[df9['unique_months_billed_after_July'].isnull()]
df11 = df10.groupby(['store_id', 'type1', 'category'])[["patient_id"]].count().reset_index()
df11['tag_flag'] = 'repeat_cons_other_def_curr_month_lost'
df11 = pd.merge(left=df11, right=stores,
how='left', on=['store_id'])
df11 = pd.pivot_table(df11,
values='patient_id',
index=['tag_flag', 'type1', 'category'],
columns=['store_name']).reset_index()
repeat_cons_other_def_curr_month = pd.concat([df4, df11])
return repeat_cons_other_def_curr_month
def repeat_cons_other_def_past3_month(self, sales_data_for_repeat_customer, stores,choose_month):
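# Repeat consumers (past-3-month definition): same logic as the current-month version but the
# windows shift back one quarter - billed in months (m-8..m-6) and absent from (m-5..m-3).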
choose_month = int(choose_month)
df_aa = sales_data_for_repeat_customer.copy(deep=True)
df_aa['year'] = df_aa['created_at'].dt.year
df_aa['month'] = df_aa['created_at'].dt.month
# collapse high-value variants into their base type; fall back to type1 in the second step
df_aa['type1'] = np.where(df_aa['type'].isin(['ethical', 'high-value-ethical']),
"ethical", df_aa['type'])
df_aa['type1'] = np.where(df_aa['type'].isin(['generic', 'high-value-generic']),
"generic", df_aa['type1'])
df_aa['type1'] = np.where(~df_aa['type1'].isin(['ethical', 'generic']), "others", df_aa['type1'])
df1 = df_aa.groupby(['store_id', 'patient_id', 'type1', 'category',
'bill_id', 'year', 'month'])[["value"]].sum().reset_index()
df2 = df1.groupby(['store_id', 'patient_id', 'type1', 'category'])[["month"]].nunique().reset_index().rename(
columns={
'month': 'unique_months_billed'})
# =============================================================================
# Total repeat consumers
# =============================================================================
df3 = df2[df2['unique_months_billed'] >= 2]
df4 = df3.groupby(['store_id',
'type1', 'category'])[["patient_id"]].count().reset_index().rename(columns={
'patient_id': 'repeat_consumers_count'})
df4['tag_flag'] = 'repeat_cons_other_def_past3_month_count'
df4 = pd.merge(left=df4, right=stores,
how='left', on=['store_id'])
df4 = pd.pivot_table(df4,
values='repeat_consumers_count',
index=['tag_flag', 'type1', 'category'],
columns=['store_name']).reset_index()
# =============================================================================
# Repeat consumers lost
# =============================================================================
# df5 = df1[df1['month'].isin([6, 7, 8])]
def previous_months(month):
month = int(month)
if month <= 0:
return month + 12
else:
return month
df5 = df1[df1['month'].isin(
[previous_months(choose_month - 8), previous_months(choose_month - 7), previous_months(choose_month - 6)])]
df6 = df5.groupby(['store_id', 'patient_id', 'type1', 'category'])[["month"]].nunique().reset_index().rename(
columns={
'month': 'unique_months_billed_till_July'})
# df7 = df1[df1['month'].isin([9, 10, 11])]
df7 = df1[df1['month'].isin(
[previous_months(choose_month - 5), previous_months(choose_month - 4), previous_months(choose_month-3)])]
df8 = df7.groupby(['store_id', 'patient_id', 'type1', 'category'])[["month"]].nunique().reset_index().rename(
columns={
'month': 'unique_months_billed_after_July'})
df9 = pd.merge(left=df6, right=df8,
how='left', on=['store_id', 'patient_id', 'type1', 'category'])
df10 = df9[df9['unique_months_billed_after_July'].isnull()]
df11 = df10.groupby(['store_id', 'type1', 'category'])[["patient_id"]].count().reset_index()
df11['tag_flag'] = 'repeat_cons_other_def_past3_month_lost'
df11 = pd.merge(left=df11, right=stores,
how='left', on=['store_id'])
df11 = pd.pivot_table(df11,
values='patient_id',
index=['tag_flag', 'type1', 'category'],
columns=['store_name']).reset_index()
repeat_cons_other_def_past3_month = pd.concat([df4, df11])
return repeat_cons_other_def_past3_month
def other_files_ethical_margin(self):
other_files_ethical_margin_query = self.mis_queries.other_files_ethical_margin_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table)
other_files_ethical_margin = self.rs_db.get_df(other_files_ethical_margin_query)
other_files_ethical_margin.columns = [c.replace('-', '_') for c in other_files_ethical_margin.columns]
return other_files_ethical_margin
def other_files_distributor_margin(self):
other_files_distributor_margin_query = self.mis_queries.other_files_distributor_margin_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table, choose_year=self.choose_year, choose_month = self.choose_month)
other_files_distributor_margin = self.rs_db.get_df(other_files_distributor_margin_query)
other_files_distributor_margin.columns = [c.replace('-', '_') for c in other_files_distributor_margin.columns]
return other_files_distributor_margin
def other_files_inventory_at_dc_near_expiry(self):
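# DC near-expiry inventory: ages DC stock by days since creation and keeps items expiring within
# the next 90 days, aggregated by store, type1, category and age bracket.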
other_files_inventory_at_dc_near_expiry_data_query = self.mis_queries.other_files_inventory_at_dc_near_expiry_data_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table)
df_yy = self.rs_db.get_df(other_files_inventory_at_dc_near_expiry_data_query)
df_yy.columns = [c.replace('-', '_') for c in df_yy.columns]
# collapse high-value variants into their base type; fall back to type1 in the second step
df_yy['type1'] = np.where(df_yy['type'].isin(['ethical', 'high-value-ethical']),
"ethical", df_yy['type'])
df_yy['type1'] = np.where(df_yy['type'].isin(['generic', 'high-value-generic']),
"generic", df_yy['type1'])
df_yy['type1'] = np.where(~df_yy['type1'].isin(['ethical', 'generic']), "others", df_yy['type1'])
df_yy['taxable'] = (df_yy['actual_quantity'] * df_yy['final_ptr']) / (1 + ((df_yy['vat']) / 100))
df_yy['days'] = (pd.to_datetime('today') - df_yy['created_at']).dt.days
conditions = [
(df_yy['days'] >= 0) & (df_yy['days'] <= 30),
(df_yy['days'] >= 31) & (df_yy['days'] <= 60),
(df_yy['days'] >= 61) & (df_yy['days'] <= 90),
(df_yy['days'] >= 91)]
choices = ['0_30', '31_60', '61_90', '90+']
df_yy['age_bracket'] = np.select(conditions, choices)
df_yy['expiry_date'] = pd.to_datetime(df_yy['expiry'], format='%Y-%m-%d %H:%M:%S', errors='coerce')
df_yy['days_to_expiry'] = (pd.to_datetime('today') - df_yy['expiry_date']).dt.days
df_yy2 = df_yy[(df_yy['days_to_expiry'] < 0) & (df_yy['days_to_expiry'] > -90)]
DC_near_expiry = df_yy2.groupby(['store_id', 'type1', 'category', 'age_bracket'],
as_index=False).agg({
'drug_id': pd.Series.nunique,
'net_value': ['sum'],
'taxable': ['sum']}).reset_index(drop=True)
DC_near_expiry.columns = ["_".join(x) for x in DC_near_expiry.columns.ravel()]
return DC_near_expiry
def goodaid_gross_return(self):
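# GoodAid gross sales and returns per year, month and store, merged into a single frame.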
goodaid_store_sales_query = self.mis_queries.goodaid_store_sales_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table, choose_year=self.choose_year, choose_month = self.choose_month)
goodaid_store_sales = self.rs_db.get_df(goodaid_store_sales_query)
goodaid_store_sales.columns = [c.replace('-', '_') for c in goodaid_store_sales.columns]
goodaid_store_returns_query = self.mis_queries.goodaid_store_returns_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table, choose_year=self.choose_year, choose_month = self.choose_month)
goodaid_store_returns = self.rs_db.get_df(goodaid_store_returns_query)
goodaid_store_returns.columns = [c.replace('-', '_') for c in goodaid_store_returns.columns]
gross_and_returns = pd.merge(left=goodaid_store_sales, right=goodaid_store_returns,
how='left', on=['year', 'month', 'store_id', 'store_name'])
return gross_and_returns
def goodaid_zippin_inventory(self):
goodaid_zippin_inventory_query = self.mis_queries.goodaid_zippin_inventory_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table)
df_gg = self.rs_db.get_df(goodaid_zippin_inventory_query)
df_gg.columns = [c.replace('-', '_') for c in df_gg.columns]
df_gg['days'] = (pd.to_datetime('today') - df_gg['created_at']).dt.days
conditions = [
(df_gg['days'] >= 0) & (df_gg['days'] <= 30),
(df_gg['days'] >= 31) & (df_gg['days'] <= 60),
(df_gg['days'] >= 61) & (df_gg['days'] <= 90),
(df_gg['days'] >= 91)]
choices = ['0_30', '31_60', '61_90', '90+']
df_gg['ageing'] = np.select(conditions, choices)
df_gg['expiry_date'] = pd.to_datetime(df_gg['expiry'], format='%Y-%m-%d %H:%M:%S', errors='coerce')
df_gg['days_to_expiry'] = (df_gg['expiry_date'] - pd.to_datetime('today')).dt.days
del df_gg['days']
del df_gg['expiry']
return df_gg
def goodaid_dc_inventory(self):
goodaid_dc_inventory_query = self.mis_queries.goodaid_dc_inventory_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table)
df_jk1 = self.rs_db.get_df(goodaid_dc_inventory_query)
df_jk1.columns = [c.replace('-', '_') for c in df_jk1.columns]
return df_jk1
def goodaid_wh_inventory(self):
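# GoodAid warehouse inventory as of the first day of the following month, restricted to GoodAid drugs.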
# first day of the month after the chosen month; December rolls over to January of the next year
date = datetime.datetime(int(self.choose_year) + (1 if int(self.choose_month) == 12 else 0),
(int(self.choose_month) % 12) + 1, 1, 0, 0, 0).strftime('%Y-%m-%d')
goodaid_wh_inventory_query = self.mis_queries.goodaid_wh_inventory_query.format(date = date)
wh_inv = self.rs_db.get_df(goodaid_wh_inventory_query)
wh_inv.columns = [c.replace('-', '_') for c in wh_inv.columns]
goodaid_drugs_query = self.mis_queries.goodaid_drugs_query.format(schema=self.schema_to_select, suffix_to_table=self.suffix_to_table)
goodaid_drugs = self.rs_db.get_df(goodaid_drugs_query)
goodaid_drugs.columns = [c.replace('-', '_') for c in goodaid_drugs.columns]
wh_inventory = pd.merge(left=wh_inv, right=goodaid_drugs,
how='inner', on=['drug_id'])
return wh_inventory
def store_info(self):
store_info_query = self.mis_queries.store_info_query.format(schema=self.schema_to_select,
suffix_to_table=self.suffix_to_table)
store_info = self.rs_db.get_df(store_info_query)
store_info.columns = [c.replace('-', '_') for c in store_info.columns]
return store_info | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/queries/mis/mis_class.py | mis_class.py |
max_store_id = """
select
max("id") as "store-id-max"
from
"prod2-generico"."{}"
"""
insert_stores_query = """insert
into
"prod2-generico"."{}" (
"id",
"etl-created-by",
"created-at",
"updated-by",
"updated-at",
"store",
"line-manager",
"abo",
"store-manager",
"store-type",
"opened-at",
"date-diff",
"month-diff",
"latitude",
"longitude",
"store-contact-1",
"store-contact-2",
"store-address",
"city",
"store-b2b",
"line",
"landmark",
"store-group-id",
"franchisee-id",
"franchisee-name",
"cluster-id",
"cluster-name",
"acquired",
"old-new-static",
"line-manager-email",
"abo-email",
"store-manager-email",
"store-email"
)
select
st.id as id,
'etl-automation' as "etl-created-by",
max(st."created-at") as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
max(st.name) as "store",
max(case when b.type = 'line-manager' then b.name end) as "line-manager",
max(case when b.type = 'area-business-owner' then b.name end) as "abo",
max(case when b.type = 'store-manager' then b.name end) as "store-manager",
max(st.category) as "store-type",
DATE("opened-at") as "opened-at",
datediff(day,
DATE("opened-at"),
current_date) as "date-diff",
datediff(month,
DATE("opened-at"),
current_date) as "month-diff",
max(st."lat") as "latitude",
max(st."lon") as "longitude",
max(st."contact-number-1") as "store-contact-1",
max(st."contact-number-2") as "store-contact-2",
max(st."address") as "store-address",
max(sg.name) as "city",
case
when lower(SUBSTRING(st.name, 1, 3))= 'b2b' then 'B2B'
else 'Store'
end as "store-b2b",
'' as line,
'' as landmark,
max(st."store-group-id") as "store-group-id",
st."franchisee-id",
f."name",
s."cluster-id",
s."cluster-name",
st."acquired",
(case
when date(st."opened-at")>= '2022-04-01' then 'new'
when date(st."opened-at")= '0101-01-01' then 'not-opened'
else 'old'
end) as "old-new-static",
max(case when b.type = 'line-manager' then b.email end) as "line-manager-email",
max(case when b.type = 'area-business-owner' then b.email end) as "abo-email",
max(case when b.type = 'store-manager' then b.email end) as "store-manager-email",
max(st.email) as "store-email"
from
"prod2-generico".stores st
inner join "prod2-generico".franchisees f
on
st."franchisee-id" = f.id
left join
(
select
us."store-id",
u."name" as name,
u."created-at" as "date",
u.type,
u.email ,
row_number() over(
partition by us."store-id",
u.type
order by
u."created-at" desc) as t_rank
from
"prod2-generico"."users-stores" as us
inner join "prod2-generico"."users" as u on
u."id" = us."user-id"
where
u.type in ('line-manager', 'store-manager', 'area-business-owner')) as b
on
st.id = b."store-id"
and b.t_rank = 1
inner join "prod2-generico"."store-groups" sg on
st."store-group-id" = sg.id
left join (
select
sf."store-id" as "store-id",
sf."is-active" as "sf-is-active",
sc."cluster-id" as "cluster-id",
c.name as "cluster-name",
sc."is-active" as "sc-is-active"
from
"prod2-generico".features f
join
"prod2-generico"."store-features" sf
on
f.id = sf."feature-id"
join
"prod2-generico"."store-clusters" sc
on
sc."store-id" = sf."store-id"
join
"prod2-generico".clusters c
on
c.id = sc."cluster-id"
where
sf."feature-id" = 69
and sf."is-active" = 1
and sc."is-active" = 1
) as s
on
st.id = s."store-id"
where
st.id > {}
group by
st.id,
st.name,
st."opened-at",
st."franchisee-id",
f."name",
s."cluster-id",
s."cluster-name",
st."acquired",
"old-new-static";"""
update_stores_query = """update "prod2-generico"."{}" as sm
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"line-manager" = b."line-manager",
"abo" = b."abo",
"store" = b."store",
"franchisee-name" = b."franchisee-name",
"cluster-id" = b."cluster-id",
"cluster-name" = b."cluster-name",
"acquired"=b."acquired",
"opened-at"=b."opened-at",
"old-new-static"=b."old-new-static",
"line-manager-email" = b."line-manager-email",
"abo-email" = b."abo-email",
"store-manager-email" = b."store-manager-email",
"store-email" = b."store-email"
from (
select
st.id as id,
st."acquired",
st.name as "store",
st."opened-at",
(case when date(st."opened-at")>='2022-04-01' then 'new'
when date(st."opened-at")='0101-01-01' then 'not-opened'
else 'old' end) as "old-new-static",
f."name" as "franchisee-name",
s."cluster-id" as "cluster-id",
s."cluster-name" as "cluster-name",
max(case when b.type = 'line-manager' then b.name end) as "line-manager",
max(case when b.type = 'area-business-owner' then b.name end) as "abo",
max(case when b.type = 'line-manager' then b.email end) as "line-manager-email",
max(case when b.type = 'area-business-owner' then b.email end) as "abo-email",
max(case when b.type = 'store-manager' then b.email end) as "store-manager-email",
max(st.email) as "store-email"
from "prod2-generico"."{}" sm
inner join "prod2-generico".stores st on
st.id = sm.id
inner join "prod2-generico".franchisees f
on
st."franchisee-id" = f.id
left join
(
select
us."store-id",
u."name" as name,
u."created-at" as "date",
u.type,
u.email,
row_number() over(
partition by us."store-id",
u.type
order by
u."created-at" desc) as t_rank
from
"prod2-generico"."users-stores" as us
inner join "prod2-generico"."users" as u on
u."id" = us."user-id"
where
u.type in ('line-manager', 'store-manager', 'area-business-owner')) as b
on
st.id = b."store-id"
and b.t_rank = 1
inner join "prod2-generico"."store-groups" sg on
st."store-group-id" = sg.id
left join (
select
sf."store-id" as "store-id",
sf."is-active" as "sf-is-active",
sc."cluster-id" as "cluster-id",
c.name as "cluster-name",
sc."is-active" as "sc-is-active"
from
"prod2-generico".features f
join
"prod2-generico"."store-features" sf
on
f.id = sf."feature-id"
join
"prod2-generico"."store-clusters" sc
on
sc."store-id" = sf."store-id"
join
"prod2-generico".clusters c
on
c.id = sc."cluster-id"
where
sf."feature-id" = 69
and sf."is-active" = 1
and sc."is-active" = 1
) as s
on
st.id = s."store-id"
group by
st.id,
st."acquired",
st.name,
st."opened-at",
"old-new-static",
f."name",
s."cluster-id",
s."cluster-name") as b
where
sm.id = b.id
and
(sm.abo != b.abo
or
sm."line-manager" != b."line-manager"
or
sm."acquired" != b."acquired"
or
b."cluster-id" != sm."cluster-id"
or
b."cluster-name" != sm."cluster-name"
or
b."franchisee-name" != sm."franchisee-name"
or
b."opened-at" != sm."opened-at"
or
b."line-manager-email" != sm."line-manager-email"
or
b."abo-email" != sm."abo-email"
or
b."store-manager-email" != sm."store-manager-email"
or
b."store-email" != sm."store-email");
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/queries/storesmaster/storesmaster_config.py | storesmaster_config.py |
update_query = """update "prod2-generico"."{}"
SET
"cohort-quarter" = stg."cohort-quarter",
"cohort-quarter-number" = stg."cohort-quarter-number",
"year-cohort" = stg."year-cohort",
"store-id" = stg."store-id",
"bill-quarter" = stg."bill-quarter",
"bill-quarter-number" = stg."bill-quarter-number",
"year-bill" = stg."year-bill",
"bill-date" = stg."bill-date",
"day-zero-in-cohort-quarter" = stg."day-zero-in-cohort-quarter",
"day-zero-in-bill-quarter" = stg."day-zero-in-bill-quarter",
"day-index" = stg."day-index",
"quarter-diff" = stg."quarter-diff",
"resurrection-candidate" = stg."resurrection-candidate",
"cohort-quarter-patients" = stg."cohort-quarter-patients",
"cohort-resurrection-candidates" = stg."cohort-resurrection-candidates"
from "prod2-generico"."{}" retention
inner join "{}-stg" stg on
stg."patient-id" = retention."patient-id"
"""
insert_query = """insert into "prod2-generico"."{}"
select
stg.*
from
"{}-stg" stg
left join
"prod2-generico"."{}" retention
on
stg."patient-id" = retention."patient-id"
where
retention."patient-id" IS NULL
"""
temp_create = """create temp table "{}-stg"
(
"created-at" TIMESTAMP WITHOUT TIME ZONE ENCODE az64
,"created-by" VARCHAR(765) ENCODE lzo
,"updated-by" VARCHAR(765) ENCODE lzo
,"updated-at" TIMESTAMP WITHOUT TIME ZONE ENCODE az64
,"patient-id" BIGINT NOT NULL ENCODE az64
,"store-id" INTEGER NOT NULL ENCODE az64
,"bill-date" DATE NOT NULL ENCODE az64
,"last-bill-created-at" TIMESTAMP WITHOUT TIME ZONE ENCODE az64
,"year-bill" INTEGER NOT NULL ENCODE az64
,"bill-quarter" VARCHAR(255) ENCODE lzo
,"bill-quarter-number" INTEGER NOT NULL ENCODE az64
,"year-cohort" INTEGER NOT NULL ENCODE az64
,"cohort-quarter" VARCHAR(255) ENCODE lzo
,"cohort-quarter-number" INTEGER NOT NULL ENCODE az64
,"day-zero-in-cohort-quarter" TIMESTAMP WITHOUT TIME ZONE ENCODE az64
,"day-zero-in-bill-quarter" TIMESTAMP WITHOUT TIME ZONE ENCODE az64
,"day-index" INTEGER ENCODE az64
,"quarter-diff" INTEGER ENCODE az64
,"resurrection-candidate" INTEGER ENCODE az64
,"cohort-quarter-patients" BIGINT ENCODE az64
,"cohort-resurrection-candidates" BIGINT ENCODE az64
);""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/queries/retention_day_quarter/retention_day_quarter_config.py | retention_day_quarter_config.py |
import os
import time
from io import BytesIO
import boto3
import pandas as pd
import pg8000
import pymongo
import redshift_connector as rc
from sqlalchemy import create_engine
from zeno_etl_libs.config.common import Config
class Athena:
def __init__(self):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.athena_connection = None
self.db_secrets = secrets
self.region = 'ap-south-1'
self.aws_access_key_id = secrets['AWS_ACCESS_KEY_ID']
self.aws_secret_access_key = secrets['AWS_SECRET_ACCESS_KEY_ID']
self.s3_resource = boto3.resource('s3', self.region, aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key)
self.s3_client = boto3.client('s3', self.region, aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key)
def connection(self, schema_name=None):
"""
This function creates an Athena connection based on the schema name
input:
schema_name: STRING (This variable connects with the specified database in Athena, default value: None)
output:
Athena Connection object
"""
s3_staging_dir = 's3://aws-glue-temporary-921939243643-ap-south-1/athena/'
if schema_name:
conn_str = f"awsathena+rest://{self.db_secrets['AWS_ACCESS_KEY_ID']}:{self.db_secrets['AWS_SECRET_ACCESS_KEY_ID']}@athena.{self.db_secrets['AWS_REGION']}.amazonaws.com:443/{schema_name}?s3_staging_dir{s3_staging_dir}&work_group=primary"
else:
schema_name = self.db_secrets['DATALAKE_DATABASE']
conn_str = f"awsathena+rest://{self.db_secrets['AWS_ACCESS_KEY_ID']}:{self.db_secrets['AWS_SECRET_ACCESS_KEY_ID']}@athena.{self.db_secrets['AWS_REGION']}.amazonaws.com:443/{schema_name}?s3_staging_dir{s3_staging_dir}&work_group=primary"
# Create the SQLAlchemy connection. Note that you need to have pyathena installed for this.
engine = create_engine(
conn_str.format(
aws_access_key_id=self.db_secrets['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=self.db_secrets['AWS_SECRET_ACCESS_KEY_ID'],
region_name=self.db_secrets['AWS_REGION'],
schema_name=schema_name,
s3_staging_dir=s3_staging_dir,
)
)
athena_connection = engine.connect()
self.athena_connection = athena_connection
return athena_connection
def get_df(self, query=None):
df = pd.read_sql_query(query, self.athena_connection)
return df
def ingest_df_to_datalake(self, df, table_name=None, index=False, is_mis=False):
file_name = self.db_secrets['DATALAKE_DATABASE'] + '/' + table_name + f"/LOAD{int(time.time() * 1000)}.parquet"
path = "/".join(os.getcwd().split("/")[:-2]) + "/tmp/"
if not os.path.exists(path):
os.mkdir(path, 0o777)
parquet_buffer = BytesIO()
# df.to_parquet(parquet_buffer, index=index, engine='fastparquet')
df.to_parquet(parquet_buffer, index=index, engine='pyarrow')
# df.to_csv(local_file_path_csv)
if is_mis:
bucket_name = 'prod-mis-datalake'
else:
bucket_name = 'zeno-data-lake'
self.s3_resource.Object(bucket_name, file_name).put(Body=parquet_buffer.getvalue())
s3_uri = f"s3://{bucket_name}/{file_name}"
return s3_uri
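
# Minimal usage sketch (assumes AWS/Athena credentials are available through the
# zeno_etl_libs Config secrets); the query below is illustrative only.
def _example_athena_read():
    athena = Athena()
    athena.connection()  # defaults to the DATALAKE_DATABASE schema
    return athena.get_df("select 1 as ping")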
class DB:
def __init__(self, read_only=True):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.db_secrets = secrets
self.cursor = None
self.connection = None
self.read_only = read_only
def open_connection(self):
""" :return DB cursor """
"""function returns the redshift connection or cursor"""
if self.read_only:
self.connection = rc.connect(
host=self.db_secrets['REDSHIFT_HOST'],
database=self.db_secrets['REDSHIFT_DB'],
user=self.db_secrets['REDSHIFT_USER'],
password=self.db_secrets['REDSHIFT_PASSWORD'],
port=int(self.db_secrets['REDSHIFT_PORT']),
ssl=bool(int(self.db_secrets['REDSHIFT_SSL']))
)
else:
self.connection = rc.connect(
host=self.db_secrets['REDSHIFT_WRITE_HOST'],
database=self.db_secrets['REDSHIFT_WRITE_DB'],
user=self.db_secrets['REDSHIFT_WRITE_USER'],
password=self.db_secrets['REDSHIFT_WRITE_PASSWORD'],
port=int(self.db_secrets['REDSHIFT_WRITE_PORT']),
ssl=bool(int(self.db_secrets['REDSHIFT_WRITE_SSL']))
)
self.connection.autocommit = True
cursor: rc.Cursor = self.connection.cursor()
self.cursor = cursor
return cursor
def execute(self, query, params=None):
"""
query: "select * from table where col = '%s' and col2 = '%s' "
params: (x, y)
"""
try:
self.cursor.execute(query, params)
except Exception as e:
print(f"e: {e}")
if not self.connection.autocommit:
self.cursor.execute("rollback")
raise Exception(e)
def get_df(self, query) -> pd.DataFrame:
self.execute(query, params=None)
df: pd.DataFrame = self.cursor.fetch_dataframe()
if isinstance(df, type(None)):
return pd.DataFrame(
columns=[desc[0].decode("utf-8") for desc in self.cursor.description])
else:
return df
def close_connection(self):
""" make sure to close the connection, after all the DB operation are over """
print("Redshift DB connection closed successfully.")
self.cursor.close()
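
# Minimal usage sketch for the read-only Redshift wrapper; the query is
# illustrative and not taken from this module.
def _example_redshift_read():
    db = DB(read_only=True)
    db.open_connection()
    try:
        return db.get_df("select 1 as ping")
    finally:
        db.close_connection()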
class RedShiftDB(DB):
pass
class RedShiftPG8000:
def __init__(self):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.db_secrets = secrets
self.cursor = None
self.connection = None
def open_connection(self):
""" :return DB cursor """
"""function returns the redshift connection or cursor"""
self.connection = pg8000.connect(
host=self.db_secrets['REDSHIFT_HOST'],
database=self.db_secrets['REDSHIFT_DB'],
user=self.db_secrets['REDSHIFT_USER'],
password=self.db_secrets['REDSHIFT_PASSWORD'],
port=int(self.db_secrets['REDSHIFT_PORT'])
)
self.connection.autocommit = True
        cursor = self.connection.cursor()
self.cursor = cursor
return cursor
def execute(self, query):
"""
query: "select * from table where col = '%s' and col2 = '%s' "
params: (x, y)
"""
try:
self.cursor.execute(query)
except Exception as e:
print(f"e: {e}")
self.cursor.execute("rollback")
# self.cursor.execute(query)
# self.cursor.execute(query)
raise Exception(e)
def close_connection(self):
""" make sure to close the connection, after all the DB operation are over """
print("Redshift DB connection closed successfully.")
self.cursor.close()
class RedShift:
def __init__(self):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.db_secrets = secrets
self.connection = None
self.engine = None # dispose the engine after user
self.url = f"postgresql+psycopg2://{self.db_secrets['REDSHIFT_USER']}:{self.db_secrets['REDSHIFT_PASSWORD']}@" \
f"{self.db_secrets['REDSHIFT_HOST']}:{self.db_secrets['REDSHIFT_PORT']}/" \
f"{self.db_secrets['REDSHIFT_DB']}"
def open_connection(self):
""" :return DB cursor """
"""function returns the redshift connection or cursor"""
self.engine = create_engine(self.url)
self.connection = self.engine.connect()
self.connection.autocommit = True
return self.connection
def execute(self, query):
try:
self.engine.execute(query)
except Exception as e:
print(f"e: {e}")
self.engine.execute("rollback")
raise Exception(e)
def close_connection(self):
""" make sure to close the connection, after all the DB operation are over """
self.connection.close()
self.engine.dispose()
print("Redshift DB connection closed successfully.")
class MySQL:
""" MySQL DB Connection """
""" implementing singleton design pattern for DB Class """
def __init__(self, read_only=True):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.db_secrets = secrets
if read_only:
self.user = self.db_secrets['MS_USER']
self.password = self.db_secrets['MS_PASSWORD']
self.host = self.db_secrets['MS_HOST']
self.port = self.db_secrets['MS_PORT']
self.db = self.db_secrets['MS_DB']
else:
self.user = self.db_secrets['MYSQL_WRITE_USER']
self.password = self.db_secrets['MYSQL_WRITE_PASSWORD']
self.host = self.db_secrets['MYSQL_WRITE_HOST']
self.port = self.db_secrets['MYSQL_WRITE_PORT']
self.db = self.db_secrets['MYSQL_WRITE_DATABASE']
self.url = f"mysql+pymysql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}"
self.engine = None
self.connection = None
self.cursor = None
# Not calling open_connection() function, to avoid DB connection at class instantiation
# self.connection = self.open_connection()
def open_connection(self):
"""
        :return: cursor for the MySQL DB (SQLAlchemy engine using the pymysql driver)
"""
# self.connection = pymysql.connect(host=self.host, user=self.user, password=self.password, db=self.db,
# port=int(self.port))
self.engine = create_engine(self.url, connect_args={'connect_timeout': 3600})
self.connection = self.engine.raw_connection()
self.cursor = self.connection.cursor()
return self.cursor
def close(self):
"""
closes the DB connection
:return None
"""
print("MySQL DB connection closed successfully!")
self.connection.close()
self.engine.dispose()
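
# Minimal usage sketch (assumes MySQL secrets are configured). open_connection()
# returns a cursor, while .connection holds the raw connection object that
# pandas.read_sql expects elsewhere in this package.
def _example_mysql_read():
    mysql = MySQL(read_only=True)
    mysql.open_connection()
    try:
        return pd.read_sql("select 1 as ping", mysql.connection)
    finally:
        mysql.close()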
class PostGre:
""" MySQL DB Connection """
""" implementing singleton design pattern for DB Class """
def __init__(self, is_internal=False):
"""
@param is_internal: True means DB is owned by tech team, we want a connection with that
"""
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.db_secrets = secrets
if is_internal:
self.user = self.db_secrets[f'INTERNAL_PG_USER']
self.password = self.db_secrets['INTERNAL_PG_PASSWORD']
self.host = self.db_secrets['INTERNAL_PG_HOST']
self.port = self.db_secrets['INTERNAL_PG_PORT']
self.db = self.db_secrets['INTERNAL_PG_DB']
else:
self.user = self.db_secrets['PG_USER']
self.password = self.db_secrets['PG_PASSWORD']
self.host = self.db_secrets['PG_HOST']
self.port = self.db_secrets['PG_PORT']
self.db = self.db_secrets['PG_DB']
self.url = f"postgresql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}"
self.connection = None
self.cursor = None
def open_connection(self):
# import psycopg2
# conn_string = f"dbname='{self.db}' port='{self.port}' user='{self.user}' password='{self.password}' " \
# f"host='{self.host}'"
# self.connection = psycopg2.connect(conn_string)
self.connection = pg8000.connect(
database=self.db,
user=self.user,
password=self.password,
host=self.host,
port=self.port
)
self.cursor = self.connection.cursor()
return self.connection
def connection(self):
"""
        :return: connection to the PostGre DB using pg8000
"""
return self.open_connection()
    def execute(self, query, params=None):
        """
        query: "select * from table where col = '%s' and col2 = '%s' "
        params: (x, y)
        """
        try:
            # pg8000's DB-API connection has no execute(); statements go through the cursor
            self.cursor.execute(query, params)
        except Exception as e:
            print(f"e: {e}")
            self.cursor.execute("rollback")
def close(self):
"""
closes the DB connection
:return None
"""
self.connection.close()
print("PostGre DB connection closed successfully!")
def close_connection(self):
self.close()
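
# Minimal usage sketch for the pg8000-backed PostGre wrapper; the query is illustrative.
def _example_postgres_read():
    pg = PostGre()
    pg.open_connection()
    try:
        pg.cursor.execute("select 1")
        return pg.cursor.fetchall()
    finally:
        pg.close()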
class PostGreWrite:
""" implementing singleton design pattern for DB Class """
def __init__(self, is_internal=False):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.db_secrets = secrets
if is_internal:
self.user = self.db_secrets[f'INTERNAL_PG_USER']
self.password = self.db_secrets['INTERNAL_PG_PASSWORD']
self.host = self.db_secrets['INTERNAL_PG_HOST']
self.port = self.db_secrets['INTERNAL_PG_PORT']
self.db = self.db_secrets['INTERNAL_PG_DB']
else:
self.user = self.db_secrets['PG_USER']
self.password = self.db_secrets['PG_PASSWORD']
self.host = self.db_secrets['PG_HOST']
self.port = self.db_secrets['PG_PORT']
self.db = self.db_secrets['PG_DB']
self.url = f"postgresql+pg8000://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}"
# self.url = f"postgresql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}"
self.engine = None
self.connection = None
self.cursor = None
def open_connection(self):
"""
        :return: connection to the PostGre DB via SQLAlchemy (pg8000 driver)
"""
self.engine = create_engine(self.url)
self.connection = self.engine.raw_connection()
self.cursor = self.connection.cursor()
return self.connection
def close(self):
"""
closes the DB connection
:return None
"""
print("PostGre DB connection closed successfully!")
self.engine.dispose()
def close_connection(self):
self.close()
class RedshiftEngine:
def __init__(self):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.db_secrets = secrets
self.connection = None
def open_connection(self):
""" :return DB cursor """
"""function returns the redshift connection or cursor"""
host = self.db_secrets['REDSHIFT_HOST']
database = self.db_secrets['REDSHIFT_DB']
user = self.db_secrets['REDSHIFT_USER']
password = self.db_secrets['REDSHIFT_PASSWORD']
port = int(self.db_secrets['REDSHIFT_PORT'])
ssl = bool(int(self.db_secrets['REDSHIFT_SSL']))
uri = f"postgresql://{user}:{password}@{host}:{port}/{database}"
self.connection = pg8000.connect(uri)
    def execute(self, query, params=None):
        """
        query: "select * from table where col = '%s' and col2 = '%s' "
        params: (x, y)
        """
        # pg8000's DB-API connection has no execute(); statements go through a cursor
        cursor = self.connection.cursor()
        try:
            cursor.execute(query, params)
        except Exception as e:
            print(f"e: {e}")
            cursor.execute("rollback")
def create_report_table_using_df(self, df, table_name, schema):
try:
df.head(5).to_sql(
name=table_name,
con=self.connection,
index=False,
if_exists='fail',
schema=schema)
query = f"""truncate table "{schema}"."{table_name}"; """
self.connection.execute(query)
print(f"Created table: {table_name}, successfully.")
except Exception as e:
print(f"Error creating table: {e}")
def close_connection(self):
""" make sure to close the connection, after all the DB operation are over """
print("Redshift DB connection closed successfully.")
self.connection.close()
class MongoDB:
""" Mongo DB Connection """
""" implementing singleton design pattern for DB Class """
def __init__(self):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.db_secrets = secrets
self.user = self.db_secrets['MONGO_USER']
self.password = self.db_secrets['MONGO_PASSWORD']
self.host = self.db_secrets['MONGO_HOST']
self.port = self.db_secrets['MONGO_PORT']
self.connection = None
self.cursor = None
def open_connection(self, auth_source):
self.connection = pymongo.MongoClient(self.host, self.port, username=self.user,
password=self.password,
authSource=auth_source)
return self.connection
def connection(self, auth_source):
return self.open_connection(auth_source)
def close(self):
"""
closes the DB connection
:return None
"""
self.connection.close()
print("Mongo DB connection closed successfully!")
def close_connection(self):
self.close()
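
# Minimal usage sketch (database/collection names are hypothetical; auth_source
# must match the MongoDB authentication database).
def _example_mongo_read():
    mongo = MongoDB()
    client = mongo.open_connection(auth_source="admin")
    try:
        return client["some-db"]["some-collection"].find_one()
    finally:
        mongo.close()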
def download_private_key_from_s3():
# s3 = boto3.resource('s3')
# file = "id_rsa"
# ssh_pkey_full_path = '/tmp/' + file
# bucket_name = "aws-prod-glue-assets-921939243643-ap-south-1"
# logger.info(f"bucket_name: {bucket_name}")
# logger.info(f"ssh_pkey_full_path: {ssh_pkey_full_path}")
# s3.Bucket(bucket_name).download_file("private/" + file, file)
# logger.info(f"ssh_pkey_full_path downloaded successfully")
# return ssh_pkey_full_path
pass
class MSSql:
""" MSSQL DB Connection """
""" implementing singleton design pattern for DB Class """
def __init__(self, connect_via_tunnel=True, db=None):
configobj = Config.get_instance()
secrets = configobj.get_secrets()
self.db_secrets = secrets
self.user = self.db_secrets['WH_MSSQL_USER']
self.password = self.db_secrets['WH_MSSQL_PASSWORD']
self.host = self.db_secrets['WH_MSSQL_HOST']
self.port = self.db_secrets['WH_MSSQL_PORT']
if db is None:
self.db = self.db_secrets['WH_MSSQL_DATABASE']
else:
self.db = db
self.connection_string = "DRIVER={ODBC Driver 17 for SQL Server};SERVER=" + self.host + ";DATABASE=" + \
self.db + ";UID=" + self.user + ";PWD=" + self.password + ";TrustServerCertificate=yes"
# self.url = f"mssql+pymssql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}"
# TODO: switch on tunnel connect
self.connect_via_tunnel = False
self.connection = None
self.cursor = None
self.tunnel = None
def __start_tunnel(self):
from sshtunnel import SSHTunnelForwarder
try:
self.tunnel = SSHTunnelForwarder(
('workstation.generico.in', 22),
ssh_username='glue',
ssh_pkey=download_private_key_from_s3(),
ssh_private_key_password='',
remote_bind_address=('wh1.zeno.health', 1433)
)
# logger.info("Tunnel class ok")
self.tunnel.start()
# logger.info("Tunnel started")
except Exception as error:
raise Exception("MSSQL error while starting tunnel is: {}".format(error))
def open_connection(self):
try:
if self.connect_via_tunnel:
self.__start_tunnel()
import pymssql
self.connection = pymssql.connect(server=self.host, user=self.user,
password=self.password, database=self.db, port=self.port)
self.cursor = self.connection.cursor()
return self.connection
except Exception as error:
raise Exception("MSSQL error while establishing connection is: {}".format(error))
def connection(self):
"""
        :return: connection to the MSSQL DB using pymssql
"""
return self.open_connection()
def close(self):
"""
closes the DB connection
:return None
"""
self.connection.close()
if self.connect_via_tunnel:
self.tunnel.close()
print("MSSQL DB connection closed successfully!")
def close_connection(self):
        self.close()

# End of file: zeno_etl_libs/db/db.py
from math import sin, cos, sqrt, atan2, radians
##################################
# month diff calculation
##################################
def month_diff(date_a, date_b):
"""
This function returns month difference between calendar dates 'date_a' and 'date_b'
"""
return 12 * (date_a.dt.year - date_b.dt.year) + (date_a.dt.month - date_b.dt.month)
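
# Illustrative example (not in the original module): month_diff expects
# pandas datetime Series, e.g. bill dates vs. first-bill dates.
def _example_month_diff():
    import pandas as pd
    a = pd.Series(pd.to_datetime(["2022-03-15", "2022-01-31"]))
    b = pd.Series(pd.to_datetime(["2021-12-01", "2021-12-01"]))
    return month_diff(a, b)  # -> [3, 1]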
# Pass cleaner tuple (string format) into SQL query
# Handles single item tuple
def sql_tuple(a_tuple):
"""
    This function converts a tuple into a string that can be used directly in an SQL "in" clause.
    For example, the single-item tuple (1,) becomes '(1)', while (1, 2) stays '(1, 2)'.
"""
if len(a_tuple) == 1:
return '({})'.format(a_tuple[0])
return str(a_tuple)
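
# Illustrative example: sql_tuple is handy when building "in (...)" clauses.
def _example_sql_tuple():
    assert sql_tuple((8,)) == '(8)'
    assert sql_tuple((8, 9)) == '(8, 9)'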
def nearest_store(store_id, data, lat_lon_col_name=('latitude', 'longitude'), from_distance=5):
    """
    Helper function to get stores within `from_distance` km of the given store,
    using the haversine formula on the latitude/longitude columns of `data`.
    """
    R = 6378.1  # radius of earth in KM
print(f"nearest_store calculation started for store id: {store_id}")
data['lat_r'] = data[lat_lon_col_name[0]].apply(lambda x: radians(float(x)))
data['lon_r'] = data[lat_lon_col_name[1]].apply(lambda x: radians(float(x)))
lat1 = data[data['store_id'] == store_id]['lat_r'].values[0]
lon1 = data[data['store_id'] == store_id]['lon_r'].values[0]
data['lat1'] = lat1
data['lon1'] = lon1
data['dlon'] = data['lon_r'] - data['lon1']
data['dlat'] = data['lat_r'] - data['lat1']
data['a'] = data.apply(
lambda x: sin(x['dlat'] / 2) ** 2 + cos(x['lat1']) * cos(x['lat_r']) * sin(x['dlon'] / 2) ** 2, 1)
data['c'] = data.apply(lambda x: 2 * atan2(sqrt(x['a']), sqrt(1 - x['a'])), 1)
data['distance'] = R * data['c']
near_stores = data[data['distance'] <= from_distance].sort_values('distance')['store_id'].values
data.drop(columns=['lat_r', 'lon_r', 'lat1', 'lon1', 'dlon', 'dlat', 'a', 'c', 'distance'], inplace=True)
return near_stores
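
# Minimal usage sketch: `data` needs one row per store with a 'store_id' column
# plus latitude/longitude columns; the coordinates below are illustrative only.
def _example_nearest_store():
    import pandas as pd
    stores = pd.DataFrame({
        'store_id': [1, 2, 3],
        'latitude': [19.0760, 19.0896, 28.7041],
        'longitude': [72.8777, 72.8656, 77.1025],
    })
    # store ids within 5 km of store 1 (includes store 1 itself)
    return nearest_store(1, stores, from_distance=5)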
# Change HH:MM:SS to seconds
def hms_to_seconds(t):
"""
    This function converts an HH:MM:SS string into total seconds;
    for example, 02:23:18 (2 hours, 23 minutes, 18 seconds) becomes 8598.
"""
split_t = [int(i) for i in t.split(':')]
if len(split_t) == 3:
h, m, s = split_t
return 3600 * h + 60 * m + s
elif len(split_t) == 2:
m, s = split_t
return 60 * m + s
else:
s = split_t[0]
        return s

# End of file: zeno_etl_libs/utils/general_funcs.py
import pandas as pd
import numpy as np
import time
from zeno_etl_libs.django.api import Sql
from zeno_etl_libs.db.db import MySQL
def doid_custom_write(data, logger, ss_col=None, rop_col=None, oup_col=None):
"""
data : (pd.DataFrame) can contains columns ["store_id", "drug_id", ss_col, rop_col, oup_col]
if only "store_id" and "drug_id" present then ss,rop,oup is set to zero
and updated into DOID
"""
data = data.drop_duplicates()
if None in [ss_col, rop_col, oup_col]:
data["min"] = 0
data["safe_stock"] = 0
data["max"] = 0
else:
data.rename({ss_col: 'min', rop_col: 'safe_stock', oup_col: 'max'},
axis=1, inplace=True)
mysql = MySQL(read_only=False)
mysql.open_connection()
sql = Sql()
missed_entries = pd.DataFrame()
logger.info("MySQL DOID write starts")
for store_id in data['store_id'].unique():
logger.info('Mysql upload for store ' + str(store_id))
current_ss_query = f"""
SELECT doid.id, doid.`store-id` , doid.`drug-id` , doid.min,
doid.`safe-stock` , doid.max
FROM `drug-order-info-data` doid
where doid.`store-id` = {store_id}
"""
current_ss = pd.read_sql(current_ss_query, mysql.connection)
current_ss.columns = [c.replace('-', '_') for c in current_ss.columns]
data_store = data.loc[
data['store_id'] == store_id,
['store_id', 'drug_id', 'min', 'safe_stock', 'max']]
ss_joined = current_ss.merge(
data_store, on=['store_id', 'drug_id'], how='right',
suffixes=('_old', ''))
ss_joined['flag'] = np.where(
(ss_joined['min_old'] == ss_joined['min']) &
(ss_joined['safe_stock_old'] == ss_joined['safe_stock']) &
(ss_joined['max_old'] == ss_joined['max']),
'values same', 'values changed')
ss_to_upload = ss_joined.loc[
ss_joined['flag'] == 'values changed',
['id', 'min', 'safe_stock', 'max']]
logger.info('SS to update only for ' + str(
ss_joined[ss_joined['flag'] != 'values same'].shape[0]))
ss_to_upload["id"] = ss_to_upload["id"].astype(float)
data_to_be_updated_list = list(ss_to_upload.apply(dict, axis=1))
if len(data_to_be_updated_list) > 0:
chunk_size = 1000
for i in range(0, len(data_to_be_updated_list), chunk_size):
status, msg = sql.update(
{'table': 'DrugOrderInfoData',
'data_to_be_updated': data_to_be_updated_list[
i:i + chunk_size]}, logger)
logger.info(f"DrugOrderInfoData update API "
f"count: {min(i + chunk_size, len(data_to_be_updated_list))}, "
f"status: {status}, msg: {msg}")
drug_list = str(list(ss_joined.loc[
ss_joined[
'flag'] == 'values changed', 'drug_id'].unique())
).replace('[', '(').replace(']', ')')
update_test_query = f"""
SELECT `store-id` , `drug-id` , min , `safe-stock` , max
from `drug-order-info-data` doid
where `store-id` = {store_id}
and `drug-id` in {drug_list}
"""
# time.sleep(15)
update_test = pd.read_sql(update_test_query, mysql.connection)
update_test.columns = [c.replace('-', '_') for c in
update_test.columns]
update_test = ss_joined.loc[
ss_joined['flag'] == 'values changed',
['store_id', 'drug_id', 'min', 'safe_stock', 'max']].merge(
update_test, on=['store_id', 'drug_id'],
suffixes=('_new', '_prod'))
update_test['mismatch_flag'] = np.where(
(update_test['min_new'] == update_test['min_prod']) &
(update_test['safe_stock_new'] == update_test[
'safe_stock_prod']) &
(update_test['max_new'] == update_test['max_prod']),
'updated', 'not updated')
        missed_entries = pd.concat(
            [missed_entries,
             update_test[update_test['mismatch_flag'] == 'not updated']])
logger.info(
'Entries updated successfully: ' +
str(update_test[
update_test['mismatch_flag'] == 'updated'].shape[0]))
logger.info(
'Entries not updated successfully: ' +
str(update_test[
update_test['mismatch_flag'] == 'not updated'].shape[
0]))
mysql.close()
    return missed_entries

# End of file: zeno_etl_libs/utils/doid_write.py
import datetime
import numpy as np
import pandas as pd
# from memory_profiler import profile
def remove_duplicates(df: pd.DataFrame, f):
"""
    Remove duplicates from a dataframe of the form (id, f, ptr):
    'f' is the quantity column, which is summed across duplicate ids;
    'ptr' is assumed identical for duplicates and is kept as-is.
"""
print(f"Removed duplicates on column: {f}")
df1 = df.groupby('id', as_index=False)[f].sum()
df2 = df.drop_duplicates(subset='id').drop(columns=f)
df = pd.merge(left=df1, right=df2, on='id', how='left')
return df
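
# Illustrative example: rows sharing an inventory id are collapsed by summing
# the quantity column while a single ptr is kept per id.
def _example_remove_duplicates():
    df = pd.DataFrame({'id': [1, 1, 2], 'o': [5, 3, 7], 'ptr': [10.0, 10.0, 12.0]})
    return remove_duplicates(df, 'o')  # id 1 -> o = 8, id 2 -> o = 7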
def getids(df: pd.DataFrame):
"""
utility function to generate string to be used in "in" query
"""
return ",".join(str(i) for i in df['id'].unique())
def combin_xin(recon_l: pd.DataFrame, xin_l: pd.DataFrame):
"""
this will take care of stores own inventory coming back
"""
return pd.concat([recon_l, xin_l], axis=0).drop_duplicates(subset='id')
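
# Minimal usage sketch of the Data class defined below (store ids and dates are
# illustrative; `db` is an open Redshift connection wrapper from zeno_etl_libs.db.db).
def _example_inventory_recon(db):
    data = Data(db=db, csv_store_ids="2,4", start_date="2022-06-01", end_date="2022-07-01")
    return data.concat()  # one row per inventory id with reconciliation error column 'e'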
class Data:
"""
class to get the inventory related data
Example:
'o', 'cr', 'xin', 'xout', 'ret', 'sold', 'del', 'ar', 'rr', 'c'
"""
def __init__(self, db, csv_store_ids, start_date, end_date):
"""
:param db: database connection
:param csv_store_ids: multiple store ids in csv
:param start_date: start date in IST
:param end_date: end date in IST
"""
self.db = db
self.csv_store_ids = csv_store_ids
self.start_ts = f"{start_date} 02:00:00" # in IST
self.end_ts = f"{end_date} 03:00:00" # in IST
""" since snapshots names are in UTC so tables alias is one day back"""
start_date_utc = datetime.datetime.strptime(start_date, '%Y-%m-%d') + datetime.timedelta(
days=-1)
start_date_utc = start_date_utc.strftime("%Y-%m-%d")
end_date_utc = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(
days=-1)
end_date_utc = end_date_utc.strftime("%Y-%m-%d")
self.s_alias = f"-mis-{start_date_utc}"
if start_date == "2022-06-01":
# Only for 2022-06-01 manual snapshot, since snapshot name and date are same
self.s_alias = f"-mis-{start_date}"
self.e_alias = f"-mis-{end_date_utc}"
self.s_schema = "public"
self.e_schema = "public"
""" Data frames """
self.recon_l = pd.DataFrame() # Final reconciled data frame
self.p_l = pd.DataFrame() # purchased / received
self.o_l = pd.DataFrame() # opening / initial
self.cr_l = pd.DataFrame() #
self.xin_l = pd.DataFrame() #
self.cin_l = pd.DataFrame() # cluster stock transfer in
self.xout_l = pd.DataFrame() #
self.cout_l = pd.DataFrame() # cluster stock transfer out
self.sold_l = pd.DataFrame() #
self.ret_l = pd.DataFrame() #
self.ar_l = pd.DataFrame() #
self.rr_l = pd.DataFrame() #
self.del_l = pd.DataFrame() #
self.c_l = pd.DataFrame() #
def take_union(self):
"""
        keep a single 'barcode' and 'ptr' value per row after a merge, preferring the left frame and falling back to the right
"""
for col in ['barcode', 'ptr']:
self.recon_l[col] = np.where(self.recon_l[f'{col}_x'].isna(), self.recon_l[f'{col}_y'],
self.recon_l[f'{col}_x'])
self.recon_l.drop(columns=[f'{col}_x', f'{col}_y'], axis=1, inplace=True)
def opening(self):
"""
opening inventory calculation
"""
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity o,
ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is null
and quantity != 0
order by
id
"""
o_l_1 = self.db.get_df(query=q)
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity o,
ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is not null
and quantity != 0
order by
id
"""
o_l_2 = self.db.get_df(query=q)
self.o_l = pd.concat([o_l_1, o_l_2], ignore_index=True)
return self.o_l
def purchased(self):
"""
purchased inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."actual-quantity" p,
c.ptr
from
"{self.e_schema}"."invoice-items-1{self.e_alias}" a
join "{self.e_schema}"."invoices-1{self.e_alias}" b on
a."franchisee-invoice-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(c."invoice-item-id" = a.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a."actual-quantity" !=0
"""
self.p_l = self.db.get_df(query=q)
return self.p_l
def customer_returns(self):
"""
customer return inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" cr,
c.ptr
from
"{self.e_schema}"."customer-return-items-1{self.e_alias}" a
join "{self.e_schema}"."customer-returns-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."returned-at" >= '{self.start_ts}'
and b."returned-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" cr,
c.ptr
from
"{self.e_schema}"."customer-return-items-1{self.e_alias}" a
join "{self.e_schema}"."customer-returns-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."returned-at" >= '{self.start_ts}'
and b."returned-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
"""
self.cr_l = self.db.get_df(query=q)
self.cr_l = remove_duplicates(df=self.cr_l, f="cr")
return self.cr_l
def xin(self):
"""
Stock transfer in - inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a.quantity xin,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."destination-store")
where
b."destination-store" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a.quantity xin,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."destination-store")
where
b."destination-store" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.xin_l = self.db.get_df(query=q)
self.xin_l = remove_duplicates(df=self.xin_l, f="xin")
return self.xin_l
def cin(self):
"""
Cluster Stock Transfer in - inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a.quantity cin,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."destination-store")
join "{self.e_schema}"."cluster-tasks{self.e_alias}" ct on
(a."transfer-id" = ct."stock-transfer-id"
and ct."task-type" = 'stock-transfer')
where
b."destination-store" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a.quantity cin,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."destination-store")
join "{self.e_schema}"."cluster-tasks{self.e_alias}" ct on
(a."transfer-id" = ct."stock-transfer-id"
and ct."task-type" = 'stock-transfer')
where
b."destination-store" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.cin_l = self.db.get_df(query=q)
self.cin_l = remove_duplicates(df=self.cin_l, f="cin")
return self.cin_l
def xout(self):
"""
Stock transfer out inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" xout,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."source-store")
where
b."source-store" in ({self.csv_store_ids})
and a."transferred-at" >= '{self.start_ts}'
and a."transferred-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" xout,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."source-store")
where
b."source-store" in ({self.csv_store_ids})
and a."transferred-at" >= '{self.start_ts}'
and a."transferred-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.xout_l = self.db.get_df(query=q)
self.xout_l = remove_duplicates(self.xout_l, "xout")
return self.xout_l
def cout(self):
"""
Cluster Stock Transfer out inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" cout,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."source-store")
join "{self.e_schema}"."cluster-tasks{self.e_alias}" ct on
(a."transfer-id" = ct."stock-transfer-id"
and ct."task-type" = 'stock-transfer')
where
b."source-store" in ({self.csv_store_ids})
and a."transferred-at" >= '{self.start_ts}'
and a."transferred-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" cout,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."source-store")
join "{self.e_schema}"."cluster-tasks{self.e_alias}" ct on
(a."transfer-id" = ct."stock-transfer-id"
and ct."task-type" = 'stock-transfer')
where
b."source-store" in ({self.csv_store_ids})
and a."transferred-at" >= '{self.start_ts}'
and a."transferred-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.cout_l = self.db.get_df(query=q)
self.cout_l = remove_duplicates(self.cout_l, "cout")
return self.cout_l
def sold(self):
"""
Sold inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" sold,
c.ptr
from
"{self.e_schema}"."bill-items-1{self.e_alias}" a
join "{self.e_schema}"."bills-1{self.e_alias}" b on
a."bill-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" sold,
c.ptr
from
"{self.e_schema}"."bill-items-1{self.e_alias}" a
join "{self.e_schema}"."bills-1{self.e_alias}" b on
a."bill-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.sold_l = self.db.get_df(query=q)
self.sold_l = remove_duplicates(self.sold_l, "sold")
return self.sold_l
def returned_to_dc(self):
"""
Return to dc - inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" ret,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" ret,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
"""
self.ret_l = self.db.get_df(query=q)
self.ret_l = remove_duplicates(self.ret_l, "ret")
return self.ret_l
def deleted(self):
"""
Deleted - inventory calculation
"""
q = f"""
select
a.id,
nvl("barcode-reference", 0) barcode,
a.quantity del,
a.ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}" a
join "{self.e_schema}"."deleted-invoices{self.e_alias}" c on
a."invoice-id" = c.id
where
a."store-id" in ({self.csv_store_ids})
and c."deleted-at" >= '{self.start_ts}'
and c."deleted-at" <= '{self.end_ts}'
and a.quantity !=0
union
select
a.id,
nvl("barcode-reference", 0) barcode,
a.quantity del,
a.ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}" a
join "{self.e_schema}"."deleted-invoices-1{self.e_alias}" c on
a."franchisee-invoice-id" = c.id
where
a."store-id" in ({self.csv_store_ids})
and c."deleted-at" >= '{self.start_ts}'
and c."deleted-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.del_l = self.db.get_df(query=q)
self.del_l = remove_duplicates(self.del_l, "del")
return self.del_l
def closing(self):
"""
Closing inventory calculation
"""
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity c,
ptr
from
"{self.e_schema}"."inventory-1{self.e_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is null
and quantity !=0
order by
id
"""
c_l_1 = self.db.get_df(query=q)
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity c,
ptr
from
"{self.e_schema}"."inventory-1{self.e_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is not null
and quantity !=0
order by
id
"""
c_l_2 = self.db.get_df(query=q)
self.c_l = pd.concat([c_l_1, c_l_2], ignore_index=True)
return self.c_l
def audit_recon(self):
"""
Audit recon - inventory calculation
"""
q = f"""
select
b.id,
nvl("barcode-reference", 0) barcode,
a.change ar,
b.ptr
from
"{self.e_schema}"."inventory-changes-1{self.e_alias}" a
join "{self.e_schema}"."inventory-1{self.e_alias}" b on
(a."inventory-id" = b.id
and b."store-id" = a."store-id")
where
a."store-id" in ({self.csv_store_ids})
and a."created-at" >= '{self.start_ts}'
and a."created-at" <= '{self.end_ts}'
and a.change !=0
union all
select
b.id,
nvl("barcode-reference", 0) barcode,
a.change ar,
b.ptr
from
"{self.e_schema}"."inventory-changes-1{self.e_alias}" a
join "{self.e_schema}"."inventory-1{self.e_alias}" b on
(a."inventory-id" = b."barcode-reference"
and b."store-id" = a."store-id")
where
a."store-id" in ({self.csv_store_ids})
and a."created-at" >= '{self.start_ts}'
and a."created-at" <= '{self.end_ts}'
and a.change !=0
"""
self.ar_l = self.db.get_df(query=q)
self.ar_l = remove_duplicates(self.ar_l, "ar")
return self.ar_l
def reverted_returns(self):
"""
Reverted returns - inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" rr,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and a."reverted-at" >= '{self.start_ts}'
and a."reverted-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" rr,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and a."reverted-at" >= '{self.start_ts}'
and a."reverted-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
"""
self.rr_l = self.db.get_df(query=q)
self.rr_l = remove_duplicates(self.rr_l, "rr")
return self.rr_l
def get_meta_data(self):
""" extra data needed for inventory """
q = f"""
select
i.id,
i."purchase-rate" ,
d."drug-name"
from
"prod2-generico"."prod2-generico"."inventory-1" i
left join "prod2-generico"."prod2-generico".drugs d on
i."drug-id" = d.id
where
i."store-id" in ({self.csv_store_ids})
"""
return self.db.get_df(query=q)
# @profile
def start_data_fetch(self):
""" calls all the function which fetch the data from database """
print("Starting data fetch.")
self.opening()
print("opening: Successfully fetched.")
self.purchased()
print("purchased: Successfully fetched.")
self.customer_returns()
print("customer_returns: Successfully fetched.")
self.xin()
print("xin: Successfully fetched.")
self.cin()
print("cin: Successfully fetched.")
self.xout()
print("xout: Successfully fetched.")
self.cout()
print("cout: Successfully fetched.")
self.sold()
print("sold: Successfully fetched.")
self.returned_to_dc()
print("returned_to_dc: Successfully fetched.")
self.deleted()
print("deleted: Successfully fetched.")
self.closing()
print("closing: Successfully fetched.")
self.audit_recon()
print("audit_recon: Successfully fetched.")
self.reverted_returns()
print("reverted_returns: Successfully fetched.")
# @profile
def concat(self):
""" data fetching from database """
self.start_data_fetch()
"""
## combine initial and received
temp_l = select(p_l, :id, :barcode, :p => :o, :ptr)
recon_l = vcat(o_l, temp_l)
## following handles inventory lying in inventory-1 but received later
recon_l = remove_duplicates(recon_l, "o")
recon_l = combine_cr(recon_l, cr_l)
recon_l = combine_xin(recon_l, xin_l)
recon_l = combine_xout(recon_l, xout_l)
recon_l = combine_sold(recon_l, sold_l)
recon_l = combine_ret(recon_l, ret_l)
recon_l = combine_ar(recon_l, ar_l)
recon_l = combine_rr(recon_l, rr_l)
recon_l = leftjoin(recon_l, select(del_l, :id, :del), on = :id)
recon_l = leftjoin(recon_l, select(c_l, :id, :c), on = :id)
"""
""" combine initial and received and call it opening(o) """
self.p_l.rename(columns={'p': 'o'}, inplace=True)
""" following handles inventory lying in inventory-1 but received later """
self.recon_l = pd.concat([self.p_l, self.o_l], ignore_index=True)
self.recon_l = remove_duplicates(self.recon_l, "o")
"""combine_cr: following handles the case where inventory was stock transferred,
after the start time and returned before end time """
self.recon_l = pd.merge(self.recon_l, self.cr_l, on='id', how='outer')
self.take_union()
"""combine_xin: this will take care of stores own inventory coming back"""
self.recon_l = pd.merge(self.recon_l, self.xin_l, on='id', how='outer')
self.take_union()
"""combine_cin: this will take care of stores own inventory coming back from cluster """
self.recon_l = pd.merge(self.recon_l, self.cin_l, on='id', how='outer')
self.take_union()
"""combine_xout: this will take care of stores own inventory transferred out"""
self.recon_l = pd.merge(self.recon_l, self.xout_l, on='id', how='outer')
self.take_union()
"""combine_cout: this will take care of stores own inventory transferred out from cluster"""
self.recon_l = pd.merge(self.recon_l, self.cout_l, on='id', how='outer')
self.take_union()
"""combine_sold: this will take care of stores inventory sold """
self.recon_l = pd.merge(self.recon_l, self.sold_l, on='id', how='outer')
self.take_union()
"""combine_ret: this will take care of stores inventory returned """
self.recon_l = pd.merge(self.recon_l, self.ret_l, on='id', how='outer')
self.take_union()
"""combine_ar: """
self.recon_l = pd.merge(self.recon_l, self.ar_l, on='id', how='outer')
self.take_union()
"""combine_rr: """
self.recon_l = pd.merge(self.recon_l, self.rr_l, on='id', how='outer')
self.take_union()
self.recon_l = pd.merge(self.recon_l, self.del_l, on='id', how='left')
self.take_union()
self.recon_l = pd.merge(self.recon_l, self.c_l, on='id', how='left')
self.take_union()
""" calculate the error """
self.recon_l = self.recon_l.fillna(0)
for col in ['id', 'o', 'cr', 'xin', 'cin', 'xout', 'cout', 'ret', 'sold', 'del', 'ar', 'rr',
'c', 'barcode']:
self.recon_l[col] = pd.to_numeric(self.recon_l[col])
self.recon_l[col] = self.recon_l[col].astype('int', errors='raise')
""" since cin and cout are sub set of xin xout so will not be part of error calculation """
self.recon_l['e'] = self.recon_l['o'] + self.recon_l['cr'] + self.recon_l['xin'] - \
self.recon_l['xout'] - \
self.recon_l['ret'] - self.recon_l['sold'] - self.recon_l['del'] + \
self.recon_l['ar'] + \
self.recon_l['rr'] - self.recon_l['c']
        return self.recon_l

# End of file: zeno_etl_libs/utils/inventory/inventory.py
import datetime
import numpy as np
# from memory_profiler import profile
import pandas as pd
def remove_duplicates(df: pd.DataFrame, f):
"""
    Remove duplicates from a dataframe of the form (id, f, ptr):
    'f' is the quantity column, which is summed across duplicate ids;
    'ptr' is assumed identical for duplicates and is kept as-is.
"""
print(f"Removed duplicates on column: {f}")
df1 = df.groupby('id', as_index=False)[f].sum()
df2 = df.drop_duplicates(subset='id').drop(columns=f)
df = pd.merge(left=df1, right=df2, on='id', how='left')
return df
def getids(df: pd.DataFrame):
"""
utility function to generate string to be used in "in" query
"""
return ",".join(str(i) for i in df['id'].unique())
def combin_xin(recon_l: pd.DataFrame, xin_l: pd.DataFrame):
"""
this will take care of stores own inventory coming back
"""
return pd.concat([recon_l, xin_l], axis=0).drop_duplicates(subset='id')
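
# Minimal usage sketch of this variant of the Data class defined below, which also
# accepts snapshot_ist_time_delta to shift the snapshot-table aliases relative to IST.
# Store ids and dates are illustrative.
def _example_inventory_data_v2(db):
    data = Data(db=db, csv_store_ids="2,4", start_date="2022-06-01",
                end_date="2022-07-01", snapshot_ist_time_delta=1)
    return data.opening(), data.purchased_return_dispatched()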
class Data:
"""
class to get the inventory related data
Example:
'o', 'cr', 'xin', 'xout', 'ret', 'sold', 'del', 'ar', 'rr', 'c'
"""
def __init__(self, db, csv_store_ids, start_date, end_date, snapshot_ist_time_delta=0):
"""
:param db: database connection
:param csv_store_ids: multiple store ids in csv
:param start_date: start date in IST
:param end_date: end date in IST
"""
self.db = db
self.csv_store_ids = csv_store_ids
self.start_ts = f"{start_date} 02:00:00" # in IST
self.end_ts = f"{end_date} 03:00:00" # in IST
""" since snapshots names are in UTC so tables alias is one day back"""
start_date_utc = datetime.datetime.strptime(start_date, '%Y-%m-%d') - datetime.timedelta(
days=snapshot_ist_time_delta)
start_date_utc = start_date_utc.strftime("%Y-%m-%d")
end_date_utc = datetime.datetime.strptime(end_date, '%Y-%m-%d') - datetime.timedelta(
days=snapshot_ist_time_delta)
end_date_utc = end_date_utc.strftime("%Y-%m-%d")
self.s_alias = f"-mis-{start_date_utc}"
if start_date == "2022-06-01":
# Only for 2022-06-01 manual snapshot, since snapshot name and date are same
self.s_alias = f"-mis-{start_date}"
self.e_alias = f"-mis-{end_date_utc}"
self.s_schema = "public"
self.e_schema = "public"
""" Data frames """
self.recon_l = pd.DataFrame() # Final reconciled data frame
self.p_l = pd.DataFrame() # purchased / received
self.prd_l = pd.DataFrame() # purchased return dispatched
self.prs_l = pd.DataFrame() # purchased return settled
self.o_l = pd.DataFrame() # opening / initial
self.cr_l = pd.DataFrame() #
self.xin_l = pd.DataFrame() #
self.xout_l = pd.DataFrame() #
self.sold_l = pd.DataFrame() #
self.ret_l = pd.DataFrame() #
self.ar_l = pd.DataFrame() #
self.rr_l = pd.DataFrame() #
self.del_l = pd.DataFrame() #
self.c_l = pd.DataFrame() #
def take_union(self):
"""
        keep a single 'barcode' and 'ptr' value per row after a merge, preferring the left frame and falling back to the right
"""
for col in ['barcode', 'ptr']:
self.recon_l[col] = np.where(self.recon_l[f'{col}_x'].isna(), self.recon_l[f'{col}_y'],
self.recon_l[f'{col}_x'])
self.recon_l.drop(columns=[f'{col}_x', f'{col}_y'], axis=1, inplace=True)
def opening(self):
"""
opening inventory calculation
"""
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity o,
ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is null
and quantity != 0
order by
id
"""
o_l_1 = self.db.get_df(query=q)
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity o,
ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is not null
and quantity != 0
order by
id
"""
o_l_2 = self.db.get_df(query=q)
self.o_l = pd.concat([o_l_1, o_l_2], ignore_index=True)
return self.o_l
def purchased(self):
"""
purchased inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."actual-quantity" p,
c.ptr
from
"{self.e_schema}"."invoice-items-1{self.e_alias}" a
join "{self.e_schema}"."invoices-1{self.e_alias}" b on
a."franchisee-invoice-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(c."invoice-item-id" = a.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a."actual-quantity" !=0
"""
self.p_l = self.db.get_df(query=q)
return self.p_l
def purchased_return_dispatched(self):
"""
purchased return dispatched inventory calculation
"""
q = f"""
select
d.id ,
nvl("barcode-reference", 0) barcode,
c."returned-quantity" prd,
d."ptr"
from
"{self.e_schema}"."debit-notes-1{self.e_alias}" a
join "{self.e_schema}"."debit-note-items-1{self.e_alias}" b on
b."debit-note-id" = a.id
join "{self.e_schema}"."return-items-1{self.e_alias}" c on
b."item-id" = c.id
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" e on
c."return-id" = e.id
join "{self.e_schema}"."inventory-1{self.e_alias}" d on
c."inventory-id" = d.id
where
e."store-id" in ({self.csv_store_ids})
and a."dispatched-at" >= '{self.start_ts}'
and a."dispatched-at" <= '{self.end_ts}'
"""
self.prd_l = self.db.get_df(query=q)
return self.prd_l
def purchased_return_settled(self):
"""
purchased return settled inventory calculation
"""
q = f"""
select
d.id ,
nvl("barcode-reference", 0) barcode,
c."returned-quantity" prs,
d."ptr"
from
"{self.e_schema}"."debit-notes-1{self.e_alias}" a
join "{self.e_schema}"."debit-note-items-1{self.e_alias}" b on
b."debit-note-id" = a.id
join "{self.e_schema}"."return-items-1{self.e_alias}" c on
b."item-id" = c.id
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" e on
c."return-id" = e.id
join "{self.e_schema}"."inventory-1{self.e_alias}" d on
c."inventory-id" = d.id
where
e."store-id" in ({self.csv_store_ids})
and a."settled-at" >= '{self.start_ts}'
and a."settled-at" <= '{self.end_ts}'
"""
self.prs_l = self.db.get_df(query=q)
return self.prs_l
def customer_returns(self):
"""
customer return inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" cr,
c.ptr
from
"{self.e_schema}"."customer-return-items-1{self.e_alias}" a
join "{self.e_schema}"."customer-returns-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."returned-at" >= '{self.start_ts}'
and b."returned-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" cr,
c.ptr
from
"{self.e_schema}"."customer-return-items-1{self.e_alias}" a
join "{self.e_schema}"."customer-returns-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."returned-at" >= '{self.start_ts}'
and b."returned-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
"""
self.cr_l = self.db.get_df(query=q)
self.cr_l = remove_duplicates(df=self.cr_l, f="cr")
return self.cr_l
def xin(self):
"""
Stock transfer in - inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a.quantity xin,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."destination-store")
where
b."destination-store" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a.quantity xin,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."destination-store")
where
b."destination-store" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.xin_l = self.db.get_df(query=q)
self.xin_l = remove_duplicates(df=self.xin_l, f="xin")
return self.xin_l
def xout(self):
"""
Stock transfer out inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" xout,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."source-store")
where
b."source-store" in ({self.csv_store_ids})
and a."transferred-at" >= '{self.start_ts}'
and a."transferred-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" xout,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."source-store")
where
b."source-store" in ({self.csv_store_ids})
and a."transferred-at" >= '{self.start_ts}'
and a."transferred-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.xout_l = self.db.get_df(query=q)
self.xout_l = remove_duplicates(self.xout_l, "xout")
return self.xout_l
def sold(self):
"""
Sold inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" sold,
c.ptr
from
"{self.e_schema}"."bill-items-1{self.e_alias}" a
join "{self.e_schema}"."bills-1{self.e_alias}" b on
a."bill-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" sold,
c.ptr
from
"{self.e_schema}"."bill-items-1{self.e_alias}" a
join "{self.e_schema}"."bills-1{self.e_alias}" b on
a."bill-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.sold_l = self.db.get_df(query=q)
self.sold_l = remove_duplicates(self.sold_l, "sold")
return self.sold_l
def returned_to_dc(self):
"""
Return to dc - inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" ret,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" ret,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
"""
self.ret_l = self.db.get_df(query=q)
self.ret_l = remove_duplicates(self.ret_l, "ret")
return self.ret_l
def deleted(self):
"""
Deleted - inventory calculation
"""
q = f"""
select
a.id,
nvl("barcode-reference", 0) barcode,
a.quantity del,
a.ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}" a
join "{self.e_schema}"."deleted-invoices{self.e_alias}" c on
a."invoice-id" = c.id
where
a."store-id" in ({self.csv_store_ids})
and c."deleted-at" >= '{self.start_ts}'
and c."deleted-at" <= '{self.end_ts}'
and a.quantity !=0
union
select
a.id,
nvl("barcode-reference", 0) barcode,
a.quantity del,
a.ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}" a
join "{self.e_schema}"."deleted-invoices-1{self.e_alias}" c on
a."franchisee-invoice-id" = c.id
where
a."store-id" in ({self.csv_store_ids})
and c."deleted-at" >= '{self.start_ts}'
and c."deleted-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.del_l = self.db.get_df(query=q)
self.del_l = remove_duplicates(self.del_l, "del")
return self.del_l
def closing(self):
"""
Closing inventory calculation
"""
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity c,
ptr
from
"{self.e_schema}"."inventory-1{self.e_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is null
and quantity !=0
order by
id
"""
c_l_1 = self.db.get_df(query=q)
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity c,
ptr
from
"{self.e_schema}"."inventory-1{self.e_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is not null
and quantity !=0
order by
id
"""
c_l_2 = self.db.get_df(query=q)
self.c_l = pd.concat([c_l_1, c_l_2], ignore_index=True)
return self.c_l
def audit_recon(self):
"""
Audit recon - inventory calculation
"""
q = f"""
select
b.id,
nvl("barcode-reference", 0) barcode,
a.change ar,
b.ptr
from
"{self.e_schema}"."inventory-changes-1{self.e_alias}" a
join "{self.e_schema}"."inventory-1{self.e_alias}" b on
(a."inventory-id" = b.id
and b."store-id" = a."store-id")
where
a."store-id" in ({self.csv_store_ids})
and a."created-at" >= '{self.start_ts}'
and a."created-at" <= '{self.end_ts}'
and a.change !=0
union all
select
b.id,
nvl("barcode-reference", 0) barcode,
a.change ar,
b.ptr
from
"{self.e_schema}"."inventory-changes-1{self.e_alias}" a
join "{self.e_schema}"."inventory-1{self.e_alias}" b on
(a."inventory-id" = b."barcode-reference"
and b."store-id" = a."store-id")
where
a."store-id" in ({self.csv_store_ids})
and a."created-at" >= '{self.start_ts}'
and a."created-at" <= '{self.end_ts}'
and a.change !=0
"""
self.ar_l = self.db.get_df(query=q)
self.ar_l = remove_duplicates(self.ar_l, "ar")
return self.ar_l
def reverted_returns(self):
"""
Reverted returns - inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" rr,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and a."reverted-at" >= '{self.start_ts}'
and a."reverted-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" rr,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and a."reverted-at" >= '{self.start_ts}'
and a."reverted-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
"""
self.rr_l = self.db.get_df(query=q)
self.rr_l = remove_duplicates(self.rr_l, "rr")
return self.rr_l
def get_meta_data(self):
""" extra data needed for inventory """
q = f"""
select
i.id,
i."purchase-rate" ,
d."drug-name"
from
"prod2-generico"."prod2-generico"."inventory-1" i
left join "prod2-generico"."prod2-generico".drugs d on
i."drug-id" = d.id
where
i."store-id" in ({self.csv_store_ids})
"""
return self.db.get_df(query=q)
# @profile
def start_data_fetch(self):
""" calls all the function which fetch the data from database """
print("Starting data fetch.")
self.opening()
print("opening: Successfully fetched.")
self.purchased()
print("purchased: Successfully fetched.")
self.purchased_return_dispatched()
print("purchased_return_dispatched : Successfully fetched.")
self.purchased_return_settled()
print("purchased_return_settled : Successfully fetched.")
self.customer_returns()
print("customer_returns: Successfully fetched.")
self.xin()
print("xin: Successfully fetched.")
self.xout()
print("xout: Successfully fetched.")
self.sold()
print("sold: Successfully fetched.")
self.returned_to_dc()
print("returned_to_dc: Successfully fetched.")
self.deleted()
print("deleted: Successfully fetched.")
self.closing()
print("closing: Successfully fetched.")
self.audit_recon()
print("audit_recon: Successfully fetched.")
self.reverted_returns()
print("reverted_returns: Successfully fetched.")
# @profile
def concat(self):
""" data fetching from database """
self.start_data_fetch()
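# NOTE: the triple-quoted block below appears to be a legacy Julia reference
# implementation of the reconciliation order and is kept as documentation only;
# the active Python logic follows after it.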
"""
## combine initial and received
temp_l = select(p_l, :id, :barcode, :p => :o, :ptr)
recon_l = vcat(o_l, temp_l)
## following handles inventory lying in inventory-1 but received later
recon_l = remove_duplicates(recon_l, "o")
recon_l = combine_cr(recon_l, cr_l)
recon_l = combine_xin(recon_l, xin_l)
recon_l = combine_xout(recon_l, xout_l)
recon_l = combine_sold(recon_l, sold_l)
recon_l = combine_ret(recon_l, ret_l)
recon_l = combine_ar(recon_l, ar_l)
recon_l = combine_rr(recon_l, rr_l)
recon_l = leftjoin(recon_l, select(del_l, :id, :del), on = :id)
recon_l = leftjoin(recon_l, select(c_l, :id, :c), on = :id)
"""
# """ combine initial and received and call it opening(o) """
# self.p_l.rename(columns={'p': 'o'}, inplace=True)
# self.recon_l = remove_duplicates(self.o_l, f="o")
self.recon_l = self.o_l
# """ following handles inventory lying in inventory-1 but received later """
# self.recon_l = pd.concat([self.p_l, self.o_l], ignore_index=True)
# self.recon_l = remove_duplicates(self.recon_l, "o")
""" purchase """
self.recon_l = pd.merge(self.recon_l, self.p_l, on='id', how='outer')
self.take_union()
# """ purchase_return_deleted """
# self.recon_l = pd.merge(self.recon_l, self.prd_l, on='id', how='outer')
# self.take_union()
#
# """ purchase_return_settled """
# self.recon_l = pd.merge(self.recon_l, self.prs_l, on='id', how='outer')
# self.take_union()
self.recon_l['prd'] = 0
self.recon_l['prs'] = 0
"""combine_cr: following handles the case where inventory was stock transferred,
after the start time and returned before end time """
self.recon_l = pd.merge(self.recon_l, self.cr_l, on='id', how='outer')
self.take_union()
"""combine_xin: this will take care of stores own inventory coming back"""
self.recon_l = pd.merge(self.recon_l, self.xin_l, on='id', how='outer')
self.take_union()
"""combine_xout: this will take care of stores own inventory transferred out"""
self.recon_l = pd.merge(self.recon_l, self.xout_l, on='id', how='outer')
self.take_union()
"""combine_sold: this will take care of stores inventory sold """
self.recon_l = pd.merge(self.recon_l, self.sold_l, on='id', how='outer')
self.take_union()
"""combine_ret: this will take care of stores inventory returned """
self.recon_l = pd.merge(self.recon_l, self.ret_l, on='id', how='outer')
self.take_union()
"""combine_ar: """
self.recon_l = pd.merge(self.recon_l, self.ar_l, on='id', how='outer')
self.take_union()
"""combine_rr: """
self.recon_l = pd.merge(self.recon_l, self.rr_l, on='id', how='outer')
self.take_union()
""" deleted """
self.recon_l = pd.merge(self.recon_l, self.del_l, on='id', how='left')
self.take_union()
""" closing """
self.recon_l = pd.merge(self.recon_l, self.c_l, on='id', how='left')
self.take_union()
""" calculate the error """
self.recon_l = self.recon_l.fillna(0)
for col in ['id', 'o', 'p', 'prd', 'prs', 'cr', 'xin', 'xout', 'ret', 'sold', 'del', 'ar',
'rr', 'c', 'barcode']:
self.recon_l[col] = pd.to_numeric(self.recon_l[col])
self.recon_l[col] = self.recon_l[col].astype('int', errors='raise')
self.recon_l['e'] = self.recon_l['o'] + self.recon_l['p'] + self.recon_l['cr'] + \
self.recon_l['xin'] - \
self.recon_l['xout'] - \
self.recon_l['ret'] - self.recon_l['sold'] - self.recon_l['del'] + \
self.recon_l['ar'] + \
self.recon_l['rr'] - self.recon_l['c']
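# Reconciliation identity used above (descriptive note):
#   e = opening + purchased + customer_returns + transfers_in
#       - transfers_out - returns_to_dc - sold - deleted
#       + audit_recon + reverted_returns - closing
# For a fully explained ledger, e should be ~0 for every inventory id; a
# non-zero e flags unexplained stock movement for that id.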
return self.recon_l

# === end of file: zeno_etl_libs/utils/inventory/inventory_2.py (package zeno-etl-libs) ===
import numpy as np
import datetime as dt
"""
Steps -
1. Get auto-short total lead time -> from creation to receipt at store
2. If an order is marked as lost, set its lead time to 7 days
3. Cap the maximum lead time at 7 days
"""
def lead_time(store_id, reset_date, db, schema, logger=None):
end_date = str((
dt.datetime.strptime(reset_date, '%Y-%m-%d') -
dt.timedelta(7)).date())
begin_date = str((
dt.datetime.strptime(reset_date, '%Y-%m-%d') -
dt.timedelta(97)).date())
logger.info("Lead Time Calculation Starts")
logger.info(f"SB Begin Date: {begin_date}, SB End Date: {end_date}")
lead_time_query = f"""
select "store-id" , "drug-id" , "type" , status , "created-to-delivery-hour" as "lt-hrs"
from "{schema}"."as-ms" am
where "as-ms" = 'AS'
and "store-id" = {store_id}
and date("created-at") <= '{end_date}'
and date("created-at") >= '{begin_date}'
and status not in ('failed', 'deleted')
"""
lead_time = db.get_df(lead_time_query)
lead_time.columns = [c.replace('-', '_') for c in lead_time.columns]
# classify all types into generic, ethical & others
lead_time["type"] = np.where(
lead_time["type"].isin(['ethical', 'high-value-ethical']), 'ethical',
lead_time["type"])
lead_time["type"] = np.where(lead_time["type"].isin(['ethical', 'generic']),
lead_time["type"], 'others')
lead_time["lt_days"] = lead_time["lt_hrs"] / 24
lead_time["lt_days"] = lead_time["lt_days"].fillna(7)
lead_time["lt_days"] = np.where(lead_time["lt_days"] > 7, 7, lead_time["lt_days"])
lt_store_mean = np.ceil(lead_time.lt_days.mean())
lt_store_std = round(lead_time.lt_days.std(ddof=0), 2)
lt_drug = lead_time.groupby('drug_id'). \
agg({'lt_days': [np.mean, np.std]}).reset_index()
lt_drug.columns = ['drug_id', 'lead_time_mean', 'lead_time_std']
lt_drug['lead_time_std'] = np.where(
lt_drug['lead_time_std'].isin([0, np.nan]),
lt_store_std, lt_drug['lead_time_std'])
lt_drug["lead_time_mean"] = np.ceil(lt_drug["lead_time_mean"])
lt_drug['lead_time_std'] = round(lt_drug['lead_time_std'], 2)
logger.info("Lead Time Calculation Completed")
return lt_drug, lt_store_mean, lt_store_std

# === end of file: zeno_etl_libs/utils/ipc_pmf/lead_time.py (package zeno-etl-libs) ===
import numpy as np
def compare_df(df_pre, df_post, logger, cols_to_compare=None):
num_drugs = len(df_pre["drug_id"].unique())
if num_drugs != df_pre.shape[0]:
logger.info("WARNING: Duplicate drug entries present!")
if cols_to_compare is None:
cols_to_compare = ["safety_stock", "reorder_point", "order_upto_point"]
df_pre = df_pre[["drug_id"] + cols_to_compare]
df_post = df_post[["drug_id"] + cols_to_compare]
df_comb = df_pre.merge(df_post, on="drug_id", how="outer",
suffixes=('_pre', '_post'))
df_comb["changed"] = 'N'
for col in cols_to_compare:
df_comb["changed"] = np.where(
df_comb[col+str('_pre')] != df_comb[col+str('_post')],
'Y', df_comb["changed"])
drugs_corrected = list(df_comb.loc[df_comb["changed"] == 'Y']["drug_id"].unique())
return drugs_corrected
def add_correction_flag(df, corr_drug_list, corr_flag):
if "correction_flags" not in df.columns:
df["correction_flags"] = ""
df["correction_flags"] = df["correction_flags"].fillna("")
df["correction_flags"] = np.where(
(df["drug_id"].isin(corr_drug_list)) & (df["correction_flags"] != ""),
df["correction_flags"] + '-' + corr_flag, df["correction_flags"])
df["correction_flags"] = np.where(
(df["drug_id"].isin(corr_drug_list)) & (df["correction_flags"] == ""),
corr_flag, df["correction_flags"])
return df
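# Illustrative usage (a minimal sketch; the correction step and flag name below
# are hypothetical, but this mirrors how the safety-stock pipeline chains these
# two helpers):
#
#   df_pre = safety_stock_df.copy()
#   safety_stock_df = some_correction_step(safety_stock_df)  # hypothetical step
#   df_post = safety_stock_df.copy()
#   changed_drugs = compare_df(df_pre, df_post, logger)
#   safety_stock_df = add_correction_flag(safety_stock_df, changed_drugs, 'MY_CORR')
#
# Every drug whose safety_stock / reorder_point / order_upto_point changed gets
# 'MY_CORR' appended to its correction_flags column.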
def compare_df_comb(df_pre, df_post, logger, cols_to_compare=None):
num_drugs = len(df_pre["comb_id"].unique())
if num_drugs != df_pre.shape[0]:
logger.info("WARNING: Duplicate comb entries present!")
if cols_to_compare is None:
cols_to_compare = ["safety_stock", "reorder_point", "order_upto_point"]
df_pre = df_pre[["comb_id"] + cols_to_compare]
df_post = df_post[["comb_id"] + cols_to_compare]
df_comb = df_pre.merge(df_post, on="comb_id", how="outer",
suffixes=('_pre', '_post'))
df_comb["changed"] = 'N'
for col in cols_to_compare:
df_comb["changed"] = np.where(
df_comb[col+str('_pre')] != df_comb[col+str('_post')],
'Y', df_comb["changed"])
comb_corrected = list(df_comb.loc[df_comb["changed"] == 'Y']["comb_id"].unique())
return comb_corrected
def add_correction_flag_comb(df, corr_comb_list, corr_flag):
if "correction_flags" not in df.columns:
df["correction_flags"] = ""
df["correction_flags"] = df["correction_flags"].fillna("")
df["correction_flags"] = np.where(
(df["comb_id"].isin(corr_comb_list)) & (df["correction_flags"] != ""),
df["correction_flags"] + '-' + corr_flag, df["correction_flags"])
df["correction_flags"] = np.where(
(df["comb_id"].isin(corr_comb_list)) & (df["correction_flags"] == ""),
corr_flag, df["correction_flags"])
return df

# === end of file: zeno_etl_libs/utils/ipc_pmf/correction_flag.py (package zeno-etl-libs) ===
platform_prod = 0
store_col = 'store_id'
drug_col = 'drug_id'
comb_col = 'comb_id'
date_col = 'date'
target_col = 'actual_demand'
key_col = 'ts_id'
eol_cutoff = 13
mature_cutoff = 52
forecast_horizon = 4
flag_weather = 0
flag_seasonality_index = {
'ctb': 0,
'lgbm': 0,
'xgb': 0
}
flag_sample_weights = {
'ctb': 0,
'lgbm': 0,
'xgb': 0
}
num_shift_lag = 3
lags = [1]
add_lags_diff_flag = 1
lags_diff = [(1, 2)]
add_monthly_lags_flag = 1
# monthly_lags = [1, 2, 3, 6, 12]
monthly_lags = [1, 2, 3, 6]
rolling_time_feat = {
'lags': [5, 13, 26, 53],
'agg_func_dict': {'min', 'max', 'mean', 'median', 'std'}
}
ewma_lags = [4, 8]
# trend_lags = [13, 26, 53]
trend_lags = [13, 26]
perc_noise = [0.2, 0.5, 0.1]
# fc_cols = ['preds_xgb_rf_target','preds_cb_target','preds_lgb','AE', 'croston_fcst']
# models = ['croston', 'prophet', 'ETS', 'MA', 'AE_ts', 'lgbm']
run_ml_flag = 1
runs_ts_flag = 1
run_ts_4w_agg_flag = 1
run_ml_4w_agg_flag = 0
models_un_agg = ['ETS_Auto', 'ETS_12w', 'MA', 'LGBM']
models_agg = ['ETS_4w_agg', 'LGBM_4w_agg']
local_testing = 0
models = ['LGBM']
default_model = 'LGBM'
# lgbm_params = {
# 'objective': 'regression_l1',
# 'learning_rate': 0.01,
# 'max_bin': 404,
# 'num_leaves': 1000,
# 'lambda_l1': 0.003657033571790936,
# 'lambda_l2': 1.203092568431234,
# 'cat_l2': 5.192935907692467,
# 'cat_smooth': 9.67794952387374,
# 'feature_fraction': 0.997997647335764,
# 'bagging_fraction': 0.9162909273820165,
# 'bagging_freq': 7,
# 'min_data_in_leaf': 33,
# 'min_child_samples': 5,
# 'metric': 'rmse',
# 'boosting_type': 'gbdt',
# 'max_depth': -1,
# 'random_state': 42,
# 'force_row_wise': True,
# 'verbose': -1,
# 'num_iterations': 1500
# }
# fc_cols = ['croston_fcst', 'ETS_fcst', 'ma_fcst','prophet_fcst', 'AE_ts_fcst']
# cols_rename = {
# 'preds_xgb_rf_target': 'fcst_1',
# 'preds_cb_target': 'fcst_2',
# 'preds_lgb':'fcst_3',
# 'AE':'fcst_4',
# 'croston_fcst':'fcst_5'
# }
# cols_rename = {
# 'croston_fcst': 'fcst_1',
# 'ETS_fcst': 'fcst_2',
# 'ma_fcst':'fcst_3',
# 'prophet_fcst':'fcst_4',
# 'AE_ts_fcst':'fcst_5'
# }

# === end of file: zeno_etl_libs/utils/ipc_pmf/config_ipc_combination.py (package zeno-etl-libs) ===
import pandas as pd
import numpy as np
def post_processing(store_id, safety_stock_df, seg_df_comb_lvl, seg_df_drug_lvl,
schema, db, logger):
seg_df_comb_lvl[['store_id', 'comb_id']] = seg_df_comb_lvl['ts_id'].str.split('_', expand=True)
seg_df_drug_lvl[['store_id', 'drug_id']] = seg_df_drug_lvl['ts_id'].str.split('_', expand=True)
seg_df_drug_lvl['store_id'] = seg_df_drug_lvl['store_id'].astype(int)
seg_df_drug_lvl['drug_id'] = seg_df_drug_lvl['drug_id'].astype(int)
list_drugs = safety_stock_df['drug_id'].tolist()
str_drugs = str(list_drugs).replace('[', '(').replace(']', ')')
q_drug_info = f"""
select d.id as drug_id, "drug-name" as drug_name, type, composition
from "{schema}".drugs d
where d.id in {str_drugs}
"""
df_drug_info = db.get_df(q_drug_info)
# get store name
q_store_name = f""" select name from "{schema}".stores where id = {store_id} """
store_name = db.get_df(q_store_name)['name'][0]
# get current inventory and avg_ptr info
q_inv = f"""
select "drug-id" as drug_id, sum("locked-quantity"+quantity+
"locked-for-audit"+"locked-for-transfer"+"locked-for-check"+
"locked-for-return") as curr_inventory
from "{schema}"."inventory-1" i
where "store-id" = {store_id}
and "drug-id" in {str_drugs}
group by "drug-id"
"""
df_inv = db.get_df(q_inv)
q_avg_ptr_store = f"""
select "drug-id" as drug_id, avg(ptr) as avg_ptr
from "{schema}"."inventory-1" i
where "store-id" = {store_id}
and "drug-id" in {str_drugs}
and DATEDIFF(day, date("created-at"), current_date) < 365
group by "drug-id"
"""
df_avg_ptr_store = db.get_df(q_avg_ptr_store)
q_avg_ptr_sys = f"""
select "drug-id" as drug_id, "avg-ptr" as avg_ptr_sys
from "{schema}"."drug-std-info" dsi
"""
df_avg_ptr_sys = db.get_df(q_avg_ptr_sys)
# add all to ss table
safety_stock_df['store_id'] = store_id
safety_stock_df['store_name'] = store_name
safety_stock_df = safety_stock_df.merge(
df_drug_info, on='drug_id', how='left')
safety_stock_df = safety_stock_df.merge(
df_inv, on='drug_id', how='left')
safety_stock_df = safety_stock_df.merge(
df_avg_ptr_store, on='drug_id', how='left')
safety_stock_df = safety_stock_df.merge(
df_avg_ptr_sys, on='drug_id', how='left')
# replace NA in avg_ptr with system-avg_ptr
safety_stock_df["avg_ptr"] = np.where(safety_stock_df["avg_ptr"].isna(),
safety_stock_df["avg_ptr_sys"],
safety_stock_df["avg_ptr"])
safety_stock_df.drop("avg_ptr_sys", axis=1, inplace=True)
safety_stock_df["avg_ptr"] = safety_stock_df["avg_ptr"].astype(float)
# calculate DOH
safety_stock_df['fcst'] = safety_stock_df['fcst'].fillna(0)
safety_stock_df['safety_stock_days'] = np.where(
(safety_stock_df['fcst'] == 0) | (safety_stock_df['safety_stock'] == 0),
0, safety_stock_df['safety_stock'] / (safety_stock_df['fcst'] / 28))
safety_stock_df['reorder_days'] = np.where(
(safety_stock_df['fcst'] == 0) | (safety_stock_df['reorder_point'] == 0),
0, safety_stock_df['reorder_point'] / (safety_stock_df['fcst'] / 28))
safety_stock_df['order_upto_days'] = np.where(
(safety_stock_df['fcst'] == 0) | (safety_stock_df['order_upto_point'] == 0),
0, safety_stock_df['order_upto_point'] / (safety_stock_df['fcst'] / 28))
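# Illustrative example (assumed numbers): with fcst = 56 units per 28 days
# (2/day), safety_stock = 6 gives safety_stock_days = 3, reorder_point = 10
# gives reorder_days = 5, and order_upto_point = 14 gives order_upto_days = 7.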
# calculate max-value, to-order-qty and to-order-val
safety_stock_df["max_value"] = safety_stock_df['order_upto_point'] * \
safety_stock_df['avg_ptr']
safety_stock_df['curr_inventory'] = safety_stock_df['curr_inventory'].fillna(0)
safety_stock_df['to_order_quantity'] = np.where(
safety_stock_df['curr_inventory'] <= safety_stock_df['reorder_point'],
safety_stock_df['order_upto_point'] - safety_stock_df['curr_inventory'],
0)
safety_stock_df['to_order_value'] = safety_stock_df['to_order_quantity'] * \
safety_stock_df['avg_ptr']
safety_stock_df = safety_stock_df[[
'store_id', 'store_name', 'comb_id', 'fcst_source', 'map_type', 'fcst_wt',
'bucket', 'model', 'drug_id', 'drug_name', 'type', 'composition',
'fcst', 'std', 'lead_time_mean', 'lead_time_std',
'safety_stock', 'reorder_point', 'order_upto_point', 'correction_flags',
'curr_inventory', 'avg_ptr', 'safety_stock_days', 'reorder_days',
'order_upto_days', 'max_value', 'to_order_quantity', 'to_order_value']]
# formatting segmentation table
seg_df_comb_lvl.rename(columns={'std': 'sales_std', 'cov': 'sales_cov',
'Mixed': 'bucket', 'Group': 'group',
'PLC Status L1': 'plc_status', 'ADI': 'adi',
'total_LY_sales': 'total_ly_sales',
'start_date': 'sale_start_date'}, inplace=True)
seg_df_comb_lvl['sale_start_date'] = seg_df_comb_lvl['sale_start_date'].dt.date
seg_df_comb_lvl['store_name'] = store_name
seg_df_drug_lvl.rename(columns={'std': 'sales_std', 'cov': 'sales_cov',
'Mixed': 'bucket', 'Group': 'group',
'PLC Status L1': 'plc_status', 'ADI': 'adi',
'total_LY_sales': 'total_ly_sales',
'start_date': 'sale_start_date'},
inplace=True)
seg_df_drug_lvl['sale_start_date'] = seg_df_drug_lvl[
'sale_start_date'].dt.date
seg_df_drug_lvl['store_name'] = store_name
return safety_stock_df, seg_df_comb_lvl, seg_df_drug_lvl

# === end of file: zeno_etl_libs/utils/ipc_pmf/post_processing.py (package zeno-etl-libs) ===
import pandas as pd
import numpy as np
from zeno_etl_libs.utils.ipc_pmf.lead_time import lead_time
from zeno_etl_libs.utils.ipc_pmf.heuristics.sl_heuristics import calculate_ss
from zeno_etl_libs.utils.ipc_pmf.heuristics.ss_doh_heuristics import ss_doh_min_cap, ss_doh_max_cap
from zeno_etl_libs.utils.ipc2.helpers.correction_flag import compare_df, \
add_correction_flag
from zeno_etl_libs.utils.ipc2.heuristics.ipcv4_heuristics import v4_corrections
def safety_stock_calc(df_fcst_drug, store_id, reset_date,
v4_active_flag, drug_type_list_v4,
drug_sales_latest_12w, schema, db, logger):
fcst_weeks = 4
order_freq = 2
# ========================= LEAD TIME CALCULATIONS =========================
lt_drug, lt_store_mean, lt_store_std = lead_time(
store_id, reset_date, db, schema, logger)
safety_stock_df = df_fcst_drug.merge(
lt_drug[['drug_id', 'lead_time_mean', 'lead_time_std']],
how='left', on='drug_id')
safety_stock_df['lead_time_mean'].fillna(lt_store_mean, inplace=True)
safety_stock_df['lead_time_std'].fillna(lt_store_std, inplace=True)
# ==================== SS, ROP, OUP CALCULATION BEGINS =====================
# impute store_std for cases where store-drug std<1
safety_stock_df['lead_time_std'] = np.where(
safety_stock_df['lead_time_std'] < 1,
lt_store_std, safety_stock_df['lead_time_std'])
# calculate SS
safety_stock_df = calculate_ss(safety_stock_df, fcst_weeks, logger)
# MIN-SS-DOH CAPPING
logger.info(f"DOH1 Correction starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = ss_doh_min_cap(safety_stock_df)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum SS before: {df_pre_corr['safety_stock'].sum()}")
logger.info(f"Sum SS after: {df_post_corr['safety_stock'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger,
cols_to_compare=['safety_stock'])
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst,
'DOH1')
# MAX-SS-DOH CAPPING
logger.info(f"DOH2 Correction starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = ss_doh_max_cap(safety_stock_df)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum SS before: {df_pre_corr['safety_stock'].sum()}")
logger.info(f"Sum SS after: {df_post_corr['safety_stock'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger,
cols_to_compare=['safety_stock'])
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst,
'DOH2')
# calculate ROP - add lead time demand to SS
safety_stock_df['reorder_point'] = safety_stock_df.apply(
lambda row: np.round(
row['lead_time_mean'] * row['fcst'] / fcst_weeks / 7),
axis=1) + safety_stock_df['safety_stock']
# calculate OUP - add order_freq demand to ROP
safety_stock_df['order_upto_point'] = (
safety_stock_df['reorder_point'] +
np.round(
np.where(
# if rounding off give 0, increase it to 4-week forecast
(safety_stock_df['reorder_point'] +
safety_stock_df[
'fcst'] * order_freq / fcst_weeks / 7 < 0.5) &
(safety_stock_df['fcst'] > 0),
safety_stock_df['fcst'],
safety_stock_df['fcst'] * order_freq / fcst_weeks / 7))
)
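# Illustrative example (assumed numbers): fcst = 28 over fcst_weeks = 4 (i.e.
# 1 unit/day) with lead_time_mean = 3 days gives a lead-time demand of 3, so
# reorder_point = safety_stock + 3; with order_freq = 2 days the review-period
# demand is 2, so order_upto_point = reorder_point + 2.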
# ========== CORRECTION PLUGINS (REWORK SS,ROP,OUP BASED ON REQ) ===========
final_ss_df = safety_stock_df.copy()
if v4_active_flag == 'Y':
logger.info("IPC V4 Correction starts")
df_pre_corr = final_ss_df.copy()
final_ss_df = v4_corrections(final_ss_df, drug_type_list_v4, db, schema)
df_post_corr = final_ss_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
final_ss_df = add_correction_flag(final_ss_df, corr_drug_lst, 'V4')
# correct cases where ROP=OUP
df_pre_corr = final_ss_df.copy()
final_ss_df['order_upto_point'] = np.where(
((final_ss_df['order_upto_point'] > 0) &
(final_ss_df['reorder_point'] == final_ss_df['order_upto_point'])),
final_ss_df['reorder_point'] + 1, final_ss_df['order_upto_point'])
df_post_corr = final_ss_df.copy()
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
final_ss_df = add_correction_flag(final_ss_df, corr_drug_lst, 'OUP_CORR1')
# If OUP is below the standard pack quantity (std-qty), raise it to std-qty
df_pre_corr = final_ss_df.copy()
final_ss_df = std_qty_oup(final_ss_df, schema, db)
df_post_corr = final_ss_df.copy()
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
final_ss_df = add_correction_flag(final_ss_df, corr_drug_lst, 'STD_CORR')
# Min OUP=2 for recently sold
df_pre_corr = final_ss_df.copy()
final_ss_df = min_oup_recency(final_ss_df, drug_sales_latest_12w)
df_post_corr = final_ss_df.copy()
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
final_ss_df = add_correction_flag(final_ss_df, corr_drug_lst, 'OUP_CORR2')
return final_ss_df
def std_qty_oup(df, schema, db):
list_drugs = df['drug_id'].tolist()
if list_drugs:
str_drugs = str(list_drugs).replace('[', '(').replace(']', ')')
else:
str_drugs = '(0)'
q_std_qty = f"""
select "drug-id" , "std-qty"
from "{schema}"."drug-std-info" dsi
where "drug-id" in {str_drugs}
"""
df_std_qty = db.get_df(q_std_qty)
df_std_qty.columns = [c.replace('-', '_') for c in df_std_qty.columns]
df = df.merge(df_std_qty, on='drug_id', how='left')
df['std_qty'] = df['std_qty'].fillna(1)
df_to_corr = df.loc[df['order_upto_point'] < df['std_qty']]
df_not_to_corr = df.loc[~(df['order_upto_point'] < df['std_qty'])]
df_to_corr['order_upto_point'] = df_to_corr['std_qty']
df_to_corr['reorder_point'] = np.where(df_to_corr['order_upto_point'] > 1, round(df_to_corr['std_qty'] / 2), 0)
df = df_not_to_corr.append(df_to_corr)
df.drop('std_qty', axis=1, inplace=True)
return df
def min_oup_recency(df, drug_sales_latest_12w):
drug_sales_latest_12w['latest_28d_demand'] = np.round(
drug_sales_latest_12w['actual_demand'] / 3)
df = df.merge(drug_sales_latest_12w[['drug_id', 'latest_28d_demand']],
on='drug_id', how='left')
df_to_corr = df.loc[(df['order_upto_point'] == 1) & (df['fcst'] > 0) &
(df['latest_28d_demand'] > 0)]
df_not_to_corr = df.loc[~((df['order_upto_point'] == 1) & (df['fcst'] > 0) &
(df['latest_28d_demand'] > 0))]
df_to_corr['order_upto_point'] = 2
df_to_corr['reorder_point'] = 1
df = df_not_to_corr.append(df_to_corr)
df.drop('latest_28d_demand', axis=1, inplace=True)
return df

# === end of file: zeno_etl_libs/utils/ipc_pmf/safety_stock.py (package zeno-etl-libs) ===
import pandas as pd
import numpy as np
import datetime as dt
import time
from dateutil.relativedelta import relativedelta
from zeno_etl_libs.utils.ipc2.engine.data_load import LoadData
from zeno_etl_libs.utils.ipc2.engine.forecast import Forecast
from zeno_etl_libs.utils.ipc2.engine.feat_engg import Feature_Engg
from zeno_etl_libs.utils.ipc_pmf.ipc_combination_fcst.engine.data_pre_process import PreprocessData
from zeno_etl_libs.utils.ipc_pmf.ipc_combination_fcst.engine.segmentation import Segmentation
from zeno_etl_libs.utils.ipc_pmf.ipc_combination_fcst.engine.ts_fcst import TS_forecast
from zeno_etl_libs.utils.ipc_pmf.config_ipc_combination import *
def ipc_comb_forecast(store_id, reset_date, type_list, schema, db, logger):
store_id_list = ("({})").format(store_id) # for sql pass
last_date = dt.date(day=1, month=4, year=2019) # max history
# define empty variables in case of fail
weekly_fcst = pd.DataFrame()
ts_fcst = pd.DataFrame()
ts_fcst_cols = []
logger.info("Data Loading Started...")
data_load_obj = LoadData()
(
drug_list,
sales_history,
cfr_pr,
calendar,
first_bill_date
) = data_load_obj.load_all_input(
type_list=type_list,
store_id_list=store_id_list,
last_date=last_date,
reset_date=reset_date,
schema=schema,
db=db
)
# consider only drugs in specified type
drug_list = drug_list["drug_id"].unique().tolist()
sales_history = sales_history.loc[sales_history[drug_col].isin(drug_list)]
cfr_pr = cfr_pr.loc[cfr_pr[drug_col].isin(drug_list)]
# ========================================================================
# AGGREGATE DRUG-LEVEL DEMAND TO COMBINATION LEVEL DEMAND
# ========================================================================
sales_history, cfr_pr, comb_list = drugs_to_comb_gps(sales_history, cfr_pr,
schema, db)
# ========================================================================
logger.info("Data Pre Processing Started...")
data_prep_obj = PreprocessData()
(
comb_sales_4w_wtd,
comb_sales_latest_12w,
train_4w_agg_vald_max_date,
sales_pred_4w_agg_vald,
train_vald_max_date,
sales_pred_vald,
sales_4w_agg,
sales_pred_4w_agg,
sales,
sales_pred,
cal_sales,
sales_daily
) = data_prep_obj.preprocess_all(
sales=sales_history,
comb_list=comb_list,
cfr_pr=cfr_pr,
calendar=calendar,
first_bill_date=first_bill_date,
last_date=last_date
)
train_max_date = sales[date_col].max()
end_date = sales_pred[date_col].max()
logger.info("Segmentation Started...")
seg_obj = Segmentation()
seg_df = seg_obj.get_weekly_segmentation(
df=sales.copy(deep=True),
df_sales_daily=sales_daily.copy(deep=True),
train_max_date=train_max_date,
end_date=end_date
)
seg_df['reset_date'] = str(reset_date)
# ========================================================================
# VALIDATION AND BEST MODEL FOR BUCKET SELECTION
# ========================================================================
# Find validation period actual demand
valid_start_date = train_max_date - relativedelta(weeks=4)
valid_period_demand = sales.loc[sales[date_col] > valid_start_date]
valid_period_demand = valid_period_demand.groupby(key_col, as_index=False).agg({target_col: "sum"})
min_history_date_validation = valid_start_date - relativedelta(weeks=4)
df_min_date = sales_pred_vald.groupby(key_col, as_index=False).agg({date_col: 'min'})
df_min_date['min_allowed_date'] = min_history_date_validation
# drop ts_ids whose history starts after the minimum allowed date (insufficient history for validation)
ts_ids_to_drop = df_min_date.loc[df_min_date['min_allowed_date'] < df_min_date[date_col]][key_col].tolist()
# Perform Un-Aggregated TS Forecast
merged_df1 = pd.merge(sales_pred_vald, seg_df, how='left', on=['ts_id'])
merged_df1 = merged_df1[merged_df1['PLC Status L1'].isin(['Mature', 'New Product'])]
merged_df1 = merged_df1[~merged_df1['ts_id'].isin(ts_ids_to_drop)]
# calculate bucket wise wmape
valid_wmape = {'Model': []}
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
valid_wmape[bucket] = []
if runs_ts_flag == 1:
ts_fcst_obj = TS_forecast()
ts_fcst, ts_fcst_cols = ts_fcst_obj.apply_ts_forecast(
df=merged_df1.copy(),
train_max_date=train_vald_max_date,
forecast_start=train_vald_max_date + relativedelta(weeks=1))
for model in ts_fcst_cols:
df_model = ts_fcst[[key_col, 'Mixed', model]]
df_model = df_model.groupby(key_col, as_index=False).agg({'Mixed': 'first', model: 'sum'})
df_model = df_model.merge(valid_period_demand, on=key_col, how='left')
df_model['error'] = df_model[model] - df_model[target_col]
df_model['abs_error'] = abs(df_model['error'])
df_bucket_wmape = df_model.groupby('Mixed', as_index=False).agg({'abs_error': 'sum', target_col: 'sum'})
df_bucket_wmape['wmape'] = df_bucket_wmape['abs_error']/df_bucket_wmape[target_col]
valid_wmape['Model'].append(model)
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
try:
wmape = df_bucket_wmape.loc[df_bucket_wmape['Mixed'] == bucket]['wmape'].values[0]
except:
wmape = np.inf
valid_wmape[bucket].append(wmape)
if run_ml_flag == 1:
forecast_start = train_vald_max_date + relativedelta(weeks=1)
weekly_fcst = run_LGBM(merged_df1, train_vald_max_date, forecast_start, logger, is_validation=True)
lgbm_fcst = weekly_fcst.groupby(key_col, as_index=False).agg({'preds_lgb': 'sum'})
df_model = lgbm_fcst.merge(valid_period_demand, on=key_col, how='left')
df_model = df_model.merge(seg_df[[key_col, 'Mixed']], how='left', on='ts_id')
df_model['error'] = df_model['preds_lgb'] - df_model[target_col]
df_model['abs_error'] = abs(df_model['error'])
df_bucket_wmape = df_model.groupby('Mixed', as_index=False).agg(
{'abs_error': 'sum', target_col: 'sum'})
df_bucket_wmape['wmape'] = df_bucket_wmape['abs_error'] / df_bucket_wmape[target_col]
valid_wmape['Model'].append('LGBM')
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
try:
wmape = df_bucket_wmape.loc[df_bucket_wmape['Mixed'] == bucket]['wmape'].values[0]
except:
wmape = np.inf
valid_wmape[bucket].append(wmape)
# Perform Aggregated TS Forecast
merged_df1 = pd.merge(sales_pred_4w_agg_vald, seg_df, how='left', on=['ts_id'])
merged_df1 = merged_df1[merged_df1['PLC Status L1'].isin(['Mature', 'New Product'])]
merged_df1 = merged_df1[~merged_df1['ts_id'].isin(ts_ids_to_drop)]
if run_ts_4w_agg_flag == 1:
ts_fcst_obj = TS_forecast()
ts_fcst, ts_fcst_cols = ts_fcst_obj.apply_ts_forecast_agg(
df=merged_df1.copy(),
train_max_date=train_4w_agg_vald_max_date,
forecast_start=train_4w_agg_vald_max_date + relativedelta(weeks=1))
for model in ts_fcst_cols:
df_model = ts_fcst[[key_col, 'Mixed', model]]
df_model = df_model.groupby(key_col, as_index=False).agg(
{'Mixed': 'first', model: 'sum'})
df_model = df_model.merge(valid_period_demand, on=key_col,
how='left')
df_model['error'] = df_model[model] - df_model[target_col]
df_model['abs_error'] = abs(df_model['error'])
df_bucket_wmape = df_model.groupby('Mixed', as_index=False).agg(
{'abs_error': 'sum', target_col: 'sum'})
df_bucket_wmape['wmape'] = df_bucket_wmape['abs_error'] / df_bucket_wmape[target_col]
valid_wmape['Model'].append(model)
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
try:
wmape = df_bucket_wmape.loc[df_bucket_wmape['Mixed'] == bucket]['wmape'].values[0]
except:
wmape = np.inf
valid_wmape[bucket].append(wmape)
if run_ml_4w_agg_flag == 1:
forecast_start = train_4w_agg_vald_max_date + relativedelta(weeks=4)
weekly_fcst = run_LGBM(merged_df1, train_4w_agg_vald_max_date, forecast_start,
logger, is_validation=True, agg_4w=True)
lgbm_fcst = weekly_fcst.groupby(key_col, as_index=False).agg(
{'preds_lgb': 'sum'})
df_model = lgbm_fcst.merge(valid_period_demand, on=key_col, how='left')
df_model = df_model.merge(seg_df[[key_col, 'Mixed']], how='left',
on='ts_id')
df_model['error'] = df_model['preds_lgb'] - df_model[target_col]
df_model['abs_error'] = abs(df_model['error'])
df_bucket_wmape = df_model.groupby('Mixed', as_index=False).agg(
{'abs_error': 'sum', target_col: 'sum'})
df_bucket_wmape['wmape'] = df_bucket_wmape['abs_error'] / \
df_bucket_wmape[target_col]
valid_wmape['Model'].append('LGBM_4w_agg')
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
try:
wmape = df_bucket_wmape.loc[df_bucket_wmape['Mixed'] == bucket][
'wmape'].values[0]
except:
wmape = np.inf
valid_wmape[bucket].append(wmape)
# ========================================================================
# Choose best model based on lowest wmape
# ========================================================================
best_bucket_model = {}
for bucket in ['AW', 'AX', 'AY', 'AZ', 'BW', 'BX', 'BY', 'BZ',
'CW', 'CX', 'CY', 'CZ', 'DW', 'DX', 'DY', 'DZ']:
min_wmape = min(valid_wmape[bucket])
if min_wmape != np.inf:
best_bucket_model[bucket] = valid_wmape['Model'][valid_wmape[bucket].index(min_wmape)]
else:
best_bucket_model[bucket] = default_model # default
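# Illustrative outcome (hypothetical values): best_bucket_model could end up as
# {'AW': 'LGBM', 'AX': 'preds_ETS_12w', 'AY': 'preds_ma', ..., 'DZ': 'LGBM'},
# i.e. one winning model per demand bucket, falling back to the default model
# when no candidate produced a finite wmape for that bucket.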
# ========================================================================
# TRAINING AND FINAL FORECAST
# ========================================================================
# Perform Un-Aggregated TS Forecast
merged_df1 = pd.merge(sales_pred, seg_df, how='left', on=['ts_id'])
merged_df1 = merged_df1[merged_df1['PLC Status L1'].isin(['Mature', 'New Product'])]
if runs_ts_flag == 1:
ts_fcst_obj = TS_forecast()
ts_fcst, ts_fcst_cols = ts_fcst_obj.apply_ts_forecast(
df=merged_df1.copy(),
train_max_date=train_max_date,
forecast_start=train_max_date + relativedelta(weeks=2))
final_fcst = pd.DataFrame()
for model_fcst in ts_fcst_cols:
df_model_fcst = ts_fcst.groupby(key_col, as_index=False).agg({model_fcst: 'sum'})
df_model_fcst.rename({model_fcst: 'fcst'}, axis=1, inplace=True)
df_model_fcst['model'] = model_fcst
final_fcst = final_fcst.append(df_model_fcst)
if run_ml_flag == 1:
forecast_start = train_max_date + relativedelta(weeks=2)
weekly_fcst = run_LGBM(merged_df1, train_max_date, forecast_start, logger, is_validation=False)
lgbm_fcst = weekly_fcst.groupby(key_col, as_index=False).agg({'preds_lgb': 'sum'})
lgbm_fcst.rename({'preds_lgb': 'fcst'}, axis=1, inplace=True)
lgbm_fcst['model'] = 'LGBM'
final_fcst = final_fcst.append(lgbm_fcst)
# Perform Aggregated TS Forecast
merged_df1 = pd.merge(sales_pred_4w_agg, seg_df, how='left', on=['ts_id'])
merged_df1 = merged_df1[merged_df1['PLC Status L1'].isin(['Mature', 'New Product'])]
if run_ts_4w_agg_flag == 1:
ts_fcst_obj = TS_forecast()
ts_fcst, ts_fcst_cols = ts_fcst_obj.apply_ts_forecast_agg(
df=merged_df1.copy(),
train_max_date=train_max_date,
forecast_start=train_max_date + relativedelta(weeks=2))
for model_fcst in ts_fcst_cols:
df_model_fcst = ts_fcst.groupby(key_col, as_index=False).agg(
{model_fcst: 'sum'})
df_model_fcst.rename({model_fcst: 'fcst'}, axis=1, inplace=True)
df_model_fcst['model'] = model_fcst
final_fcst = final_fcst.append(df_model_fcst)
if run_ml_4w_agg_flag == 1:
forecast_start = train_max_date + relativedelta(weeks=2)
weekly_fcst = run_LGBM(merged_df1, train_max_date, forecast_start,
logger, is_validation=False, agg_4w=True)
lgbm_fcst = weekly_fcst.groupby(key_col, as_index=False).agg(
{'preds_lgb': 'sum'})
lgbm_fcst.rename({'preds_lgb': 'fcst'}, axis=1, inplace=True)
lgbm_fcst['model'] = 'LGBM_4w_agg'
final_fcst = final_fcst.append(lgbm_fcst)
final_fcst = final_fcst.merge(seg_df[[key_col, 'Mixed']], on=key_col, how='left')
final_fcst.rename({'Mixed': 'bucket'}, axis=1, inplace=True)
# For each bucket, pick the forecast of its best model as the final forecast
final_selected_fcst = pd.DataFrame()
for bucket in best_bucket_model.keys():
df_selected = final_fcst.loc[(final_fcst['bucket'] == bucket) &
(final_fcst['model'] == best_bucket_model[bucket])]
final_selected_fcst = final_selected_fcst.append(df_selected)
# add back combinations excluded from best-model selection (history too recent/short), using the default model's forecast
list_all_comb = final_fcst[key_col].unique().tolist()
list_all_final_comb = final_selected_fcst[key_col].unique().tolist()
list_comb_rejects = list(set(list_all_comb)-set(list_all_final_comb))
comb_fcst_to_add = final_fcst.loc[(final_fcst[key_col].isin(list_comb_rejects))
& (final_fcst["model"] == default_model)]
final_selected_fcst = final_selected_fcst.append(comb_fcst_to_add)
final_selected_fcst = final_selected_fcst.merge(seg_df[[key_col, 'std']],
on=key_col, how='left')
final_selected_fcst[[store_col, comb_col]] = final_selected_fcst['ts_id'].str.split('_', expand=True)
final_selected_fcst[store_col] = final_selected_fcst[store_col].astype(int)
model_name_map = {'preds_ETS_12w': 'ETS_12w', 'preds_ma': 'MA',
'preds_ETS_auto': 'ETS_auto',
'preds_ETS_4w_auto': 'ETS_4w_auto'}
final_selected_fcst["model"] = final_selected_fcst["model"].map(
model_name_map).fillna(final_selected_fcst["model"])
return final_selected_fcst, seg_df, comb_sales_latest_12w, comb_sales_4w_wtd
def run_LGBM(merged_df1, train_max_date, forecast_start, logger, is_validation=False, agg_4w=False):
start_time = time.time()
merged_df1['All'] = 'All'
slice_col = 'All'
forecast_volume = merged_df1[merged_df1[date_col] > train_max_date][
target_col].sum()
assert forecast_volume == 0  # sanity check: no actual demand should exist beyond train_max_date
logger.info(
"forecast start {} total volume: {}".format(forecast_start,
forecast_volume)
)
forecast_df = pd.DataFrame()
validation_df = pd.DataFrame()
weekly_fcst = pd.DataFrame()
if agg_4w:
end_range = 2
else:
end_range = 5
for i in range(1, end_range):
if is_validation:
num_shift_lags = i
else:
num_shift_lags = i + 1
# for group_name in merged_df1[slice_col].dropna.unique():
# slice_col = 'Mixed'
# for groups in [['AW', 'BW', 'CW', 'DW'], ['AX', 'BX', 'CX', 'DX'], ['AY', 'BY', 'CY', 'DY'], ['AZ', 'BZ', 'CZ', 'DZ']]:
for groups in ['All']:
logger.info('Group: {}'.format(groups))
logger.info("Feature Engineering Started...")
feat_df = pd.DataFrame()
for one_df in [merged_df1]:
feat_engg_obj = Feature_Engg()
one_feat_df = feat_engg_obj.feat_agg(
one_df[
one_df[slice_col] == groups
].drop(slice_col, axis=1).copy(deep=True),
train_max_date=train_max_date,
num_shift_lag=num_shift_lags
)
feat_df = pd.concat([one_feat_df, feat_df])
if pd.DataFrame(feat_df).empty:
continue
logger.info(
"Forecasting Started for {}...".format(forecast_start))
forecast_obj = Forecast()
fcst_df, val_df, Feature_Imp_all = forecast_obj.get_STM_forecast(
feat_df.copy(deep=True),
forecast_start=forecast_start,
num_shift_lags=num_shift_lags
)
forecast_df = pd.concat([forecast_df, fcst_df], axis=0)
validation_df = pd.concat([validation_df, val_df])
ml_fc_cols = [i for i in forecast_df.columns if
i.startswith('preds_')]
# forecast_df['AE'] = forecast_df[ml_fc_cols].mean(axis=1)
end_time = time.time()
logger.info(
"total time for {} forecast: {}"
.format(forecast_start, end_time - start_time)
)
forecast_start = forecast_start + relativedelta(weeks=1)
# weekly_fcst = pd.concat([weekly_fcst, forecast_df])
return forecast_df
def drugs_to_comb_gps(sales_history, cfr_pr, schema, db):
"""map all drugs to its combination groups"""
q_drug_comp_hash = f"""
select "drug-id" as drug_id, "group" as comb_id
from "{schema}"."drug-substitution-mapping" dsm
"""
df_drug_comb = db.get_df(q_drug_comp_hash)
sales_history = sales_history.merge(df_drug_comb, on="drug_id", how="left")
sales_history = sales_history.groupby(["store_id", "comb_id", "sales_date"],
as_index=False).agg({"net_sales_quantity": "sum"})
cfr_pr = cfr_pr.merge(df_drug_comb, on="drug_id", how="left")
cfr_pr = cfr_pr.groupby(["store_id", "comb_id", "shortbook_date"],
as_index=False).agg({"loss_quantity": "sum"})
comb_list = df_drug_comb[comb_col].unique().tolist()
return sales_history, cfr_pr, comb_list

# === end of file: zeno_etl_libs/utils/ipc_pmf/ipc_combination_fcst/forecast_main.py (package zeno-etl-libs) ===
import pandas as pd
import numpy as np
import datetime as dt
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
from zeno_etl_libs.utils.ipc_pmf.config_ipc_combination import *
def fcst_comb_drug_map(store_id, reset_date, comb_fcst_df, drug_fcst_df,
type_list_comb_lvl, schema, db, logger):
# Round off forecast values
comb_fcst_df['fcst'] = np.round(comb_fcst_df['fcst'])
comb_fcst_df = comb_fcst_df.loc[comb_fcst_df['fcst'] > 0]
# Read assortment from GSheet
gs = GoogleSheet()
spreadsheet_id = "1tFHCTr3CHdb0UOFseK_ntjAUJSHQHcjLmysPPCWRM04"
ast_data = gs.download(data={
"spreadsheet_id": spreadsheet_id,
"sheet_name": "Sheet1",
"listedFields": []})
df_assortment = pd.DataFrame(ast_data)
df_assortment.columns = [c.replace('-', '_') for c in df_assortment.columns]
df_assortment[drug_col] = df_assortment[drug_col].astype(int)
df_assortment['drug_name'] = df_assortment['drug_name'].astype(str)
df_assortment['count'] = 1
df_assortment_comb = df_assortment.loc[df_assortment['type'].isin(type_list_comb_lvl)]
# Read combinations and corresponding composition
list_unq_comb = comb_fcst_df[comb_col].tolist()
if list_unq_comb:
str_unq_comb = str(list_unq_comb).replace('[', '(').replace(']', ')')
else:
str_unq_comb = '(0)'
q_comb_drug = f"""
select dsm."drug-id" as drug_id, dsm."group" as comb_id,
d."drug-name" as drug_name, d.type, d.composition
from "{schema}"."drug-substitution-mapping" dsm
left join "{schema}".drugs d on dsm."drug-id" = d.id
where dsm."group" in {str_unq_comb}
"""
df_comb_drug = db.get_df(q_comb_drug)
# Get all mapping cases
merge_comb_drug = df_comb_drug[[drug_col, comb_col]].merge(
df_assortment_comb[[drug_col, 'drug_name', 'count']], on=drug_col, how="outer")
count_comp_drugs = merge_comb_drug.groupby(comb_col, as_index=False).agg({'count': 'sum'})
list_comb_one_one = count_comp_drugs[count_comp_drugs['count'] == 1][comb_col].tolist()
list_comb_one_many = count_comp_drugs[count_comp_drugs['count'] > 1][comb_col].tolist()
list_comb_one_none = count_comp_drugs[count_comp_drugs['count'] == 0][comb_col].tolist()
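# Mapping taxonomy handled below:
#   one-one  : combination maps to exactly one assortment drug -> forecast assigned directly
#   one-many : combination maps to several assortment drugs -> forecast split by 90-day sales share
#   one-none : combination has a forecast but no drug in the assortment -> reported separately
#   none-one : assortment drug without a combination forecast -> falls back to the drug-level forecast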
# Allocate forecast to drugs
df_assortment_merge = df_assortment_comb.merge(df_comb_drug, on=drug_col, how='left')
list_drug_with_comb_fcst = df_assortment_merge.loc[df_assortment_merge[comb_col].notna()][drug_col].tolist()
df_all_comb = comb_fcst_df.merge(df_assortment_merge, on=comb_col, how='left')
df_all_comb = df_all_comb[[store_col, comb_col, 'bucket', 'model', drug_col, 'fcst', 'std', 'correction_flags']]
df_fcst_final = pd.DataFrame()
# Case 1: One-One, direct assign
df_temp = df_all_comb.loc[df_all_comb[comb_col].isin(list_comb_one_one)]
df_temp['fcst_wt'] = 1
df_temp['map_type'] = 'one-one'
df_temp['fcst_source'] = 'combination_fcst'
df_fcst_final = df_fcst_final.append(df_temp)
df_one_one = df_temp.copy()
df_one_one = df_one_one.merge(df_assortment_comb, on=drug_col, how='left')
df_one_one.drop('count', axis=1, inplace=True)
# Case 2: One-Many, assign based on past month sales contribution
df_temp = df_all_comb.loc[df_all_comb[comb_col].isin(list_comb_one_many)]
df_temp = drug_sales_multiplier(df_temp, store_id, reset_date, schema, db)
df_one_many = df_temp.copy()
df_temp.drop(['sales_90d', 'comb_sales_90d'], axis=1, inplace=True)
df_temp['fcst'] = df_temp['fcst'] * df_temp['fcst_wt']
df_temp['std'] = df_temp['std'] * df_temp['fcst_wt']
df_temp['map_type'] = 'one-many'
df_temp['fcst_source'] = 'combination_fcst'
df_fcst_final = df_fcst_final.append(df_temp)
df_one_many = df_one_many.merge(df_assortment_comb, on=drug_col, how='left')
df_one_many.drop('count', axis=1, inplace=True)
# Case 3: One-None, to send
df_one_none = df_all_comb.loc[df_all_comb[comb_col].isin(list_comb_one_none)]
df_one_none.drop(drug_col, axis=1, inplace=True)
df_one_none = df_one_none.merge(df_comb_drug, on=comb_col, how='left')
df_one_none = drug_sales_multiplier(df_one_none, store_id, reset_date, schema, db)
df_one_none = df_one_none.loc[df_one_none['sales_90d'] > 0]
df_one_none.drop('fcst_wt', axis=1, inplace=True)
# Case 4: No Comb - Drugs, to map with drug-level-fcst
df_none_one = df_assortment.loc[~df_assortment[drug_col].isin(list_drug_with_comb_fcst)]
df_none_one.drop('count', axis=1, inplace=True)
# get drug-combination groups
list_drugs = df_none_one[drug_col].tolist()
if list_drugs:
str_list_drugs = str(list_drugs).replace('[', '(').replace(']', ')')
else:
str_list_drugs = '(0)'
q_comb_drug = f"""
select dsm."drug-id" as drug_id, dsm."group" as comb_id,
d."drug-name" as drug_name, d.type, d.composition
from "{schema}"."drug-substitution-mapping" dsm
left join "{schema}".drugs d on dsm."drug-id" = d.id
where dsm."drug-id" in {str_list_drugs}
"""
df_comb_drug = db.get_df(q_comb_drug)
drug_fcst_df.drop(key_col, axis=1, inplace=True)
df_fcst_drug_level_merge = drug_fcst_df.merge(df_none_one[[drug_col]],
on=drug_col, how='inner')
df_fcst_drug_level_merge = df_fcst_drug_level_merge.merge(
df_comb_drug, on=drug_col, how='left')
df_fcst_drug_level_merge['fcst_source'] = 'drug_fcst'
df_fcst_drug_level_merge['fcst_wt'] = np.nan
df_fcst_drug_level_merge['map_type'] = np.nan
df_fcst_final = df_fcst_final.append(df_fcst_drug_level_merge[df_fcst_final.columns])
# filter only drugs without combination
drugs_with_comb = df_comb_drug[drug_col].tolist()
df_none_one = df_none_one.loc[~df_none_one[drug_col].isin(drugs_with_comb)]
# Append reject cases
forecasted_drugs = df_fcst_final[drug_col].tolist()
assortment_drugs = df_assortment[drug_col].tolist()
reject_drugs = list(set(forecasted_drugs) ^ set(assortment_drugs))
df_reject_cases = df_assortment.loc[df_assortment[drug_col].isin(reject_drugs)][[drug_col]]
df_reject_cases[store_col] = store_id
df_reject_cases['bucket'] = 'NA'
df_reject_cases['model'] = 'NA'
df_reject_cases['fcst'] = 0
df_reject_cases['std'] = 0
df_reject_cases['fcst_wt'] = np.nan
df_reject_cases['map_type'] = np.nan
df_reject_cases['fcst_source'] = np.nan
df_reject_cases['correction_flags'] = ""
df_reject_cases = df_reject_cases.merge(df_comb_drug, on=drug_col, how='left')
df_fcst_final = df_fcst_final.append(df_reject_cases[df_fcst_final.columns])
# Round off forecast values
df_fcst_final['fcst'] = df_fcst_final['fcst'].astype(float)
df_fcst_final['fcst'] = np.round(df_fcst_final['fcst'])
return df_fcst_final, df_one_one, df_one_many, df_one_none, df_none_one
def drug_sales_multiplier(df, store_id, reset_date, schema, db):
list_drugs = df[drug_col].tolist()
if list_drugs:
str_drugs = str(list_drugs).replace('[', '(').replace(']', ')')
else:
str_drugs = '(0)'
sales_start = (dt.datetime.strptime(reset_date, '%Y-%m-%d').date() -
dt.timedelta(days=90)).strftime('%Y-%m-%d')
q_drug_sales = f"""
select "drug-id" , sum("net-quantity") as sales_90d
from "{schema}".sales s
where "store-id" = {store_id}
and date("created-at") >= '{sales_start}'
and date("created-at") < '{reset_date}'
and "drug-id" in {str_drugs}
group by "drug-id"
"""
df_drug_sales = db.get_df(q_drug_sales)
df_drug_sales.columns = [c.replace('-', '_') for c in df_drug_sales.columns]
df = df.merge(df_drug_sales, on=drug_col, how='left')
df['sales_90d'] = df['sales_90d'].fillna(0)
df_comb_sales_sum = df.groupby(comb_col, as_index=False).agg({'sales_90d': 'sum'})
df_comb_sales_sum.rename({'sales_90d': 'comb_sales_90d'}, axis=1, inplace=True)
df = df.merge(df_comb_sales_sum, on=comb_col, how='left')
df['fcst_wt'] = df['sales_90d']/df['comb_sales_90d']
df['fcst_wt'] = df['fcst_wt'].fillna(0)
# assign equal split for combination with 0 sales
zero_sales_comb = df_comb_sales_sum.loc[
df_comb_sales_sum['comb_sales_90d'] == 0][comb_col].tolist()
df_comb_equal_split_cases = df.groupby(comb_col, as_index=False).agg({drug_col: 'count'})
df_comb_equal_split_cases.rename({drug_col: 'count'}, axis=1, inplace=True)
df_comb_equal_split_cases = df_comb_equal_split_cases.loc[df_comb_equal_split_cases[comb_col].isin(zero_sales_comb)]
df_comb_equal_split_cases['equal_split_wt'] = 1/df_comb_equal_split_cases['count']
df = df.merge(df_comb_equal_split_cases, on=comb_col, how='left')
df['fcst_wt'] = np.where(df['equal_split_wt'].isna(), df['fcst_wt'], df['equal_split_wt'])
df.drop(['count', 'equal_split_wt'], axis=1, inplace=True)
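# Illustrative split (assumed numbers): if drugs A and B of the same combination
# sold 30 and 10 units in the last 90 days, their fcst_wt becomes 0.75 and 0.25;
# a combination with zero 90-day sales gives each of its n drugs an equal 1/n weight.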
return df

# === end of file: zeno_etl_libs/utils/ipc_pmf/ipc_combination_fcst/fcst_mapping.py (package zeno-etl-libs) ===
import numpy as np
np.random.seed(0)
import pandas as pd
# import time
# import re
# from datetime import date
# from dateutil.relativedelta import relativedelta
# from statsmodels.tsa.exponential_smoothing.ets import ETSModel
from prophet import Prophet
from statsmodels.tsa.api import ExponentialSmoothing
# import sktime
from zeno_etl_libs.utils.ipc2.helpers.helper_functions import \
applyParallel_croston
# from boruta import BorutaPy
from zeno_etl_libs.utils.ipc_pmf.config_ipc_combination import (
date_col,
target_col,
models_un_agg,
models_agg
)
import logging
logger = logging.getLogger("_logger")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
class TS_forecast:
def train_test_split(self, df, train_max_date, forecast_start):
df.rename(columns={date_col: 'ds', target_col: 'y'}, inplace=True)
df.sort_values(by=['ds'], inplace=True)
train = df[df['ds'] <= train_max_date]
test = df[df['ds'] >= forecast_start]
return train, test
def Croston_TSB(self, ts, extra_periods=4, alpha=0.4, beta=0.4):
d = np.array(ts) # Transform the input into a numpy array
cols = len(d) # Historical period length
d = np.append(d, [
np.nan] * extra_periods) # Append np.nan into the demand array to cover future periods
# level (a), probability(p) and forecast (f)
a, p, f = np.full((3, cols + extra_periods), np.nan)
# Initialization
first_occurence = np.argmax(d[:cols] > 0)
a[0] = d[first_occurence]
p[0] = 1 / (1 + first_occurence)
f[0] = p[0] * a[0]
# Create all the t+1 forecasts
for t in range(0, cols):
if d[t] > 0:
a[t + 1] = alpha * d[t] + (1 - alpha) * a[t]
p[t + 1] = beta * (1) + (1 - beta) * p[t]
else:
a[t + 1] = a[t]
p[t + 1] = (1 - beta) * p[t]
f[t + 1] = p[t + 1] * a[t + 1]
# Future Forecast
a[cols + 1:cols + extra_periods] = a[cols]
p[cols + 1:cols + extra_periods] = p[cols]
f[cols + 1:cols + extra_periods] = f[cols]
df = pd.DataFrame.from_dict(
{"Demand": d, "Forecast": f, "Period": p, "Level": a,
"Error": d - f})
return df[-extra_periods:]
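# Note: this is the TSB (Teunter-Syntetos-Babai) variant of Croston's method for
# intermittent demand: it smooths a demand level `a` and a demand probability `p`
# separately and forecasts f = p * a; all `extra_periods` future values are equal.
# Minimal usage sketch (illustrative only):
#   fcst = TS_forecast().Croston_TSB(pd.Series([0, 0, 3, 0, 2, 0, 0, 4]), extra_periods=4)
#   fcst['Forecast']  # four identical per-period forecasts for the horizon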
def ETS_forecast(self, train, test, latest_12w_fit=False):
try:
train.set_index(['ds'], inplace=True)
test.set_index(['ds'], inplace=True)
train.index.freq = train.index.inferred_freq
test.index.freq = test.index.inferred_freq
if latest_12w_fit:
train = train[-12:] # use only latest 3 months
fit = ExponentialSmoothing(train['y']).fit()
preds = fit.forecast(len(test) + 1)
preds = preds[-len(test):]
except Exception as e:
logger.info("error in ETS fcst")
logger.info(str(e))
preds = 0
return preds
def ma_forecast(self, data):
"""
Purpose: Compute MA forecast for the for the forecast horizon specified
Inputs: time series to create forecast
Output: series with forecasted values
"""
sma_df = data.copy(deep=True)
yhat = []
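        # Recursive moving average: each step appends the latest rolling mean (window
        # of 8 weeks, or the full history when shorter) back onto the series, so later
        # steps average over previously forecast values as well.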
if len(data) >= 8:
for i in range(5):
sma_val = sma_df.rolling(8).mean().iloc[-1]
sma_df.loc[sma_df.index.max() + 1] = sma_val
yhat.append(sma_val)
else:
for i in range(5):
sma_val = sma_df.rolling(len(data)).mean().iloc[-1]
sma_df.loc[sma_df.index.max() + 1] = sma_val
yhat.append(sma_val)
logger.info(yhat)
return yhat[-4:]
def prophet_fcst(self, train, test, params=None):
# reg_list = []
try:
if params is None:
pro = Prophet()
else:
pro = Prophet(n_changepoints=params)
# for j in train.columns:
# if j not in col_list:
# pro.add_regressor(j)
# reg_list.append(j)
pro.fit(train[['ds', 'y']])
pred_f = pro.predict(test)
test = test[["ds", "y"]]
test = pd.merge(test, pred_f, on="ds", how="left")
except Exception as e:
logger.info("error in prophet fcst")
logger.info(str(e))
test['yhat'] = 0
return test
def ts_forecast_un_agg(self, df, train_max_date, forecast_start):
train, test = self.train_test_split(df, train_max_date=train_max_date,
forecast_start=forecast_start)
test = test.sort_values(by=['ds'])
if 'croston' in models_un_agg:
preds_croston = self.Croston_TSB(train['y'])
test['preds_croston'] = preds_croston['Forecast'].values
if 'ETS_Auto' in models_un_agg:
preds_ETS = self.ETS_forecast(train.copy(), test.copy())
try:
test['preds_ETS_auto'] = preds_ETS.values
except:
test['preds_ETS_auto'] = 0
if 'ETS_12w' in models_un_agg:
preds_ETS = self.ETS_forecast(train.copy(), test.copy(),
latest_12w_fit=True)
try:
test['preds_ETS_12w'] = preds_ETS.values
except:
test['preds_ETS_12w'] = 0
if 'MA' in models_un_agg:
preds_ma = self.ma_forecast(train['y'])
test['preds_ma'] = preds_ma
if 'prophet' in models_un_agg:
preds_prophet = self.prophet_fcst(train.copy(), test.copy())
test['preds_prophet'] = preds_prophet['yhat'].values
return test
def ts_forecast_agg(self, df, train_max_date, forecast_start):
train, test = self.train_test_split(df, train_max_date=train_max_date,
forecast_start=forecast_start)
test = test.sort_values(by=['ds'])
if 'ETS_4w_agg' in models_agg:
preds_ETS = self.ETS_forecast(train.copy(), test.copy(), latest_12w_fit=True)
try:
test['preds_ETS_4w_auto'] = preds_ETS.values
except:
test['preds_ETS_4w_auto'] = 0
return test
def apply_ts_forecast(self, df, train_max_date, forecast_start):
# global train_date
# train_date = train_max_date
# global forecast_start_date
# forecast_start_date = forecast_start
preds = applyParallel_croston(
df.groupby('ts_id'),
func=self.ts_forecast_un_agg, train_max_date=train_max_date,
forecast_start=forecast_start
)
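        # applyParallel_croston applies ts_forecast_un_agg to every ts_id group
        # (presumably in parallel, as the helper's name suggests); the resulting
        # 'preds_*' columns are cleaned up and clipped at zero below.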
preds.rename(columns={'ds': date_col, 'y': target_col}, inplace=True)
ts_fcst_cols = [i for i in preds.columns if i.startswith('preds_')]
for col in ts_fcst_cols:
preds[col].fillna(0, inplace=True)
preds[col] = np.where(preds[col] < 0, 0, preds[col])
# preds['preds_AE_ts'] = preds[ts_fcst_cols].mean(axis=1)
return preds, ts_fcst_cols
def apply_ts_forecast_agg(self, df, train_max_date, forecast_start):
# global train_date
# train_date = train_max_date
# global forecast_start_date
# forecast_start_date = forecast_start
preds = applyParallel_croston(
df.groupby('ts_id'),
func=self.ts_forecast_agg, train_max_date=train_max_date,
forecast_start=forecast_start
)
preds.rename(columns={'ds': date_col, 'y': target_col}, inplace=True)
ts_fcst_cols = [i for i in preds.columns if i.startswith('preds_')]
for col in ts_fcst_cols:
preds[col].fillna(0, inplace=True)
preds[col] = np.where(preds[col] < 0, 0, preds[col])
# preds['preds_AE_ts'] = preds[ts_fcst_cols].mean(axis=1)
return preds, ts_fcst_cols | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc_pmf/ipc_combination_fcst/engine/ts_fcst.py | ts_fcst.py |
import pandas as pd
import datetime
import numpy as np
from zeno_etl_libs.utils.ipc_pmf.config_ipc_combination import date_col, store_col, \
comb_col, target_col, key_col, local_testing
class PreprocessData:
def add_ts_id(self, df):
df = df[~df[comb_col].isnull()].reset_index(drop=True)
df['ts_id'] = (
df[store_col].astype(int).astype(str)
+ '_'
+ df[comb_col].astype(str)
)
return df
def preprocess_sales(self, df, comb_list):
df.rename(columns={
'net_sales_quantity': target_col
}, inplace=True)
df.rename(columns={
'sales_date': date_col
}, inplace=True)
set_dtypes = {
store_col: int,
comb_col: str,
date_col: str,
target_col: float
}
df = df.astype(set_dtypes)
df[target_col] = df[target_col].round()
df[date_col] = pd.to_datetime(df[date_col])
df = df.groupby(
[store_col, comb_col, key_col, date_col]
)[target_col].sum().reset_index()
df = df[df[comb_col].isin(comb_list)]
return df
def get_formatted_data(self, df):
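        # Build a dense daily grid per ts_id: missing sale dates are inserted with
        # zero demand via unstack/stack, and rows before each series' first sale date
        # are dropped so that leading zeros are not fabricated.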
df_start = df.groupby([key_col])[date_col].min().reset_index().rename(
columns={date_col: 'sales_start'})
df = df[[key_col, date_col, target_col]]
min_date = df[date_col].dropna().min()
end_date = df[date_col].dropna().max()
date_range = []
date_range = pd.date_range(
start=min_date,
end=end_date,
freq='d'
)
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = np.nan
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = pd.merge(df, df_start, how='left', on=key_col)
df = df[df[date_col] >= df['sales_start']]
df[[store_col, comb_col]] = df[key_col].str.split('_', expand=True)
df[store_col] = df[store_col].astype(int)
df[comb_col] = df[comb_col].astype(str)
return df
def preprocess_cfr_pr(self, df):
set_dtypes = {
store_col: int,
comb_col: str,
'loss_quantity': int
}
df = df.astype(set_dtypes)
df['shortbook_date'] = pd.to_datetime(df['shortbook_date'])
return df
def merge_cfr_pr(self, sales, cfr_pr):
df = sales.merge(cfr_pr,
left_on=[store_col, comb_col, date_col],
right_on=[store_col, comb_col, 'shortbook_date'],
how='left')
df[date_col] = df[date_col].combine_first(df['shortbook_date'])
df[target_col].fillna(0, inplace=True)
df['loss_quantity'].fillna(0, inplace=True)
df[target_col] += df['loss_quantity']
df.drop(['shortbook_date', 'loss_quantity'], axis=1, inplace=True)
return df
def preprocess_calendar(self, df, last_date):
df.rename(columns={'date': date_col}, inplace=True)
df[date_col] = pd.to_datetime(df[date_col])
cal_sales = df.copy()
cal_sales['week_begin_dt'] = cal_sales.apply(
lambda x: x[date_col] - datetime.timedelta(x['day_of_week']),
axis=1)
cal_sales['month_begin_dt'] = cal_sales.apply(
            lambda x: x[date_col] - datetime.timedelta(x[date_col].day - 1), axis=1)
cal_sales['key'] = 1
ld = pd.to_datetime(last_date)
cal_sales = cal_sales[cal_sales[date_col] > ld]
return df, cal_sales
def merge_calendar(self, sales, calendar):
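        # Roll daily sales up to weeks anchored on week_begin_dt and keep only
        # complete 7-day weeks, so partial leading/trailing weeks do not bias the
        # weekly demand series.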
df = sales.merge(calendar,
how='left',
on=date_col
)
# df_week_days_count = df.groupby([key_col, 'year', 'week_of_year'])[date_col].count().reset_index().rename(columns = {date_col:'week_days_count'})
# df['week_days_count'] = 1
df['week_begin_dt'] = df.apply(
lambda x: x[date_col] - datetime.timedelta(x['day_of_week']),
axis=1)
df_week_days_count = df.groupby(['ts_id', 'week_begin_dt'])[
date_col].count().reset_index().rename(
columns={date_col: 'week_days_count'})
# df = df.groupby(['ts_id', store_col, drug_col, ]).resample('W-Mon', on =date_col )[target_col].sum().reset_index()
df = df.groupby(['ts_id', store_col, comb_col, 'week_begin_dt'])[
target_col].sum().reset_index()
df = pd.merge(df, df_week_days_count, how='left',
on=[key_col, 'week_begin_dt'])
df = df[df['week_days_count'] == 7].reset_index(drop=True)
df.drop(columns=['week_days_count'], inplace=True)
df.rename(columns={'week_begin_dt': date_col}, inplace=True)
return df
def preprocess_bill_date(self, df):
df.rename(columns={'store-id': store_col}, inplace=True)
df['bill_date'] = pd.to_datetime(df['bill_date'])
return df
def merge_first_bill_date(self, sales, first_bill_date):
df = pd.merge(sales, first_bill_date, on=[store_col])
df = df[df[date_col] >= df['bill_date']].reset_index(drop=True)
df.drop(columns=['bill_date'], inplace=True)
return df
def make_future_df(self, df):
start_date_df = (
df
.groupby(key_col)[date_col]
.min()
.reset_index()
.rename(columns={date_col: 'start_date'})
)
df = df[[key_col, date_col, target_col]]
end_date = df[date_col].max() + datetime.timedelta(weeks=5)
min_date = df[date_col].min()
date_range = pd.date_range(
start=min_date,
end=end_date,
freq="W-MON"
)
date_range = list(set(date_range) - set(df[date_col]))
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = 0
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = df.merge(start_date_df, on=key_col, how='left')
df = df[
df[date_col] >= df['start_date']
]
df.drop('start_date', axis=1, inplace=True)
df[[store_col, comb_col]] = df[key_col].str.split('_', expand=True)
return df
def make_future_df_4w_agg(self, df):
start_date_df = (
df
.groupby(key_col)[date_col]
.min()
.reset_index()
.rename(columns={date_col: 'start_date'})
)
df = df[[key_col, date_col, target_col]]
fcst_week_start = df[date_col].max() + datetime.timedelta(weeks=5)
date_range = [fcst_week_start]
df = (
df
.groupby([date_col] + [key_col])[target_col]
.sum()
.unstack()
)
for date in date_range:
df.loc[date, :] = 0
df = (
df
.fillna(0)
.stack()
.reset_index()
.rename(columns={0: target_col})
)
df = df.merge(start_date_df, on=key_col, how='left')
df = df[
df[date_col] >= df['start_date']
]
df.drop('start_date', axis=1, inplace=True)
df[[store_col, comb_col]] = df[key_col].str.split('_', expand=True)
return df
def sales_pred_vald_df(
self,
df
):
vald_max_date = df[date_col].max() - datetime.timedelta(weeks=4)
df_vald_train = df.loc[df[date_col] <= vald_max_date]
df_vald_future = df.loc[~(df[date_col] <= vald_max_date)]
df_vald_future[target_col] = 0
df_final = df_vald_train.append(df_vald_future)
train_vald_max_date = df_vald_train[date_col].max()
return df_final, train_vald_max_date
def sales_4w_agg(
self,
df
):
# =====================================================================
# Combine 4 weeks into an arbitrary group
# =====================================================================
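        # Worked example (numbers assumed): with 14 weekly rows and week_gp_size = 4,
        # only the latest 12 weeks are kept and grouped into 3 buckets of 4 weeks;
        # each bucket keeps its first date and the summed demand of its 4 weeks.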
unique_ts_ids = df[key_col].unique().tolist()
sales_4w_agg = pd.DataFrame()
for ts_id in unique_ts_ids:
week_gp_size = 4
sales_temp = df.loc[df[key_col] == ts_id]
available_week_count = sales_temp.shape[0]
if available_week_count >= (3 * week_gp_size):
allowable_week_count = int(
week_gp_size * np.fix(available_week_count / week_gp_size))
sales_temp = sales_temp.sort_values(by=["date"], ascending=True)
sales_temp = sales_temp[-allowable_week_count:]
week_gps_count = int(allowable_week_count / week_gp_size)
week_gps_list = np.arange(1, week_gps_count + 1, 1)
week_gps_id_list = np.repeat(week_gps_list, week_gp_size)
sales_temp["week_gps_id"] = week_gps_id_list
sales_temp = sales_temp.groupby(
[key_col, store_col, comb_col, "week_gps_id"],
as_index=False).agg(
{"date": "first", "actual_demand": "sum"})
sales_4w_agg = sales_4w_agg.append(sales_temp)
sales_4w_agg = sales_4w_agg.drop("week_gps_id", axis=1)
sales_pred_4w_agg = self.make_future_df_4w_agg(sales_4w_agg.copy())
return sales_4w_agg, sales_pred_4w_agg
def comb_sales_12w(
self,
df
):
date_12w_back = df[date_col].max() - datetime.timedelta(weeks=12)
df_12w = df.loc[df[date_col] > date_12w_back]
df_12w = df_12w.groupby([store_col, comb_col], as_index=False).agg(
{target_col: 'sum'})
return df_12w
def comb_sales_4w_wtd(
self,
df
):
date_4w_back = df[date_col].max() - datetime.timedelta(weeks=4)
df_4w = df.loc[df[date_col] > date_4w_back]
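        # Recency-weighted 28-day demand. Combinations selling in all 4 latest weeks
        # get weights 0.1/0.2/0.3/0.4 (oldest -> latest); with 3 weeks 0.2/0.3/0.5;
        # with 2 weeks 0.4/0.6. The weighted weekly figure is scaled by 4 to a 4-week
        # estimate. Illustrative: weekly sales 2, 3, 4, 5 (oldest -> latest) give
        # 0.1*2 + 0.2*3 + 0.3*4 + 0.4*5 = 4.0, i.e. a weighted demand of 16.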
# sales > 0 and all 4 latest week
df_4w_1 = df_4w[df_4w[target_col] > 0]
df_4w_cnt = df_4w_1.groupby([store_col, comb_col], as_index=False).agg(
{target_col: 'count'})
df_4w_cnt.rename({target_col: 'week_count'}, axis=1, inplace=True)
list_4w_combs = df_4w_cnt.loc[df_4w_cnt['week_count'] == 4][comb_col].tolist()
df_4w_1 = df_4w_1.loc[df_4w_1[comb_col].isin(list_4w_combs)]
        # sort ascending so that dates_list[3] is the latest week (unique() preserves
        # encounter order, which need not be chronological)
        dates_list = sorted(df_4w_1[date_col].unique())
        df_4w_1['weights'] = np.where(df_4w_1[date_col] == dates_list[3], 0.4, 0)
df_4w_1['weights'] = np.where(df_4w_1[date_col] == dates_list[2], 0.3, df_4w_1['weights'])
df_4w_1['weights'] = np.where(df_4w_1[date_col] == dates_list[1], 0.2, df_4w_1['weights'])
df_4w_1['weights'] = np.where(df_4w_1[date_col] == dates_list[0], 0.1, df_4w_1['weights'])
df_4w_1['wtd_demand'] = df_4w_1[target_col] * df_4w_1['weights']
df_4w_1 = df_4w_1.groupby([store_col, comb_col], as_index=False).agg(
{'wtd_demand': 'sum'})
# sales > 0 and only 3 latest week
df_4w_2 = df_4w[df_4w[target_col] > 0]
df_4w_cnt = df_4w_2.groupby([store_col, comb_col], as_index=False).agg(
{target_col: 'count'})
df_4w_cnt.rename({target_col: 'week_count'}, axis=1, inplace=True)
list_4w_combs = df_4w_cnt.loc[df_4w_cnt['week_count'] == 3][comb_col].tolist()
        df_4w_2 = df_4w_2.loc[df_4w_2[comb_col].isin(list_4w_combs)]
        # sort oldest -> latest per combination so the tiled 1..3 index matches the weights
        df_4w_2 = df_4w_2.sort_values([store_col, comb_col, date_col])
        df_4w_2['w_count'] = np.tile(np.arange(1, 4), len(df_4w_2))[:len(df_4w_2)]
df_4w_2['weights'] = np.where(df_4w_2['w_count'] == 3, 0.5, 0)
df_4w_2['weights'] = np.where(df_4w_2['w_count'] == 2, 0.3, df_4w_2['weights'])
df_4w_2['weights'] = np.where(df_4w_2['w_count'] == 1, 0.2, df_4w_2['weights'])
df_4w_2['wtd_demand'] = df_4w_2[target_col] * df_4w_2['weights']
df_4w_2 = df_4w_2.groupby([store_col, comb_col], as_index=False).agg(
{'wtd_demand': 'sum'})
# sales > 0 and only 2 latest week
df_4w_3 = df_4w[df_4w[target_col] > 0]
df_4w_cnt = df_4w_3.groupby([store_col, comb_col], as_index=False).agg(
{target_col: 'count'})
df_4w_cnt.rename({target_col: 'week_count'}, axis=1, inplace=True)
list_4w_combs = df_4w_cnt.loc[df_4w_cnt['week_count'] == 2][
comb_col].tolist()
        df_4w_3 = df_4w_3.loc[df_4w_3[comb_col].isin(list_4w_combs)]
        # sort oldest -> latest per combination so the tiled 1..2 index matches the weights
        df_4w_3 = df_4w_3.sort_values([store_col, comb_col, date_col])
        df_4w_3['w_count'] = np.tile(np.arange(1, 3), len(df_4w_3))[:len(df_4w_3)]
df_4w_3['weights'] = np.where(df_4w_3['w_count'] == 2, 0.6, 0)
df_4w_3['weights'] = np.where(df_4w_3['w_count'] == 1, 0.4, df_4w_3['weights'])
df_4w_3['wtd_demand'] = df_4w_3[target_col] * df_4w_3['weights']
df_4w_3 = df_4w_3.groupby([store_col, comb_col], as_index=False).agg(
{'wtd_demand': 'sum'})
df_4w = pd.concat([df_4w_1, df_4w_2, df_4w_3], axis=0)
df_4w['wtd_demand'] = np.round(df_4w['wtd_demand'] * 4)
return df_4w
def preprocess_all(
self,
sales=None,
cfr_pr=None,
comb_list=None,
calendar=None,
first_bill_date=None,
last_date=None,
):
sales = self.add_ts_id(sales)
# filter
#################################################
if local_testing == 1:
tsid_list = \
sales.sort_values(by=['net_sales_quantity'], ascending=False)[
key_col].unique().tolist()[:20]
sales = sales[sales[key_col].isin(tsid_list)]
#################################################
sales = self.preprocess_sales(sales, comb_list)
sales = self.get_formatted_data(sales)
cfr_pr = self.preprocess_cfr_pr(cfr_pr)
sales_daily = self.merge_cfr_pr(sales, cfr_pr)
calendar, cal_sales = self.preprocess_calendar(calendar, last_date)
sales = self.merge_calendar(sales_daily, calendar)
first_bill_date = self.preprocess_bill_date(first_bill_date)
sales = self.merge_first_bill_date(sales, first_bill_date)
sales_pred = self.make_future_df(sales.copy())
sales_pred_vald, train_vald_max_date = self.sales_pred_vald_df(sales)
sales_4w_agg, sales_pred_4w_agg = self.sales_4w_agg(sales)
sales_pred_4w_agg_vald, train_4w_agg_vald_max_date = self.sales_pred_vald_df(sales_4w_agg)
comb_sales_latest_12w = self.comb_sales_12w(sales)
comb_sales_4w_wtd = self.comb_sales_4w_wtd(sales)
return (
comb_sales_4w_wtd,
comb_sales_latest_12w,
train_4w_agg_vald_max_date,
sales_pred_4w_agg_vald,
train_vald_max_date,
sales_pred_vald,
sales_4w_agg,
sales_pred_4w_agg,
sales,
sales_pred,
cal_sales,
sales_daily
) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc_pmf/ipc_combination_fcst/engine/data_pre_process.py | data_pre_process.py |
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
from tqdm import tqdm
import logging
import warnings
warnings.filterwarnings("ignore")
logger = logging.getLogger("_logger")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
from zeno_etl_libs.utils.ipc_pmf.config_ipc_combination import (
date_col,
target_col,
store_col,
comb_col,
eol_cutoff
)
class Segmentation:
def add_ts_id(self, df):
df['ts_id'] = (
df[store_col].astype(int).astype(str)
+ '_'
+ df[comb_col].astype(str)
)
return df
def _calc_abc(self, df52):
B_cutoff = 0.5
C_cutoff = 0.80
D_cutoff = 0.95
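        # ABC by cumulative share of total sales: ts_ids covering the first 50% of
        # sales are 'A', up to 80% 'B', up to 95% 'C', and the remaining tail 'D'.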
tot_sales = (
df52.groupby([
'ts_id'
])[target_col].sum().reset_index()
)
tot_sales.rename(columns={target_col: 'total_LY_sales'}, inplace=True)
tot_sales.sort_values('total_LY_sales', ascending=False, inplace=True)
tot_sales["perc_sales"] = (
tot_sales['total_LY_sales'] / tot_sales['total_LY_sales'].sum()
)
tot_sales["cum_perc_sales"] = tot_sales.perc_sales.cumsum()
tot_sales["ABC"] = "A"
tot_sales.loc[tot_sales.cum_perc_sales > B_cutoff, "ABC"] = "B"
tot_sales.loc[tot_sales.cum_perc_sales > C_cutoff, "ABC"] = "C"
tot_sales.loc[tot_sales.cum_perc_sales > D_cutoff, "ABC"] = "D"
# tot_sales = self.add_ts_id(tot_sales)
return tot_sales[['ts_id', 'ABC', 'total_LY_sales', 'perc_sales', 'cum_perc_sales']]
# TODO: lower COV cutoffs
def get_abc_classification(self, df52):
province_abc = df52.groupby(
[store_col]
).apply(self._calc_abc)
province_abc = province_abc[['ts_id', "ABC"]].reset_index(drop=True)
# one
tot_sales = (
df52
.groupby(['ts_id'])[target_col]
.agg(['sum', 'mean'])
.reset_index()
)
tot_sales.rename(
columns={'sum': 'total_LY_sales', 'mean': 'avg_ly_sales'},
inplace=True)
tot_sales = tot_sales.merge(
province_abc,
on=['ts_id'],
how='left'
)
tot_sales = tot_sales.drop_duplicates()
# tot_sales = self.add_ts_id(tot_sales)
tot_sales = tot_sales[['ts_id', 'ABC']]
return tot_sales
def get_xyzw_classification(self, df1):
input_ts_id = df1['ts_id'].unique()
df1 = df1[df1[target_col] > 0]
cov_df = df1.groupby(['ts_id'])[target_col].agg(
["mean", "std", "count", "sum"])
cov_df.reset_index(drop=False, inplace=True)
cov_df['cov'] = np.where(
((cov_df["count"] > 2) & (cov_df["sum"] > 0)),
(cov_df["std"]) / (cov_df["mean"]),
np.nan
)
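        # Coefficient of variation (std/mean) over non-zero demand weeks drives the
        # WXYZ buckets: W <= 0.5, X <= 0.8, Y <= 1.2, Z otherwise (including ts_ids
        # where cov cannot be computed).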
cov_df['WXYZ'] = 'Z'
cov_df.loc[cov_df['cov'] <= 1.2, 'WXYZ'] = 'Y'
cov_df.loc[cov_df['cov'] <= 0.8, 'WXYZ'] = 'X'
cov_df.loc[cov_df['cov'] <= 0.5, 'WXYZ'] = 'W'
# cov_df = self.add_ts_id(cov_df)
cov_df = cov_df[['ts_id', 'cov', 'WXYZ']]
non_mapped_ts_ids = list(
set(input_ts_id) - set(cov_df['ts_id'].unique())
)
non_mapped_cov = pd.DataFrame({
'ts_id': non_mapped_ts_ids,
'cov': [np.nan] * len(non_mapped_ts_ids),
'WXYZ': ['Z'] * len(non_mapped_ts_ids)
})
cov_df = pd.concat([cov_df, non_mapped_cov], axis=0)
cov_df = cov_df.reset_index(drop=True)
return cov_df
def get_std(self, df1):
input_ts_id = df1['ts_id'].unique()
# df1 = df1[df1[target_col]>0]
std_df = df1.groupby(['ts_id'])[target_col].agg(["std"])
return std_df
def calc_interval_mean(self, x, key):
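        # ADI (average demand interval) = mean gap, in periods, between consecutive
        # non-zero demand observations of a ts_id; used together with CoV^2 for the
        # demand-pattern classification below.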
df = pd.DataFrame({"X": x, "ts_id": key}).reset_index(
drop=True).reset_index()
df = df[df.X > 0]
df["index_shift"] = df["index"].shift(-1)
df["interval"] = df["index_shift"] - df["index"]
df = df.dropna(subset=["interval"])
df['ADI'] = np.mean(df["interval"])
return df[['ts_id', 'ADI']]
def calc_adi(self, df):
# df = self.add_ts_id(df)
logger.info(
'Combinations entering adi: {}'.format(df['ts_id'].nunique()))
dict_of = dict(iter(df.groupby(['ts_id'])))
logger.info("Total tsids in df: {}".format(df.ts_id.nunique()))
logger.info("Total dictionary length: {}".format(len(dict_of)))
list_dict = [
self.calc_interval_mean(dict_of[x][target_col], x) for x in
tqdm(dict_of.keys())
]
data = (
pd.concat(list_dict)
.reset_index(drop=True)
.drop_duplicates()
.reset_index(drop=True)
)
logger.info('Combinations exiting adi: {}'.format(data.ts_id.nunique()))
return data
def get_PLC_segmentation(self, df, mature_cutoff_date, eol_cutoff_date):
df1 = df[df[target_col] > 0]
df1 = df1.groupby(['ts_id']).agg({date_col: [min, max]})
df1.reset_index(drop=False, inplace=True)
df1.columns = [' '.join(col).strip() for col in df1.columns.values]
df1['PLC Status L1'] = 'Mature'
df1.loc[
(df1[date_col + ' min'] > mature_cutoff_date), 'PLC Status L1'
] = 'New Product'
df1.loc[
(df1[date_col + ' max'] <= eol_cutoff_date), 'PLC Status L1'
] = 'EOL'
# df1 = self.add_ts_id(df1)
df1 = df1[['ts_id', 'PLC Status L1']]
return df1
def get_group_mapping(self, seg_df):
seg_df['Mixed'] = seg_df['ABC'].astype(str) + seg_df['WXYZ'].astype(str)
seg_df['Group'] = 'Group3'
group1_mask = seg_df['Mixed'].isin(['AW', 'AX', 'BW', 'BX'])
seg_df.loc[group1_mask, 'Group'] = 'Group1'
group2_mask = seg_df['Mixed'].isin(['AY', 'AZ', 'BY', 'BZ'])
seg_df.loc[group2_mask, 'Group'] = 'Group2'
return seg_df
def calc_dem_pat(self, cov_df, adi_df):
logger.info('Combinations entering calc_dem_pat: {}'.format(
cov_df.ts_id.nunique()))
logger.info('Combinations entering calc_dem_pat: {}'.format(
adi_df.ts_id.nunique()))
df = pd.merge(cov_df, adi_df, how='left', on='ts_id')
df["cov2"] = np.power(df["cov"], 2)
df["classification"] = "Lumpy"
df.loc[
(df.ADI >= 1.32) & (df.cov2 < 0.49), "classification"
] = "Intermittent"
df.loc[
(df.ADI < 1.32) & (df.cov2 >= 0.49), "classification"
] = "Erratic"
df.loc[
(df.ADI < 1.32) & (df.cov2 < 0.49), "classification"
] = "Smooth"
logger.info(
'Combinations exiting calc_dem_pat: {}'.format(df.ts_id.nunique()))
return df[['ts_id', 'classification']]
def get_start_end_dates_df(self, df, key_col, date_col, target_col,
train_max_date, end_date):
start_end_date_df = (
df[df[target_col] > 0]
.groupby(key_col)[date_col]
            .agg(['min', 'max'])
.reset_index()
.rename(columns={'min': 'start_date', 'max': 'end_date'})
)
start_end_date_df.loc[
(
start_end_date_df['end_date'] > (
train_max_date - relativedelta(weeks=eol_cutoff)
)
), 'end_date'
] = end_date
return start_end_date_df
def get_weekly_segmentation(self, df, df_sales_daily, train_max_date,
end_date):
df = df[df[date_col] <= train_max_date]
df1 = df[
df[date_col] > (train_max_date - relativedelta(weeks=52))
].copy(deep=True)
df_std = df_sales_daily[
df_sales_daily[date_col] > (train_max_date - relativedelta(days=90))
].copy(deep=True)
df1 = self.add_ts_id(df1)
abc_df = self._calc_abc(df1)
xyzw_df = self.get_xyzw_classification(df1)
std_df = self.get_std(df_std)
adi_df = self.calc_adi(df1)
demand_pattern_df = self.calc_dem_pat(xyzw_df[['ts_id', 'cov']], adi_df)
mature_cutoff_date = train_max_date - relativedelta(weeks=52)
eol_cutoff_date = train_max_date - relativedelta(weeks=13)
plc_df = self.get_PLC_segmentation(df, mature_cutoff_date,
eol_cutoff_date)
start_end_date_df = self.get_start_end_dates_df(
df, key_col='ts_id',
date_col=date_col,
target_col=target_col,
train_max_date=train_max_date,
end_date=end_date
)
seg_df = plc_df.merge(abc_df, on='ts_id', how='outer')
seg_df = seg_df.merge(xyzw_df, on='ts_id', how='outer')
seg_df = seg_df.merge(adi_df, on='ts_id', how='outer')
seg_df = seg_df.merge(demand_pattern_df, on='ts_id', how='outer')
seg_df = seg_df.merge(start_end_date_df, on='ts_id', how='outer')
seg_df = seg_df.merge(std_df, on='ts_id', how='outer')
seg_df = self.get_group_mapping(seg_df)
seg_df['Mixed'] = np.where(seg_df['Mixed']=='nannan', np.nan, seg_df['Mixed'])
return seg_df | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc_pmf/ipc_combination_fcst/engine/segmentation.py | segmentation.py |
import numpy as np
from zeno_etl_libs.utils.ipc_pmf.correction_flag import compare_df, \
add_correction_flag, compare_df_comb, add_correction_flag_comb
def fcst_correction(fcst_df_comb_lvl, comb_sales_latest_12w,
fcst_df_drug_lvl, drug_sales_latest_12w,
drug_sales_latest_4w, comb_sales_4w_wtd,
drug_sales_4w_wtd, logger):
comb_sales_latest_12w['latest_28d_demand'] = np.round(
comb_sales_latest_12w['actual_demand'] / 3)
drug_sales_latest_12w['latest_28d_demand'] = np.round(
drug_sales_latest_12w['actual_demand'] / 3)
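    # Correction flow: REC_CORR1 backfills zero forecasts with the average 28-day
    # demand of the latest 12 weeks; REC_CORR2 appends drugs with recent sales that
    # were never forecast; REC_CORR3 floors forecasts at the recency-weighted
    # 4-week demand.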
fcst_df_comb_lvl['fcst'] = np.round(fcst_df_comb_lvl['fcst'])
fcst_df_comb_lvl = fcst_df_comb_lvl.merge(
comb_sales_latest_12w[['comb_id', 'latest_28d_demand']],
on='comb_id', how='left')
fcst_df_drug_lvl['drug_id'] = fcst_df_drug_lvl['drug_id'].astype(int)
fcst_df_drug_lvl['fcst'] = np.round(fcst_df_drug_lvl['fcst'])
fcst_df_drug_lvl = fcst_df_drug_lvl.merge(
drug_sales_latest_12w[['drug_id', 'latest_28d_demand']],
on='drug_id', how='left')
logger.info(f"Combination Fcst Recency Correction starts")
df_pre_corr = fcst_df_comb_lvl.copy()
fcst_df_comb_lvl['fcst'] = np.where(fcst_df_comb_lvl['fcst'] == 0,
fcst_df_comb_lvl['latest_28d_demand'],
fcst_df_comb_lvl['fcst'])
df_post_corr = fcst_df_comb_lvl.copy()
logger.info(f"Sum Fcst before: {df_pre_corr['fcst'].sum()}")
logger.info(f"Sum Fcst after: {df_post_corr['fcst'].sum()}")
corr_comb_lst = compare_df_comb(df_pre_corr, df_post_corr, logger,
cols_to_compare=['fcst'])
fcst_df_comb_lvl = add_correction_flag_comb(fcst_df_comb_lvl, corr_comb_lst,
'REC_CORR1')
logger.info(f"Drug Fcst Recency Correction 1 starts")
df_pre_corr = fcst_df_drug_lvl.copy()
fcst_df_drug_lvl['fcst'] = np.where(fcst_df_drug_lvl['fcst'] == 0,
fcst_df_drug_lvl['latest_28d_demand'],
fcst_df_drug_lvl['fcst'])
df_post_corr = fcst_df_drug_lvl.copy()
logger.info(f"Sum Fcst before: {fcst_df_drug_lvl['fcst'].sum()}")
logger.info(f"Sum Fcst after: {fcst_df_drug_lvl['fcst'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger,
cols_to_compare=['fcst'])
fcst_df_drug_lvl = add_correction_flag(fcst_df_drug_lvl, corr_drug_lst,
'REC_CORR1')
fcst_df_comb_lvl.drop('latest_28d_demand', axis=1, inplace=True)
fcst_df_drug_lvl.drop('latest_28d_demand', axis=1, inplace=True)
# add drugs with recent sales
forecasted_drug_list = fcst_df_drug_lvl['drug_id'].tolist()
df_add1 = drug_sales_latest_12w.loc[~drug_sales_latest_12w['drug_id'].isin(forecasted_drug_list)]
df_add2 = drug_sales_latest_4w.loc[~drug_sales_latest_4w['drug_id'].isin(forecasted_drug_list)]
df_add1.rename({'latest_28d_demand': 'fcst'}, axis=1, inplace=True)
df_add1.drop('actual_demand', axis=1, inplace=True)
df_add2.rename({'actual_demand': 'fcst'}, axis=1, inplace=True)
df_add = df_add1.append(df_add2)
df_add = df_add.loc[df_add['fcst'] > 0]
if df_add.shape[0] > 0:
df_add['model'] = 'NA'
df_add['bucket'] = 'NA'
df_add['std'] = 0
df_add['correction_flags'] = ""
df_add['ts_id'] = (
df_add['store_id'].astype(int).astype(str)
+ '_'
+ df_add['drug_id'].astype(int).astype(str)
)
logger.info(f"Drug Fcst Recency Correction 2 starts")
df_pre_corr = fcst_df_drug_lvl.copy()
fcst_df_drug_lvl = fcst_df_drug_lvl.append(df_add[fcst_df_drug_lvl.columns])
df_post_corr = fcst_df_drug_lvl.copy()
logger.info(f"Sum Fcst before: {fcst_df_drug_lvl['fcst'].sum()}")
logger.info(f"Sum Fcst after: {fcst_df_drug_lvl['fcst'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger,
cols_to_compare=['fcst'])
fcst_df_drug_lvl = add_correction_flag(fcst_df_drug_lvl, corr_drug_lst,
'REC_CORR2')
fcst_df_comb_lvl['fcst'] = np.where(fcst_df_comb_lvl['fcst'] < 0, 0,
fcst_df_comb_lvl['fcst'])
fcst_df_drug_lvl['fcst'] = np.where(fcst_df_drug_lvl['fcst'] < 0, 0,
fcst_df_drug_lvl['fcst'])
# fcst 4 week weighted replace
logger.info(f"Comb Fcst Recency Correction 3 starts")
df_pre_corr = fcst_df_comb_lvl.copy()
fcst_df_comb_lvl = fcst_df_comb_lvl.merge(comb_sales_4w_wtd, on=['store_id', 'comb_id'],
how='left')
fcst_df_comb_lvl['fcst'] = np.where(fcst_df_comb_lvl['fcst'] < fcst_df_comb_lvl['wtd_demand'],
fcst_df_comb_lvl['wtd_demand'], fcst_df_comb_lvl['fcst'])
df_post_corr = fcst_df_comb_lvl.copy()
logger.info(f"Sum Fcst before: {df_pre_corr['fcst'].sum()}")
logger.info(f"Sum Fcst after: {df_post_corr['fcst'].sum()}")
corr_comb_lst = compare_df_comb(df_pre_corr, df_post_corr, logger,
cols_to_compare=['fcst'])
fcst_df_comb_lvl = add_correction_flag_comb(fcst_df_comb_lvl, corr_comb_lst,
'REC_CORR3')
logger.info(f"Drug Fcst Recency Correction 3 starts")
df_pre_corr = fcst_df_drug_lvl.copy()
fcst_df_drug_lvl = fcst_df_drug_lvl.merge(drug_sales_4w_wtd, on=['store_id', 'drug_id'],
how='left')
fcst_df_drug_lvl['fcst'] = np.where(fcst_df_drug_lvl['fcst'] < fcst_df_drug_lvl['wtd_demand'],
fcst_df_drug_lvl['wtd_demand'], fcst_df_drug_lvl['fcst'])
df_post_corr = fcst_df_drug_lvl.copy()
logger.info(f"Sum Fcst before: {df_pre_corr['fcst'].sum()}")
logger.info(f"Sum Fcst after: {df_post_corr['fcst'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger,
cols_to_compare=['fcst'])
fcst_df_drug_lvl = add_correction_flag(fcst_df_drug_lvl, corr_drug_lst,
'REC_CORR3')
fcst_df_comb_lvl.drop('wtd_demand', axis=1, inplace=True)
fcst_df_drug_lvl.drop('wtd_demand', axis=1, inplace=True)
return fcst_df_comb_lvl, fcst_df_drug_lvl | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/zeno_etl_libs/utils/ipc_pmf/heuristics/recency_corr.py | recency_corr.py |