code | package | path | filename
---|---|---|---
import argparse
import sys
import os
import pandas as pd
import time
sys.path.append('../../../..')
from zeno_etl_libs.db.db import MySQL, DB
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
""" opening the postgres connection """
rs_db = DB()
rs_db.open_connection()
""" opening the MySQL connection """
ms_db = MySQL(read_only=False)
ms_db.open_connection()
""" opening the MySQL write(many) connection """
ms_db_write = MySQL(read_only=False)
ms_db_write.open_connection()
ms_schema = "test-generico" if env in ("dev", "stage") else "prod2-generico"
""" Query to get patients-metadata from RS """
query = f"""
SELECT
id as "patient-id",
date("first-bill-date") as "first-bill-date",
date("last-bill-date") as "last-bill-date",
"number-of-bills",
"total-spend",
"average-bill-value"
FROM
"prod2-generico"."patients-metadata-2"
order by id desc
"""
patients_meta_rs = rs_db.get_df(query=query)
patients_meta_rs.columns = [c.replace('-', '_') for c in patients_meta_rs.columns]
""" date columns converted to string """
for i in ['first_bill_date', 'last_bill_date']:
patients_meta_rs[i] = pd.to_datetime(patients_meta_rs[i], errors='coerce').dt.strftime("%Y-%m-%d")
logger.info(f"patients_meta_rs sample data: \n{str(patients_meta_rs[0:2])}")
"""checking the latest last_bill_date """
try:
bill_date_max_pg = pd.to_datetime(patients_meta_rs['last_bill_date']).max().strftime('%Y-%m-%d')
except ValueError:
bill_date_max_pg = '0000-00-00'
logger.info("patients_meta_rs, latest last_bill_date: {}".format(bill_date_max_pg))
logger.info("patients_meta_rs, total count {}".format(len(patients_meta_rs)))
""" Query to get patients-metadata from MySQL """
query = f"""
SELECT
`patient-id`,
`first-bill-date`,
`last-bill-date`,
`number-of-bills`,
`total-spend`,
`average-bill-value`
FROM
`{ms_schema}`.`patients-metadata`
"""
patients_meta_ms = pd.read_sql_query(query, ms_db.connection)
patients_meta_ms.columns = [c.replace('-', '_') for c in patients_meta_ms.columns]
""" date columns converted to string """
for i in ['first_bill_date', 'last_bill_date']:
patients_meta_ms[i] = pd.to_datetime(patients_meta_ms[i], errors='coerce').dt.strftime("%Y-%m-%d")
try:
bill_date_max_ms = pd.to_datetime(patients_meta_ms['last_bill_date']).max().strftime('%Y-%m-%d')
except ValueError:
bill_date_max_ms = '0000-00-00'
logger.info("patients_meta_ms, latest last_bill_date: {}".format(bill_date_max_ms))
logger.info("patients_meta_ms, total count: {}".format(len(patients_meta_ms)))
""" merge the Redshift and MySQL data """
patients_meta_union = patients_meta_rs.merge(
patients_meta_ms[['patient_id', 'first_bill_date', 'last_bill_date', 'number_of_bills']],
how='outer',
on=['patient_id', 'first_bill_date', 'last_bill_date', 'number_of_bills'],
indicator=True
)
metadata_cols = ['patient_id', 'first_bill_date', 'last_bill_date', 'number_of_bills',
'total_spend', 'average_bill_value']
"""
New data in redshift has two parts
1. Insert - Completely new customers
2. Update - Old customers with data change
"""
patients_meta_new = patients_meta_union[patients_meta_union['_merge'] == 'left_only']
patients_meta_new = patients_meta_new[metadata_cols]
logger.info("patients_meta_new (difference), total count: {}".format(len(patients_meta_new)))
ms_patient_ids = patients_meta_ms['patient_id'].to_list() # mysql patients ids
# To be inserted
patients_meta_new_insert = patients_meta_new.query("patient_id not in @ms_patient_ids")
patients_meta_new_insert.columns = [c.replace('_', '-') for c in patients_meta_new_insert.columns]
""" Don't upload patients ids which are not in patients table, Can be Removed later """
query = f"""
SELECT
`id` AS `patient-id`
FROM
`{ms_schema}`.`patients`
"""
patients = pd.read_sql_query(query, ms_db.connection)
patients_meta_new_insert_clean = patients_meta_new_insert.merge(patients, how='inner', on=['patient-id'])
logger.info("Absent in patients table count: {}".format(
len(patients_meta_new_insert) - len(patients_meta_new_insert_clean)))
logger.info("patients_meta_new_insert_clean (to be inserted) count: {}".format(len(patients_meta_new_insert_clean)))
logger.info("Ignored if patient is absent in patient table")
if len(patients_meta_new_insert_clean):
logger.info("Start of insert, patients_meta_new_insert...")
patients_meta_new_insert_clean.to_sql(
name='patients-metadata', con=ms_db_write.engine,
if_exists='append', index=False,
method='multi', chunksize=500)
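# to_sql with method='multi' and chunksize=500 emits multi-row INSERT statements
# of up to 500 rows each through the SQLAlchemy engine exposed by ms_db_write,
# so the append happens in batches rather than one round trip per patient.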
logger.info("End of insert, patients_meta_new_insert...")
""" Delay to allow the replication in RDS DB replica (for Inserts)"""
time.sleep(5)
"""Verifying the inserted data"""
query = f"""
SELECT
`patient-id`
FROM
`{ms_schema}`.`patients-metadata`
WHERE
`patient-id` in ('%s')
""" % ("','".join([str(i) for i in patients_meta_new_insert_clean['patient-id'].to_list()]))
inserted_in_mysql = pd.read_sql_query(query, ms_db_write.connection)
logger.info("Total successfully inserted patients metadata count: {}".format(len(inserted_in_mysql)))
expected_insert_count = len(patients_meta_new_insert_clean)
actual_insert_count = len(inserted_in_mysql)
is_insert_successful = False
if expected_insert_count == actual_insert_count:
logger.info(f"[INFO] Insert was successful.")
is_insert_successful = True
else:
logger.info(f"[ERROR] Insert was incomplete, count expected: {expected_insert_count}, "
f"actual: {actual_insert_count}")
""" To be updated """
patients_meta_new_update = patients_meta_new.query("patient_id in @ms_patient_ids")
patients_meta_new_update.columns = [c.replace('_', '-') for c in patients_meta_new_update.columns]
logger.info("patients_meta_new_update count: {}".format(len(patients_meta_new_update)))
patients_meta_new_update_dicts = list(patients_meta_new_update.apply(dict, axis=1))
""" Query to bulk update """
query = f"""
UPDATE
`{ms_schema}`.`patients-metadata`
SET
`first-bill-date` = %s,
`last-bill-date` = %s,
`number-of-bills` = %s,
`total-spend` = %s,
`average-bill-value` = %s
WHERE
`patient-id` = %s
"""
logger.info("Start of update, patients_meta_new_update...")
batch_size = 1000
cur = ms_db_write.connection.cursor()
for start_index in range(0, len(patients_meta_new_update_dicts), batch_size):
logger.info(
f"Updating from start index: {start_index}, to: "
f"{min((start_index + batch_size), len(patients_meta_new_update_dicts))}")
values = patients_meta_new_update_dicts[start_index: start_index + batch_size]
values_list = []
for i in values:
values_list.append(
(i['first-bill-date'],
i['last-bill-date'],
i['number-of-bills'],
i['total-spend'],
i['average-bill-value'],
i['patient-id'])
)
""" Updating multiple patients at time """
try:
cur.executemany(query, values_list)
except ms_db_write.connection.Error as e:
try:
logger.info("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
except IndexError:
logger.info("MySQL Error: %s" % str(e))
ms_db_write.connection.rollback()
ms_db_write.connection.commit()
cur.close()
logger.info("End of update, patients_meta_new_update...")
""" Delay to allow the replication in RDS DB replica """
time.sleep(5)
"""Verifying the updated data"""
query = f"""
SELECT
`patient-id`,
`first-bill-date`,
`last-bill-date`,
`number-of-bills`,
`total-spend`,
`average-bill-value`
FROM
`{ms_schema}`.`patients-metadata`
where
date(`updated-at`) >= CURRENT_DATE()
"""
""" Alert: We are intentionally reading it from write database because we want real time numbers for validation """
updated_in_mysql = pd.read_sql_query(query, ms_db_write.connection)
for i in ['first-bill-date', 'last-bill-date']:
updated_in_mysql[i] = pd.to_datetime(updated_in_mysql[i], errors='coerce').dt.strftime("%Y-%m-%d")
# Inner join with existing data
updated_in_mysql_matched = updated_in_mysql.merge(
patients_meta_new_update[["patient-id", "first-bill-date", "last-bill-date", "number-of-bills"]],
how='inner',
on=["patient-id", "first-bill-date", "last-bill-date", "number-of-bills"])
logger.info("Total successfully updated patients metadata count: {}".format(len(updated_in_mysql_matched)))
expected_update_count = len(patients_meta_new_update)
actual_update_count = len(updated_in_mysql_matched)
is_update_successful = False
if expected_update_count == actual_update_count:
logger.info(f"[INFO] Update was successful.")
is_update_successful = True
else:
logger.info(f"[ERROR] Update was incomplete, count expected: {expected_update_count}, "
f"actual: {actual_update_count}")
if not is_update_successful:
raise Exception("Update in the metadata table failed.")
if not is_insert_successful:
raise Exception("Insert in the metadata table failed.")
""" closing the database connections """
rs_db.close_connection()
ms_db.close()
ms_db_write.close()
logger.info("Closed all DB connections successfully.") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/patients-metadata-mysql-sync/patients-metadata-mysql-sync.py | patients-metadata-mysql-sync.py |
#@owner: [email protected]
#@Purpose: To find purchase to sales ratio at drug level and purchase margin.
import os
import sys
import datetime
import argparse
import pandas as pd
import numpy as np
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.db.db import MSSql
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]",
type=str, required=False)
parser.add_argument('-sd', '--start_date', default='NA', type=str, required=False)
parser.add_argument('-ed', '--end_date', default='NA', type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
start_date = args.start_date
end_date = args.end_date
os.environ['env'] = env
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
d = datetime.timedelta(days=5)
start_dt = cur_date - d
end_dt = cur_date - datetime.timedelta(days=1)
if start_date == 'NA' and end_date == 'NA':
start_date = start_dt
end_date = end_dt
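# Example of the default window (illustrative dates): if cur_date is 2022-06-10,
# start_date becomes 2022-06-05 (cur_date - 5 days) and end_date 2022-06-09
# (cur_date - 1 day), i.e. the trailing five days are reprocessed on each run.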
s3=S3()
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
# Net Sales
q_1='''
select
'' as "wh-name",
'SB' as "sub-type-1",
'store' as "sub-type-2",
'' as "sub-type-3",
s."drug-id" ,
s."drug-name" ,
s."type" as "drug-type",
s.category as "drug-category",
(case when s."company-id"=6984 then 'true'
else 'false' end) as "goodaid-flag",
(case when s."invoice-item-reference" is null then 76
else s."distributor-id" end ) as "distributor-id",
(case when s."invoice-item-reference" is null then 'Local Purchase'
else s."distributor-name" end ) as "distributor-name",
(case when sb."patient-id"=4480 and sb."auto-short"=1 then 'AS'
When sb."auto-short"=0 and sb."auto-generated"=0 and sb."patient-id"!=4480 then 'PR'
when sb."auto-short" =1 and sb."patient-id" !=4480 then 'MS'
else 'distributor-dump' end ) as "as-ms-pr",
date(s."created-at") as "approved-date",
SUM(s."net-quantity") as "net-quantity",
SUM(s."net-quantity"*s.rate) as "net-value",
SUM(s."net-quantity"*s.mrp) as "mrp-value",
SUM(s."net-quantity"*s."purchase-rate") as "wc-value"
from
"prod2-generico".sales s
left join (select * from (select
"invoice-item-id",
"short-book-id",
row_number() over(partition by "invoice-item-id"
order by
"short-book-id" desc) as count_new
from
"prod2-generico"."prod2-generico"."short-book-invoice-items" sbii ) a
where a.count_new=1 ) g on
s."invoice-item-reference" = g."invoice-item-id"
left join "prod2-generico"."prod2-generico"."short-book-1" sb on
g."short-book-id" = sb.id
where date(s."created-at")>='{}' and date(s."created-at")<='{}' and s."franchisee-id" =1
and s."store-b2b" ='Store'
group by
"sub-type-1",
"sub-type-2",
"sub-type-3",
s."drug-id" ,
s."drug-name",
s."type" ,
s.category ,
(case when s."company-id"=6984 then 'true'
else 'false' end) ,
(case when s."invoice-item-reference" is null then 76
else s."distributor-id" end ) ,
(case when s."invoice-item-reference" is null then 'Local Purchase'
else s."distributor-name" end ) ,
(case when sb."patient-id"=4480 and sb."auto-short"=1 then 'AS'
When sb."auto-short"=0 and sb."auto-generated"=0 and sb."patient-id"!=4480 then 'PR'
when sb."auto-short" =1 and sb."patient-id" !=4480 then 'MS'
else 'distributor-dump' end ),
date(s."created-at")
'''.format(start_date, end_date)
store_sales=rs_db.get_df(query=q_1)
# DC
q_4='''
select
'' as "wh-name",
'PB' as "sub-type-1",
'DC' as "sub-type-2",
'' as "sub-type-3",
d.id as "drug-id",
d."drug-name" ,
d."type" as "drug-type",
d.category as "drug-category",
(case
when d."company-id" = 6984 then 'true'
else 'false'
end) as "goodaid-flag",
i."distributor-id" ,
d2."name" as "distributor-name",
(case when sb."patient-id"=4480 and sb."auto-short"=1 then 'AS'
When sb."auto-short"=0 and sb."auto-generated"=0 and sb."patient-id"!=4480 then 'PR'
when sb."auto-short" =1 and sb."patient-id" !=4480 then 'MS'
else 'distributor-dump' end ) as "as-ms-pr",
date(i."approved-at") as "approved-date",
SUM(ii."actual-quantity") as "net-quantity",
SUM(ii."net-value") as "net-value",
SUM(ii.mrp*ii."actual-quantity") as "mrp-value",
SUM(ii."net-value") as "wc-value"
from
"prod2-generico"."prod2-generico"."invoice-items" ii
left join "prod2-generico"."prod2-generico".invoices i on
ii."invoice-id" = i.id
left join "prod2-generico"."prod2-generico".stores s on
i."dc-id" =s.id
left join "prod2-generico"."prod2-generico".stores s2 on
i."store-id" =s2.id
left join "prod2-generico"."prod2-generico".drugs d on
ii."drug-id" = d.id
left join "prod2-generico"."prod2-generico".distributors d2 on
i."distributor-id" =d2.id
left join (select * from (select
"invoice-item-id",
"short-book-id",
row_number() over(partition by "invoice-item-id"
order by
"short-book-id" desc) as count_new
from
"prod2-generico"."prod2-generico"."short-book-invoice-items" sbii ) a
where a.count_new=1 ) g on
ii.id = g."invoice-item-id"
left join "prod2-generico"."prod2-generico"."short-book-1" sb on
g."short-book-id" = sb.id
where
date(i."approved-at") >='{}' and date(i."approved-at") <='{}'
and s2."franchisee-id" =1 and i."distributor-id" !=8105
group by
"sub-type-1",
"sub-type-2",
"sub-type-3",
d.id ,
d."drug-name" ,
d."type" ,
d.category ,
(case
when d."company-id" = 6984 then 'true'
else 'false'
end),
i."distributor-id" ,
d2."name" ,
(case when sb."patient-id"=4480 and sb."auto-short"=1 then 'AS'
When sb."auto-short"=0 and sb."auto-generated"=0 and sb."patient-id"!=4480 then 'PR'
when sb."auto-short" =1 and sb."patient-id" !=4480 then 'MS'
else 'distributor-dump' end ),
date(i."approved-at")
'''.format(start_date, end_date)
network_dc_purchase=rs_db.get_df(query=q_4)
# Lp
q_5='''
select
'' as "wh-name",
'PB' as "sub-type-1",
'LP' as "sub-type-2",
'' as "sub-type-3",
d.id as "drug-id" ,
d."drug-name" ,
d."type" as "drug-type",
d.category as "drug-category",
(case
when d."company-id" = 6984 then 'true'
else 'false'
end) as "goodaid-flag",
i."distributor-id" ,
d2."name" as "distributor-name",
'' as "as-ms-pr",
date(i."approved-at") as "approved-date",
SUM(ii."actual-quantity") as "net-quantity",
SUM(ii."net-value") as "net-value",
SUM(ii.mrp*ii."actual-quantity") as "mrp-value",
SUM(ii."net-value") as "wc-value"
from
"prod2-generico"."prod2-generico"."invoice-items-1" ii
left join "prod2-generico"."prod2-generico"."invoices-1" i on ii."franchisee-invoice-id" =i.id
left join "prod2-generico"."prod2-generico".stores s on
s.id = i."store-id"
left join "prod2-generico"."prod2-generico".drugs d on
ii."drug-id" = d.id
left join "prod2-generico"."prod2-generico".distributors d2 on
i."distributor-id" =d2.id
where
ii."invoice-item-reference" is null and s."franchisee-id" =1 and
date(i."approved-at") >='{}' and date(i."approved-at") <='{}'
group by
"sub-type-1",
"sub-type-2",
"sub-type-3",
d.id ,
d."drug-name",
d."type" ,
d.category ,
(case
when d."company-id" = 6984 then 'true'
else 'false'
end),
i."distributor-id" ,
d2."name" ,
date(i."approved-at")
'''.format(start_date, end_date)
network_lp_purchase=rs_db.get_df(query=q_5)
# Sale to Franchise
q_6='''
select
'' as "wh-name",
'SB' as "sub-type-1",
'Franchisee' as "sub-type-2",
(case
when i."distributor-id" = 8105 then 'WH'
else 'DC' end) as "sub-type-3",
d.id as "drug-id",
d."drug-name" ,
d."type" as "drug-type",
d.category as "drug-category",
(case
when d."company-id" = 6984 then 'true'
else 'false'
end) as "goodaid-flag",
i."distributor-id" ,
d2."name" as "distributor-name",
'' as "as-ms-pr",
date(i1."approved-at") as "approved-date",
SUM(ii."actual-quantity") as "net-quantity",
SUM(ii."net-value") as "net-value",
SUM(ii.mrp*ii."actual-quantity") as "mrp-value",
SUM(ii2."net-value") as "wc-value"
from
"prod2-generico"."prod2-generico"."invoice-items-1" ii
left join "prod2-generico"."prod2-generico"."invoice-items" ii2 on ii."invoice-item-reference" =ii2.id
left join "prod2-generico"."prod2-generico"."invoices-1" i1 on ii."franchisee-invoice-id" =i1.id
left join "prod2-generico"."prod2-generico".invoices i on
ii."invoice-id" =i.id
left join "prod2-generico"."prod2-generico".stores s on
i1."store-id" =s.id
left join "prod2-generico"."prod2-generico".franchisees f
on s."franchisee-id" =f.id
left join "prod2-generico"."prod2-generico".drugs d on
ii."drug-id" = d.id
left join "prod2-generico"."prod2-generico".distributors d2 on
i."distributor-id" =d2.id
where
date(i1."approved-at") >='{}' and date(i1."approved-at") <='{}'
and s."franchisee-id" !=1 and i1."franchisee-invoice" =0
group by
"sub-type-1",
"sub-type-2",
(case
when i."distributor-id" = 8105 then 'WH'
else 'DC' end),
d.id ,
d."drug-name",
d."type" ,
d.category ,
(case
when d."company-id" = 6984 then 'true'
else 'false'
end),
i."distributor-id" ,
d2."name" ,
date(i1."approved-at");
'''.format(start_date, end_date)
network_franchisee_sale=rs_db.get_df(query=q_6)
drug_info = '''
select
d.id as "drug-id",
d."drug-name",
d."type" as "drug-type" ,
d."category" as "drug-category",
(case when d."company-id" =6984 then 'true'
else 'false' end) as "goodaid-flag"
from
"prod2-generico".drugs d
'''
drug_info = rs_db.get_df(query=drug_info)
mssql = MSSql(connect_via_tunnel=False)
cnxn = mssql.open_connection()
cursor = cnxn.cursor()
#Warehouse purchase
q_7='''
select
'bhiwnadi-warehouse' as "wh-name",
'PB' as "sub-type-1",
'WH' as "sub-type-2",
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end) as "sub-type-3",
CONVERT(int, i.Barcode) as "drug-id",
sp.acno as "distributor-id",
a."name" as "distributor-name",
(CASE when f.Uid is null then 'Auto'
else 'manual' end ) as "as-ms-pr",
CONVERT(date ,
sp.Vdt) as "approved-date",
SUM(sp.qty) as "net-quantity",
SUM(sp.netamt + sp.taxamt) as "net-value",
SUM(f1.mrp*sp.Qty) as "mrp-value",
SUM(sp.NetAmt+sp.Taxamt) as "wc-value"
from
salepurchase2 sp
left join item i on
sp.itemc = i.code
left join FIFO f1 on
(f1.Pbillno = sp.Pbillno
and f1.Psrlno = sp.Psrlno
and f1.Itemc = sp.Itemc
and f1.Vdt = sp.Vdt)
left join acm a on
sp.acno = a.code
left join (Select Uid,Vtyp,Vdt,Acno,Vno FROM Salepurchase1 sp1 where sp1.Vtyp ='PO' and sp1.Slcd ='SL') f
on (f.Vno=sp.RefVno and convert(date,sp.RefVdt) =convert(date,f.Vdt) and sp.Acno =f.acno)
where
sp.vtype = 'PB'
and sp.vdt >= '{}'
and sp.vdt <= '{}'
and sp.qty >0
and
isnumeric(i.Barcode) = 1
and i.barcode not like '%[^0-9]%' and sp.Acno not IN (59489)
group by
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end),
i.Barcode ,
sp.acno ,
a."name" ,
(CASE when f.Uid is null then 'Auto'
else 'manual' end ),
sp.Vdt
'''.format(start_date, end_date)
network_wh_purchase_bhiwandi= pd.read_sql(q_7,cnxn)
mssql_ga = MSSql(connect_via_tunnel=False,db='Esdata_WS_2')
cnxn = mssql_ga.open_connection()
cursor = cnxn.cursor()
q_8='''select
'goodaid-warehouse' as "wh-name",
'PB' as "sub-type-1",
'WH' as "sub-type-2",
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end) as "sub-type-3",
CONVERT(int, i.Barcode) as "drug-id",
sp.acno as "distributor-id",
a."name" as "distributor-name",
(CASE when f.Uid is null then 'Auto'
else 'manual' end ) as "as-ms-pr",
CONVERT(date ,
sp.Vdt) as "approved-date",
SUM(sp.qty) as "net-quantity",
SUM(sp.netamt + sp.taxamt) as "net-value",
SUM(f1.mrp*sp.Qty) as "mrp-value",
SUM(sp.NetAmt+sp.Taxamt) as "wc-value"
from
salepurchase2 sp
left join item i on
sp.itemc = i.code
left join FIFO f1 on
(f1.Pbillno = sp.Pbillno
and f1.Psrlno = sp.Psrlno
and f1.Itemc = sp.Itemc
and f1.Vdt = sp.Vdt)
left join acm a on
sp.acno = a.code
left join (Select Uid,Vtyp,Vdt,Acno,Vno FROM Salepurchase1 sp1 where sp1.Vtyp ='PO' and sp1.Slcd ='SL') f
on (f.Vno=sp.RefVno and convert(date,sp.RefVdt) =convert(date,f.Vdt) and sp.Acno =f.acno)
where
sp.vtype = 'PB'
and sp.vdt >= '{}'
and sp.vdt <= '{}'
and sp.qty >0
and
isnumeric(i.Barcode) = 1
and i.barcode not like '%[^0-9]%'
group by
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end),
i.Barcode ,
sp.acno ,
a."name" ,
(CASE when f.Uid is null then 'Auto'
else 'manual' end ),
sp.Vdt
'''.format(start_date, end_date)
network_wh_purchase_goodaid= pd.read_sql(q_8,cnxn)
mssql_tepl = MSSql(connect_via_tunnel=False,db='Esdata_TEPL')
cnxn = mssql_tepl.open_connection()
cursor = cnxn.cursor()
q_9='''select
'tepl-warehouse' as "wh-name",
'PB' as "sub-type-1",
'WH' as "sub-type-2",
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end) as "sub-type-3",
CONVERT(int, i.Barcode) as "drug-id",
sp.acno as "distributor-id",
a."name" as "distributor-name",
(CASE when f.Uid is null then 'Auto'
else 'manual' end ) as "as-ms-pr",
CONVERT(date ,
sp.Vdt) as "approved-date",
SUM(sp.qty) as "net-quantity",
SUM(sp.netamt + sp.taxamt) as "net-value",
SUM(f1.mrp*sp.Qty) as "mrp-value",
SUM(sp.NetAmt+sp.Taxamt) as "wc-value"
from
salepurchase2 sp
left join item i on
sp.itemc = i.code
left join FIFO f1 on
(f1.Pbillno = sp.Pbillno
and f1.Psrlno = sp.Psrlno
and f1.Itemc = sp.Itemc
and f1.Vdt = sp.Vdt)
left join acm a on
sp.acno = a.code
left join (Select Uid,Vtyp,Vdt,Acno,Vno FROM Salepurchase1 sp1 where sp1.Vtyp ='PO' and sp1.Slcd ='SL') f
on (f.Vno=sp.RefVno and convert(date,sp.RefVdt) =convert(date,f.Vdt) and sp.Acno =f.acno)
where
sp.vtype = 'PB'
and sp.vdt >= '{}'
and sp.vdt <= '{}'
and sp.qty >0
and
isnumeric(i.Barcode) = 1
and i.barcode not like '%[^0-9]%'
group by
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end),
i.Barcode ,
sp.acno ,
a."name" ,
(CASE when f.Uid is null then 'Auto'
else 'manual' end ),
sp.Vdt
'''.format(start_date, end_date)
network_wh_purchase_tepl= pd.read_sql(q_9,cnxn)
network_wh_purchase=pd.concat([network_wh_purchase_bhiwandi,
network_wh_purchase_goodaid,network_wh_purchase_tepl],
sort=False,ignore_index=False)
network_wh_purchase[['drug-id']]= \
network_wh_purchase[['drug-id']].\
apply(pd.to_numeric, errors='ignore').astype('Int64')
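# Note: pd.to_numeric(errors='ignore') is deprecated in recent pandas releases.
# A close alternative (assumption: non-numeric barcodes, already filtered out in
# the SQL via isnumeric(), may safely become <NA> instead of being left as-is):
#   network_wh_purchase['drug-id'] = pd.to_numeric(
#       network_wh_purchase['drug-id'], errors='coerce').astype('Int64')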
network_wh_purchase=pd.merge(network_wh_purchase,drug_info,how='left',on='drug-id')
network_wh_purchase[['drug-type','drug-category', 'goodaid-flag']]=\
network_wh_purchase[['drug-type','drug-category', 'goodaid-flag']].\
fillna('NA')
network_wh_purchase[['net-quantity']]=\
network_wh_purchase[['net-quantity']].astype(np.int64)
network_wh_purchase=network_wh_purchase[['wh-name','sub-type-1',
'sub-type-2','sub-type-3',
'drug-id','drug-name','drug-type','drug-category',
'goodaid-flag','distributor-id',
'distributor-name','as-ms-pr','approved-date','net-quantity',
'net-value','mrp-value','wc-value']]
sale_purchase_all=pd.concat([store_sales,
network_dc_purchase,
network_lp_purchase,
network_wh_purchase,network_franchisee_sale],
sort=False,ignore_index=False)
sale_purchase_all[['drug-id', 'distributor-id']]= \
sale_purchase_all[['drug-id','distributor-id']].\
apply(pd.to_numeric, errors='ignore').astype('Int64')
sale_purchase_all[['net-quantity']]=sale_purchase_all[['net-quantity']].astype(np.int64)
sale_purchase_all[['net-value','mrp-value','wc-value']]=\
sale_purchase_all[['net-value','mrp-value','wc-value']].\
astype(np.float64)
created_at = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
sale_purchase_all['created-at']=datetime.datetime.strptime(created_at,"%Y-%m-%d %H:%M:%S")
updated_at = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
sale_purchase_all['updated-at']=datetime.datetime.strptime(updated_at,"%Y-%m-%d %H:%M:%S")
sale_purchase_all['created-by'] = 'etl-automation'
sale_purchase_all['updated-by'] = 'etl-automation'
sale_purchase_all.columns = [c.replace('_', '-') for c in sale_purchase_all.columns]
schema = "prod2-generico"
table_name = "purchase-sales-meta-drug-level"
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
#truncating the last 5 days data
delete_q = """
delete
from
"prod2-generico"."purchase-sales-meta-drug-level"
where
date("approved-date") >= '{start_date_n}'
and date("approved-date") <= '{end_date_n}'
and "sub-type-1" in ('inventory', 'SB', 'PB')
""".format(start_date_n=start_date, end_date_n=end_date)
rs_db.execute(delete_q)
#Keep only Last one year data
delete_one_year='''
delete from "prod2-generico"."purchase-sales-meta-drug-level"
where date("approved-date")<=current_date -interval '12 months'
'''
rs_db.execute(delete_one_year)
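# Reload pattern used here: delete the rows for the reprocessed window, trim
# anything older than 12 months, then append the freshly computed rows below,
# which keeps the target table idempotent across re-runs over the same dates.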
s3.write_df_to_db(df=sale_purchase_all[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
status = True
script_status = "Success" if status else "Failed"
email = Email()
# email.send_email_file(
#     subject=f"purchase_sales start date: {start_date} end date: {end_date} {script_status}",
#     mail_body=f"purchase_sales status: {script_status}",
#     to_emails=email_to)
email.send_email_file(subject=f"sale purchase report drug level for date {end_date}",
mail_body=f"PFA sale purchase data drug level for date {end_date} ",
to_emails=email_to)
rs_db.close_connection()
mssql.close_connection()
mssql_ga.close_connection()
mssql_tepl.close_connection()
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ops-data-query/purchase-sales-drug-level.py | purchase-sales-drug-level.py |
#@owner: [email protected]
#@Purpose: To find purchase to sales ratio at store and network level
import os
import sys
from dateutil.tz import gettz
import datetime
import argparse
import pandas as pd
import numpy as np
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.db.db import MSSql
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default=
"[email protected],[email protected],[email protected]",
type=str, required=False)
parser.add_argument('-sd', '--start_date', default='NA', type=str, required=False)
parser.add_argument('-ed', '--end_date', default='NA', type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
start_date = args.start_date
end_date = args.end_date
os.environ['env'] = env
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
d = datetime.timedelta(days=5)
start_dt = cur_date - d
end_dt = cur_date - datetime.timedelta(days=1)
if start_date == 'NA' and end_date == 'NA':
start_date = start_dt
end_date = end_dt
s3=S3()
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
#Store-sales
q_1='''select
'store' as "type-1",
s."store-id" as "entity-id" ,
s."store-name" as "entity-name" ,
'SB' as "sub-type-1",
'' as "sub-type-2",
'' as "sub-type-3",
s."type" as "drug-type",
s.category as "drug-category",
(case when s."company-id"=6984 then 'true'
else 'false' end) as "goodaid-flag",
(case when s."invoice-item-reference" is null then 76
else s."distributor-id" end ) as "distributor-id",
(case when s."invoice-item-reference" is null then 'Local Purchase'
else s."distributor-name" end ) as "distributor-name",
(case when sb."patient-id"=4480 and sb."auto-short"=1 then 'AS'
When sb."auto-short"=0 and sb."auto-generated"=0 and sb."patient-id"!=4480 then 'PR'
when sb."auto-short" =1 and sb."patient-id" !=4480 then 'MS'
else 'distributor-dump' end ) as "as-ms-pr",
date(s."created-at") as "approved-date",
SUM(s."net-quantity") as "net-quantity",
SUM(s."net-quantity"*s.rate) as "net-value"
from
"prod2-generico".sales s
left join (select * from (select
"invoice-item-id",
"short-book-id",
row_number() over(partition by "invoice-item-id"
order by
"short-book-id" desc) as count_new
from
"prod2-generico"."prod2-generico"."short-book-invoice-items" sbii ) a
where a.count_new=1 ) g on
s."invoice-item-reference" = g."invoice-item-id"
left join "prod2-generico"."prod2-generico"."short-book-1" sb on
g."short-book-id" = sb.id
where date(s."created-at")>='{}' and date(s."created-at")<='{}' and s."franchisee-id" =1
and s."store-b2b" ='Store'
group by
"type-1" ,
s."store-id" ,
s."store-name" ,
"sub-type-1",
"sub-type-2",
"sub-type-3",
s."type" ,
s.category ,
(case when s."company-id"=6984 then 'true'
else 'false' end) ,
(case when s."invoice-item-reference" is null then 76
else s."distributor-id" end ) ,
(case when s."invoice-item-reference" is null then 'Local Purchase'
else s."distributor-name" end ) ,
(case when sb."patient-id"=4480 and sb."auto-short"=1 then 'AS'
When sb."auto-short"=0 and sb."auto-generated"=0 and sb."patient-id"!=4480 then 'PR'
when sb."auto-short" =1 and sb."patient-id" !=4480 then 'MS'
else 'distributor-dump' end ) ,
date(s."created-at")
'''.format(start_date, end_date)
store_sales=rs_db.get_df(query=q_1)
#Store DC/WH Purchase
q_2='''
select
'store' as "type-1",
i."store-id" as "entity-id" ,
s."name" as "entity-name",
'PB' as "sub-type-1",
(case
when i."distributor-id" = 8105 then 'WH'
else 'DC' end) as "sub-type-2",
'' as "sub-type-3",
d."type" as "drug-type",
d.category as "drug-category",
(case
when d."company-id" = 6984 then 'true'
else 'false'
end) as "goodaid-flag",
i."distributor-id" ,
d2."name" as "distributor-name",
(case when sb."patient-id"=4480 and sb."auto-short"=1 then 'AS'
When sb."auto-short"=0 and sb."auto-generated"=0 and sb."patient-id"!=4480 then 'PR'
when sb."auto-short" =1 and sb."patient-id" !=4480 then 'MS'
else 'distributor-dump' end ) as "as-ms-pr",
date(i."approved-at") as "approved-date",
SUM(ii."actual-quantity") as "net-quantity",
SUM(ii."net-value") as "net-value"
from
"prod2-generico"."prod2-generico"."invoice-items" ii
left join "prod2-generico"."prod2-generico".invoices i on
ii."invoice-id" = i.id
left join "prod2-generico"."prod2-generico".stores s on
s.id = i."store-id"
left join "prod2-generico"."prod2-generico".drugs d on
ii."drug-id" = d.id
left join "prod2-generico"."prod2-generico".distributors d2 on
i."distributor-id" =d2.id
left join (select * from (select
"invoice-item-id",
"short-book-id",
row_number() over(partition by "invoice-item-id"
order by
"short-book-id" desc) as count_new
from
"prod2-generico"."prod2-generico"."short-book-invoice-items" sbii ) a
where a.count_new=1 ) g on
ii.id = g."invoice-item-id"
left join "prod2-generico"."prod2-generico"."short-book-1" sb on
g."short-book-id" = sb.id
where
date(i."approved-at") >='{}' and date(i."approved-at") <='{}'
and s."franchisee-id" =1
group by
"type-1",
i."store-id" ,
s."name" ,
"sub-type-1",
(case
when i."distributor-id" = 8105 then 'WH'
else 'DC' end) ,
"sub-type-3",
d."type" ,
d.category ,
(case
when d."company-id" = 6984 then 'true'
else 'false'
end),
i."distributor-id" ,
d2."name" ,
(case when sb."patient-id"=4480 and sb."auto-short"=1 then 'AS'
When sb."auto-short"=0 and sb."auto-generated"=0 and sb."patient-id"!=4480 then 'PR'
when sb."auto-short" =1 and sb."patient-id" !=4480 then 'MS'
else 'distributor-dump' end ),
date(i."approved-at")
'''.format(start_date, end_date)
store_dc_wh_purchase=rs_db.get_df(query=q_2)
#Store Local Purchase
q_3='''
select
'store' as "type-1",
i."store-id" as "entity-id" ,
s."name" as "entity-name",
'PB' as "sub-type-1",
'LP' as "sub-type-2",
'' as "sub-type-3",
d."type" as "drug-type",
d.category as "drug-category",
(case
when d."company-id" = 6984 then 'true'
else 'false'
end) as "goodaid-flag",
i."distributor-id" ,
d2."name" as "distributor-name",
'' as "as-ms-pr",
date(i."approved-at") as "approved-date",
SUM(ii."actual-quantity") as "net-quantity",
SUM(ii."net-value") as "net-value"
from
"prod2-generico"."prod2-generico"."invoice-items-1" ii
left join "prod2-generico"."prod2-generico"."invoices-1" i on ii."franchisee-invoice-id" =i.id
left join "prod2-generico"."prod2-generico".stores s on
s.id = i."store-id"
left join "prod2-generico"."prod2-generico".drugs d on
ii."drug-id" = d.id
left join "prod2-generico"."prod2-generico".distributors d2 on
i."distributor-id" =d2.id
where
ii."invoice-item-reference" is null and s."franchisee-id" =1 and
date(i."approved-at") >='{}' and date(i."approved-at") <='{}'
group by
"type-1",
i."store-id" ,
s."name" ,
"sub-type-1",
"sub-type-2",
"sub-type-3",
d."type" ,
d.category ,
(case
when d."company-id" = 6984 then 'true'
else 'false'
end),
i."distributor-id" ,
d2."name" ,
"as-ms-pr",
date(i."approved-at")
'''.format(start_date, end_date)
store_lp_purchase=rs_db.get_df(query=q_3)
#Network Level
# DC Purchase
q_4='''
select
'network' as "type-1",
i."dc-id" as "entity-id" ,
s."name" as "entity-name",
'PB' as "sub-type-1",
'DC' as "sub-type-2",
'' as "sub-type-3",
d."type" as "drug-type",
d.category as "drug-category",
(case
when d."company-id" = 6984 then 'true'
else 'false'
end) as "goodaid-flag",
i."distributor-id" ,
d2."name" as "distributor-name",
(case when sb."patient-id"=4480 and sb."auto-short"=1 then 'AS'
When sb."auto-short"=0 and sb."auto-generated"=0 and sb."patient-id"!=4480 then 'PR'
when sb."auto-short" =1 and sb."patient-id" !=4480 then 'MS'
else 'distributor-dump' end ) as "as-ms-pr",
date(i."approved-at") as "approved-date",
SUM(ii."actual-quantity") as "net-quantity",
SUM(ii."net-value") as "net-value"
from
"prod2-generico"."prod2-generico"."invoice-items" ii
left join "prod2-generico"."prod2-generico".invoices i on
ii."invoice-id" = i.id
left join "prod2-generico"."prod2-generico".stores s on
i."dc-id" =s.id
left join "prod2-generico"."prod2-generico".stores s2 on
i."store-id" =s2.id
left join "prod2-generico"."prod2-generico".drugs d on
ii."drug-id" = d.id
left join "prod2-generico"."prod2-generico".distributors d2 on
i."distributor-id" =d2.id
left join (select * from (select
"invoice-item-id",
"short-book-id",
row_number() over(partition by "invoice-item-id"
order by
"short-book-id" desc) as count_new
from
"prod2-generico"."prod2-generico"."short-book-invoice-items" sbii ) a
where a.count_new=1 ) g on
ii.id = g."invoice-item-id"
left join "prod2-generico"."prod2-generico"."short-book-1" sb on
g."short-book-id" = sb.id
where
date(i."approved-at") >='{}' and date(i."approved-at") <='{}'
and s2."franchisee-id" =1 and i."distributor-id" !=8105
group by
"type-1",
i."dc-id" ,
s."name" ,
"sub-type-1",
"sub-type-2",
"sub-type-3",
d."type" ,
d.category ,
(case
when d."company-id" = 6984 then 'true'
else 'false'
end),
i."distributor-id" ,
d2."name" ,
(case when sb."patient-id"=4480 and sb."auto-short"=1 then 'AS'
When sb."auto-short"=0 and sb."auto-generated"=0 and sb."patient-id"!=4480 then 'PR'
when sb."auto-short" =1 and sb."patient-id" !=4480 then 'MS'
else 'distributor-dump' end ) ,
date(i."approved-at")
'''.format(start_date, end_date)
network_dc_purchase=rs_db.get_df(query=q_4)
# Local purchase network
q_5='''
select
'network' as "type-1",
'' as "entity-id",
'' as "entity-name",
'PB' as "sub-type-1",
'LP' as "sub-type-2",
'' as "sub-type-3",
d."type" as "drug-type",
d.category as "drug-category",
(case
when d."company-id" = 6984 then 'true'
else 'false'
end) as "goodaid-flag",
i."distributor-id" ,
d2."name" as "distributor-name",
'' as "as-ms-pr",
date(i."approved-at") as "approved-date",
SUM(ii."actual-quantity") as "net-quantity",
SUM(ii."net-value") as "net-value"
from
"prod2-generico"."prod2-generico"."invoice-items-1" ii
left join "prod2-generico"."prod2-generico"."invoices-1" i on ii."franchisee-invoice-id" =i.id
left join "prod2-generico"."prod2-generico".stores s on
s.id = i."store-id"
left join "prod2-generico"."prod2-generico".drugs d on
ii."drug-id" = d.id
left join "prod2-generico"."prod2-generico".distributors d2 on
i."distributor-id" =d2.id
where
ii."invoice-item-reference" is null and s."franchisee-id" =1 and
date(i."approved-at") >='{}' and date(i."approved-at") <='{}'
group by
"type-1",
"entity-id" ,
"entity-name" ,
"sub-type-1",
"sub-type-2",
"sub-type-3",
d."type" ,
d.category ,
(case
when d."company-id" = 6984 then 'true'
else 'false'
end),
i."distributor-id" ,
d2."name" ,
date(i."approved-at")
'''.format(start_date, end_date)
network_lp_purchase=rs_db.get_df(query=q_5)
# Sale to Franchisee
q_6='''
select
'network' as "type-1",
s."franchisee-id" as "entity-id" ,
f."name" as "entity-name",
'SB' as "sub-type-1",
'Franchisee' as "sub-type-2",
(case
when i."distributor-id" = 8105 then 'WH'
else 'DC' end) as "sub-type-3",
d."type" as "drug-type",
d.category as "drug-category",
(case
when d."company-id" = 6984 then 'true'
else 'false'
end) as "goodaid-flag",
i."distributor-id" ,
d2."name" as "distributor-name",
'' as "as-ms-pr",
date(i1."approved-at") as "approved-date",
SUM(ii."actual-quantity") as "net-quantity",
SUM(ii."net-value") as "net-value"
from
"prod2-generico"."prod2-generico"."invoice-items-1" ii
left join "prod2-generico"."prod2-generico"."invoices-1" i1 on ii."franchisee-invoice-id" =i1.id
left join "prod2-generico"."prod2-generico".invoices i on
ii."invoice-id" =i.id
left join "prod2-generico"."prod2-generico".stores s on
i1."store-id" =s.id
left join "prod2-generico"."prod2-generico".franchisees f
on s."franchisee-id" =f.id
left join "prod2-generico"."prod2-generico".drugs d on
ii."drug-id" = d.id
left join "prod2-generico"."prod2-generico".distributors d2 on
i."distributor-id" =d2.id
where
date(i1."approved-at") >='{}' and date(i1."approved-at") <='{}'
and s."franchisee-id" !=1 and i1."franchisee-invoice" =0
group by
"type-1",
s."franchisee-id" ,
f."name" ,
"sub-type-1",
"sub-type-2",
(case
when i."distributor-id" = 8105 then 'WH'
else 'DC' end),
d."type" ,
d.category ,
(case
when d."company-id" = 6984 then 'true'
else 'false'
end),
i."distributor-id" ,
d2."name" ,
date(i1."approved-at");
'''.format(start_date, end_date)
network_franchisee_sale=rs_db.get_df(query=q_6)
# Drug info
drug_info = '''
select
d.id as "drug-id",
d."type" as "drug-type" ,
d."category" as "drug-category",
(case when d."company-id" =6984 then 'true'
else 'false' end) as "goodaid-flag"
from
"prod2-generico".drugs d
'''
drug_info = rs_db.get_df(query=drug_info)
# Warehouse purchase network
# Bhiwandi Warehouse
mssql_bhw = MSSql(connect_via_tunnel=False)
cnxn = mssql_bhw.open_connection()
cursor = cnxn.cursor()
q_7='''
select
'network' as "type-1",
199 as "entity-id" ,
'bhiwandi-warehouse' as "entity-name",
'PB' as "sub-type-1",
'WH' as "sub-type-2",
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end) as "sub-type-3",
i.Barcode as "drug-id",
sp.acno as "distributor-id",
a."name" as "distributor-name",
(CASE when f.Uid is null then 'Auto'
else 'manual' end ) as "as-ms-pr",
CONVERT(date ,
sp.Vdt) as "approved-date",
SUM(sp.qty) as "net-quantity",
SUM(sp.netamt + sp.taxamt) as "net-value"
from
salepurchase2 sp
left join item i on
sp.itemc = i.code
left join acm a on
sp.acno = a.code
left join (Select Uid,Vtyp,Vdt,Acno,Vno FROM Salepurchase1 sp1 where sp1.Vtyp ='PO' and sp1.Slcd ='SL') f
on (f.Vno=sp.RefVno and convert(date,sp.RefVdt) =convert(date,f.Vdt) and sp.Acno =f.acno)
where
sp.vtype = 'PB'
and sp.vdt >= '{}'
and sp.vdt <= '{}'
and sp.qty >0
and
isnumeric(i.Barcode) = 1
and i.barcode not like '%[^0-9]%' and sp.Acno not IN (59489)
group by
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end),
i.Barcode ,
sp.acno ,
a."name" ,
(CASE when f.Uid is null then 'Auto'
else 'manual' end ),
sp.Vdt;
'''.format(start_date, end_date)
network_wh_purchase_bhiwandi= pd.read_sql(q_7,cnxn)
# GOODAID Warehouse
mssql_ga = MSSql(connect_via_tunnel=False,db='Esdata_WS_2')
cnxn = mssql_ga.open_connection()
cursor = cnxn.cursor()
q_8='''
select
'network' as "type-1",
343 as "entity-id" ,
'goodaid-warehouse' as "entity-name",
'PB' as "sub-type-1",
'WH' as "sub-type-2",
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end) as "sub-type-3",
i.Barcode as "drug-id",
sp.acno as "distributor-id",
a."name" as "distributor-name",
(CASE when f.Uid is null then 'Auto'
else 'manual' end ) as "as-ms-pr",
CONVERT(date ,
sp.Vdt) as "approved-date",
SUM(sp.qty) as "net-quantity",
SUM(sp.netamt + sp.taxamt) as "net-value"
from
salepurchase2 sp
left join item i on
sp.itemc = i.code
left join acm a on
sp.acno = a.code
left join (Select Uid,Vtyp,Vdt,Acno,Vno FROM Salepurchase1 sp1 where sp1.Vtyp ='PO' and sp1.Slcd ='SL') f
on (f.Vno=sp.RefVno and convert(date,sp.RefVdt) =convert(date,f.Vdt) and sp.Acno =f.acno)
where
sp.vtype = 'PB'
and sp.vdt >= '{}'
and sp.vdt <= '{}'
and sp.qty >0 and
isnumeric(i.Barcode) = 1
and i.barcode not like '%[^0-9]%'
group by
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end),
i.Barcode ,
sp.acno ,
a."name" ,
(CASE when f.Uid is null then 'Auto'
else 'manual' end ),
sp.Vdt
'''.format(start_date, end_date)
network_wh_purchase_goodaid= pd.read_sql(q_8,cnxn)
mssql_tepl = MSSql(connect_via_tunnel=False,db='Esdata_TEPL')
cnxn = mssql_tepl.open_connection()
cursor = cnxn.cursor()
q_9='''
select
'network' as "type-1",
342 as "entity-id" ,
'tepl-warehouse' as "entity-name",
'PB' as "sub-type-1",
'WH' as "sub-type-2",
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end) as "sub-type-3",
i.Barcode as "drug-id",
sp.acno as "distributor-id",
a."name" as "distributor-name",
(CASE when f.Uid is null then 'Auto'
else 'manual' end ) as "as-ms-pr",
CONVERT(date ,
sp.Vdt) as "approved-date",
SUM(sp.qty) as "net-quantity",
SUM(sp.netamt + sp.taxamt) as "net-value"
from
salepurchase2 sp
left join item i on
sp.itemc = i.code
left join acm a on
sp.acno = a.code
left join (Select Uid,Vtyp,Vdt,Acno,Vno FROM Salepurchase1 sp1 where sp1.Vtyp ='PO' and sp1.Slcd ='SL') f
on (f.Vno=sp.RefVno and convert(date,sp.RefVdt) =convert(date,f.Vdt) and sp.Acno =f.acno)
where
sp.vtype = 'PB'
and sp.vdt >= '{}'
and sp.vdt <= '{}'
and sp.qty >0 and
isnumeric(i.Barcode) = 1
and i.barcode not like '%[^0-9]%'
group by
(CASE
when sp.vno>0 then 'barcoded'
else 'non-barcoded'
end),
i.Barcode ,
sp.acno ,
a."name" ,
(CASE when f.Uid is null then 'Auto'
else 'manual' end ),
sp.Vdt
'''.format(start_date, end_date)
network_wh_purchase_tepl= pd.read_sql(q_9,cnxn)
network_wh_purchase=\
pd.concat([network_wh_purchase_bhiwandi,network_wh_purchase_goodaid,network_wh_purchase_tepl],
sort=False,ignore_index=False)
network_wh_purchase[['drug-id']]= network_wh_purchase[['drug-id']].\
apply(pd.to_numeric, errors='ignore').\
astype('Int64')
network_wh_purchase=pd.merge(network_wh_purchase,drug_info,how='left',on='drug-id')
network_wh_purchase[['drug-type','drug-category', 'goodaid-flag']]=\
network_wh_purchase[['drug-type','drug-category', 'goodaid-flag']].\
fillna('NA')
network_wh_purchase=network_wh_purchase.drop(['drug-id'],axis=1)
network_wh_purchase[['net-quantity']]=network_wh_purchase[['net-quantity']].astype(np.int64)
network_wh_purchase=network_wh_purchase.\
groupby(['type-1','entity-id','entity-name','sub-type-1',
'sub-type-2','sub-type-3','drug-type','drug-category',
'goodaid-flag','distributor-id',
'distributor-name','as-ms-pr','approved-date']).sum()
network_wh_purchase=network_wh_purchase.reset_index()
network_wh_purchase[['entity-id']]=network_wh_purchase[['entity-id']].replace(0, np.nan)
network_wh_purchase=network_wh_purchase[['type-1','entity-id','entity-name','sub-type-1',
'sub-type-2','sub-type-3','drug-type','drug-category',
'goodaid-flag','distributor-id',
'distributor-name','as-ms-pr','approved-date','net-quantity',
'net-value']]
sale_purchase_all=pd.concat([store_sales,store_dc_wh_purchase,store_lp_purchase,
network_dc_purchase,
network_lp_purchase,network_wh_purchase,network_franchisee_sale],
sort=False,ignore_index=False)
sale_purchase_all[['entity-id', 'distributor-id']]= \
sale_purchase_all[['entity-id','distributor-id']].\
apply(pd.to_numeric, errors='ignore').astype('Int64')
sale_purchase_all[['net-quantity']]=sale_purchase_all[['net-quantity']].astype(np.int64)
sale_purchase_all[['net-value']]=sale_purchase_all[['net-value']].astype(np.float64)
#code for daily investor mail
sale_purchase_investor=sale_purchase_all[sale_purchase_all["approved-date"]==end_date]
sale_purchase_investor=sale_purchase_investor[sale_purchase_investor["type-1"]=='store']
sale_purchase_investor=sale_purchase_investor[sale_purchase_investor["sub-type-1"]=='PB']
sale_purchase_investor=sale_purchase_investor.\
drop(["type-1","sub-type-1","sub-type-3","drug-type",
"drug-category","goodaid-flag","distributor-id",
"distributor-name","as-ms-pr","net-quantity"],axis=1)
sale_purchase_investor=sale_purchase_investor.groupby(['entity-id','entity-name',
'sub-type-2',
'approved-date']).sum()
sale_purchase_investor = pd.pivot_table(sale_purchase_investor,
values='net-value',
index=['entity-id', 'entity-name','approved-date'],
columns=['sub-type-2']).reset_index()
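# Illustrative result shape (assumed store and numbers): one row per store per
# day, with purchase value split by source from "sub-type-2", e.g.
#   entity-id  entity-name   approved-date   DC        LP      WH
#   2          some-store    2022-06-09      125000.0  4300.0  98000.0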
sale_purchase_investor=sale_purchase_investor.fillna(0)
sale_purchase_investor=sale_purchase_investor.reset_index()
sale_purchase_investor=sale_purchase_investor.drop(["index"],axis=1)
sale_purchase_file_name = 'purchase_sale/sale_purchase_report_{}.csv'.format(end_date)
sale_purchase_uri = s3.save_df_to_s3(df=sale_purchase_investor, file_name=sale_purchase_file_name)
created_at = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
sale_purchase_all['created-at']=datetime.datetime.strptime(created_at,"%Y-%m-%d %H:%M:%S")
updated_at = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
sale_purchase_all['updated-at']=datetime.datetime.strptime(updated_at,"%Y-%m-%d %H:%M:%S")
sale_purchase_all['created-by'] = 'etl-automation'
sale_purchase_all['updated-by'] = 'etl-automation'
sale_purchase_all.columns = [c.replace('_', '-') for c in sale_purchase_all.columns]
schema = "prod2-generico"
table_name = "purchase-sales-meta"
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
#truncating the last 5 days data
delete_q = """
DELETE
FROM
"prod2-generico"."purchase-sales-meta"
WHERE
date("approved-date") >= '{start_date_n}'
and date("approved-date") <= '{end_date_n}'
""".format(start_date_n=start_date, end_date_n=end_date)
rs_db.execute(delete_q)
#Keep only Last one year data
delete_one_year='''
delete from "prod2-generico"."purchase-sales-meta"
where date("approved-date")<=current_date -interval '12 months'
'''
rs_db.execute(delete_one_year)
s3.write_df_to_db(df=sale_purchase_all[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
status = True
script_status = "Success" if status else "Failed"
email = Email()
#email.send_email_file(subject=f"purchase_sales
# start date: {start_date} end date: {end_date} {script_status}",
#mail_body=f"purchase_sales status: {script_status} ",
#to_emails=email_to)
email.send_email_file(subject=f"sale purchase report for date {end_date}",
mail_body=f"PFA sale purchase data for date {end_date} ",
to_emails=email_to, file_uris=[sale_purchase_uri])
rs_db.close_connection()
mssql_bhw.close_connection()
mssql_ga.close_connection()
mssql_tepl.close_connection()
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ops-data-query/ops-data-query.py | ops-data-query.py |
import os
import sys
import argparse
import pandas as pd
import datetime as dt
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.fcst_performance.helper_functions import get_store_ids, \
handle_multiple_resets
from zeno_etl_libs.utils.fcst_performance.get_data import GetData
from zeno_etl_libs.utils.fcst_performance.data_operations import cal_fields_store_drug_level
def main(debug_mode, days_to_replenish, days_delta, reset_date, exclude_stores,
rs_db_read, rs_db_write, read_schema, write_schema):
s3 = S3()
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
# Get store_ids with corresponding store_type-map for above reset date
store_ids, store_type_map = get_store_ids(reset_date, exclude_stores,
rs_db_read, read_schema)
try:
if store_ids:
logger.info(f"Store IDs to perform calculations: {store_ids}")
# Initialize get_data class object
get_data = GetData(store_ids, reset_date, days_to_replenish, days_delta,
rs_db_read, read_schema, logger)
logger.info(f"Fetching required data for all stores")
# Get required raw data for all stores and group by store_id
df_inv_comb = get_data.curr_inv().groupby("store_id")
df_sales_comb = get_data.sales_28day().groupby("store_id")
df_pr_loss_comb = get_data.pr_loss_28day().groupby("store_id")
df_3m_sales_comb = get_data.sales_3m().groupby("store_id")
df_sdl_combined = []
for index, store_id in enumerate(store_ids):
print(f"Calculations started for store_id: {store_id}")
logger.info(f"Calculations started for store_id: {store_id}")
store_type = store_type_map[index]
# Get uploaded_at cut_off_condition if multiple resets happened
sql_cut_off_condition = handle_multiple_resets(reset_date, store_id,
store_type, rs_db_read,
read_schema, logger)
if store_type == "ipc":
df_ss = get_data.ipc_ss(store_id, sql_cut_off_condition)
elif store_type == "non_ipc":
df_ss = get_data.non_ipc_ss(store_id, sql_cut_off_condition)
else:
df_ss = get_data.ipc2_ss(store_id, sql_cut_off_condition)
# Get store level data from grouped data
df_inv = df_inv_comb.get_group(store_id)
df_sales = df_sales_comb.get_group(store_id)
df_pr_loss = df_pr_loss_comb.get_group(store_id).groupby(
"drug_id")["pr_loss"].sum().reset_index()
df_3m_sales = df_3m_sales_comb.get_group(store_id)
# Get store-drug level forecast performance table
logger.info("Creating store-drug level table")
df_sdl = cal_fields_store_drug_level(df_ss, df_inv, df_sales,
df_pr_loss, df_3m_sales)
df_sdl_combined.append(df_sdl)
df_store_drug_lvl = pd.concat(df_sdl_combined)
logger.info("All calculations completed")
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
df_store_drug_lvl['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df_store_drug_lvl['created-by'] = 'etl-automation'
df_store_drug_lvl['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df_store_drug_lvl['updated-by'] = 'etl-automation'
df_store_drug_lvl.columns = [c.replace('_', '-') for c in
df_store_drug_lvl.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='forecast-performance-store-drug-level',
schema=write_schema)
columns = list(table_info['column_name'])
df_store_drug_lvl = df_store_drug_lvl[columns] # required column order
logger.info("Writing to table: forecast-performance-store-drug-level")
s3.write_df_to_db(df=df_store_drug_lvl,
table_name='forecast-performance-store-drug-level',
db=rs_db_write, schema=write_schema)
else:
logger.info("Writing to RS-DB skipped")
else:
logger.info("No Stores to evaluate")
status = 'Success'
logger.info(f"Forecast performance code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"Forecast performance code execution status: {status}")
return status, store_ids
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str,
required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
parser.add_argument('-d', '--debug_mode', default="N", type=str,
required=False)
parser.add_argument('-dtr', '--days_to_replenish', default="4", type=str,
required=False)
parser.add_argument('-dd', '--days_delta', default="28", type=str,
required=False)
parser.add_argument('-rd', '--reset_date', default="YYYY-MM-DD",
type=str, required=False)
parser.add_argument('-exs', '--exclude_stores', default="282,283,293,291,295,299,303,302,298,316,311,313",
type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
debug_mode = args.debug_mode
# JOB EXCLUSIVE PARAMS
days_to_replenish = int(args.days_to_replenish)
days_delta = int(args.days_delta)
reset_date = args.reset_date
exclude_stores = args.exclude_stores.replace(" ", "").split(",")
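# Example invocation (hypothetical values, for illustration only):
#   python fcst_performance_main.py -e stage -d N -rd 2022-06-01 -dtr 4 -dd 28 \
#       -et [email protected] -exs 282,283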
logger = get_logger()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# Default value: days_to_replenish = 4, days_delta = 28
if days_delta < 28:
logger.info(f"INPUT: days_delta = {days_delta} not acceptable, changing to 28")
days_delta = 28
# convert string store ids in exclude_stores to int
exclude_stores = [int(i) for i in exclude_stores]
# Reset date to look for in ss_table
if reset_date == 'YYYY-MM-DD':
reset_date = dt.date.today() - dt.timedelta(days_to_replenish + days_delta)
logger.info(f"Store reset date selected: {reset_date}")
else:
reset_date = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
logger.info(f"INPUT: Store reset date: {reset_date}")
if (dt.date.today() - reset_date).days < (days_delta + days_to_replenish):
logger.info("Reset date too close, reverting to default")
reset_date = dt.date.today() - dt.timedelta(days_to_replenish + days_delta)
logger.info(f"Store reset date selected: {reset_date}")
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
""" calling the main function """
status, store_ids = main(
debug_mode, days_to_replenish, days_delta, reset_date, exclude_stores,
rs_db_read, rs_db_write, read_schema, write_schema)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"Fcst Performance Job (GLUE-{env}) {str(dt.date.today())}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Job Params: {args}
Reset Date Evaluated: {str(reset_date)}
Store ID's Evaluated: {store_ids}
""",
to_emails=email_to)
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/fcst_performance/fcst_performance_main.py | fcst_performance_main.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
def main(db):
table_name = "sb-sla"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"etl-created-by",
"updated-by",
"updated-at",
"patient-id",
"store-name",
"drug-name",
"as-ms-pr",
"round",
"distributor-flag",
"invoice-tat",
"dispatch-tat",
"delivery-tat",
"re-order-tat",
"ordered-tat",
"status",
"requested-quantity",
"quantity",
"required-quantity",
"inventory-at-creation",
"inventory-at-ordering",
"created-at",
"year-created-at",
"month-created-at",
"ordered-time",
"invoiced-at",
"dispatched-at",
"delivered-at",
"completed-at",
"re-ordered-at",
"store-delivered-at",
"decline-reason",
"type",
"store-id",
"drug-id",
"company",
"company-id",
"composition",
"composition-master-id",
"category",
"schedule",
"sub-type",
"preferred-distributor-id",
"preferred-distributor-name",
"drug-grade",
"purchase-rate",
"ptr",
"distributor-type",
"recieved-distributor-id",
"received-distributor-name",
"forward-dc-id",
"dc-name",
"abo",
"line-manager",
"store-manager",
"city",
"store-b2b",
"franchisee-short-book",
"auto-check",
"sla-id"
)
select
a.id,
'etl-automation' as "etl-created-by",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
a."patient-id" as "patient-id",
b."name" as "store-name",
a."drug-name" as "drug-name",
(case
when a."auto-short" = 1
and a."created-by" = 'AUTO SHORT'
and a."patient-id" = 4480 then 'as'
when a."auto-short" = 1
and a."patient-id" != 4480 then 'ms'
else 'pr'
end) as "as-ms-pr",
(case
when extract('hour'
from
a."created-at")<14 then 1
when (extract('hour'
from
a."created-at")>= 14
and extract('hour'
from
a."created-at")<23) then 2
else 3
end) as "round" ,
(case
when a."distributor-id" = 8105 then 'wh'
else 'dc'
end) as "distributor-flag" ,
--Fulfillment on Invoice
(case when (date(a."invoiced-at") = '0101-01-01'
or date(a."created-at") = '0101-01-01') then null
else dateadd(hour, tat."invoice-time",(dateadd(day, tat."invoice-date", date(a."created-at"))))
end) as "invoice-tat",
--Fulfillment on dispatch
(case when (date(a."dispatched-at") = '0101-01-01'
or date(a."created-at") = '0101-01-01') then null
else dateadd(hour, tat."dispatch-time",(dateadd(day, tat."dispatch-date", date(a."created-at"))))
end) as "dispatch-tat",
--Fulfillment on delivery
(case when (date(msda."store-delivered-at") = '0101-01-01'
or date(a."created-at") = '0101-01-01') then null
else dateadd(hour, tat."delivery-time",(dateadd(day, tat."delivery-date", date(a."created-at"))))
end) as "delivery-tat",
-- Re-order Timing --
(case when (date(a."re-ordered-at") = '0101-01-01'
or date(a."created-at") = '0101-01-01') then null
else dateadd(hour, tat."reorder-time",(dateadd(day, tat."reorder-date", date(a."created-at"))))
end) as "re-order-tat",
--order Timing--
(case when (date(a."ordered-at") = '0101-01-01'
or date(a."created-at") = '0101-01-01') then null
else dateadd(hour, tat."reorder-time",(dateadd(day, tat."reorder-date", date(a."created-at"))))
end) as "ordered-tat",
a."status" as "status",
a."requested-quantity" as "requested-quantity",
a."quantity" as "quantity",
a."required-quantity" as "required-quantity",
a."inventory-at-creation" as "inventory-at-creation" ,
a."inventory-at-ordering" as "inventory-at-ordering",
case
when a."created-at" = '0101-01-01' then null
else a."created-at"
end as "created-at",
extract(year
from
a."created-at") as "year-created-at",
extract(month
from
a."created-at") as "month-created-at",
case
when date(a."ordered-at") = '0101-01-01' then null
else a."ordered-at"
end as "ordered-time",
case
when date(a."invoiced-at") = '0101-01-01' then null
else a."invoiced-at"
end as "invoiced-at",
case
when date(a."dispatched-at") = '0101-01-01' then null
else a."dispatched-at"
end as "dispatched-at",
case
when date(a."delivered-at") = '0101-01-01' then null
else a."delivered-at"
end as "delivered-at",
case
when date(a."completed-at") = '0101-01-01' then null
else a."completed-at"
end as "completed-at",
case
when date(a."re-ordered-at") = '0101-01-01' then null
else a."re-ordered-at"
end as "re-ordered-at",
case
when date(msda."store-delivered-at") = '0101-01-01' then null
else msda."store-delivered-at"
end as "store-delivered-at",
a."decline-reason" as "decline-reason",
c."type",
a."store-id" as "store-id",
a."drug-id" as "drug-id",
c."company",
c."company-id" as "company-id" ,
c."composition" ,
c."composition-master-id" as "composition-master-id" ,
c."category" ,
c."schedule" ,
c."sub-type" as "sub-type" ,
f."id" as "preferred-distributor-id",
f."name" as "preferred-distributor-name",
e."drug-grade" as "drug-grade",
dp."purchase-rate" as "purchase-rate",
dp."ptr",
d."type" as "distributor-type",
d."id" as "recieved-distributor-id",
d."name" as "received-distributor-name",
j."forward-dc-id" as "forward-dc-id",
ss."name" as "dc-name",
msm."abo" ,
msm."line-manager" ,
msm."store-manager" ,
msm."city",
msm."store-b2b",
a."franchisee-short-book" as "franchisee-short-book",
a."auto-check",
tat."sla-id"
from
"prod2-generico"."short-book-1" a
left join "prod2-generico"."stores" b on
b."id" = a."store-id"
left join "prod2-generico"."drugs" c on
c."id" = a."drug-id"
left join (
select
"drug-id", AVG("purchase-rate") as "purchase-rate", AVG(ptr) as "ptr"
from
"prod2-generico"."inventory-1" i
where
"created-at" >= dateadd(day,
-360,
CURRENT_DATE)
group by
"drug-id") as dp on
a."drug-id" = dp."drug-id"
left join "prod2-generico"."distributors" d on
d."id" = a."distributor-id"
left join "prod2-generico"."drug-order-info" e on
e."store-id" = a."store-id"
and e."drug-id" = a."drug-id"
left join "prod2-generico"."distributors" f on
a."preferred-distributor" = f."id"
left join (
select
*
from
"prod2-generico"."store-dc-mapping"
where
"drug-type" = 'ethical') j on
j."store-id" = a."store-id"
left join "prod2-generico"."stores" ss on
ss."id" = j."forward-dc-id"
left join "prod2-generico"."store-delivered" msda on
a."id" = msda."id"
left join "prod2-generico"."stores-master" msm on
a."store-id" = msm.id
left join "prod2-generico"."tat-sla" tat on
(case
when extract('hour'
from
a."created-at")<14 then 1
when (extract('hour'
from
a."created-at")>= 14
and extract('hour'
from
a."created-at")<23) then 2
else 3
end) = tat.round
and
(case
when a."distributor-id" = 8105 then 'wh'
else 'dc'
end) = tat."distributor-type"
and
(case
when a."auto-short" = 1
and a."created-by" = 'AUTO SHORT'
and a."patient-id" = 4480 then 'as_ms'
when a."auto-short" = 1
and a."patient-id" != 4480 then 'as_ms'
else 'pr'
end) = tat."as-ms-pr-flag"
and datepart(weekday,
a."created-at") = tat.day
and a."store-id" = tat."store-id"
where a."status" not in ('deleted');
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/sb-sla/sb-sla.py | sb-sla.py |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
from zeno_etl_libs.helper.websocket.websocket import Websocket
import json
from datetime import datetime, timedelta
import argparse
import pandas as pd
import numpy as np
import time
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-st', '--start1', default="NULL", type=str, required=False)
parser.add_argument('-ed', '--start2', default="NULL", type=str, required=False)
parser.add_argument('-ct', '--cluster_to_exclude_if_blank_none', default="NULL", type=str, required=False)
parser.add_argument('-wt', '--write_to_mysql', default="1", type=str, required=False)
parser.add_argument('-ah', '--api_hit', default="1", type=str, required=False)
parser.add_argument('-rfm', '--read_from_mysql', default="0", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
start1 = args.start1
start2 = args.start2
cluster_to_exclude_if_blank_none = args.cluster_to_exclude_if_blank_none
write_to_mysql = args.write_to_mysql
api_hit = args.api_hit
read_from_mysql = args.read_from_mysql
if int(read_from_mysql) == 1:
read_from_mysql = True
else:
read_from_mysql = False
os.environ['env'] = env
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
# Reason for using MySQL read - just after writing, we want to hit the API with the incrementally added ids
mysql_read = MySQL()
mysql_read.open_connection()
s3 = S3()
ws = Websocket()
start_time = datetime.now(tz=gettz('Asia/Kolkata'))
today_date = start_time.strftime('%Y-%m-%d')
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("write_to_mysql- " + write_to_mysql)
logger.info("api_hit- " + api_hit)
logger.info("read_from_mysql- " + str(read_from_mysql))
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
code_started_at = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
code_started_at_datetime = datetime.now(tz=gettz('Asia/Kolkata'))
# =============================================================================
# set parameters
# =============================================================================
if start1 == "NULL" and start2 == "NULL":
    # pick orders from last night 19:00 to this morning 09:00
logger.info("Read automated dates")
if datetime.now(tz=gettz('Asia/Kolkata')).strftime('%H:%M:%S') < '09:30:00':
start1 = (datetime.now(tz=gettz('Asia/Kolkata')) -
timedelta(days=1)).strftime('%Y-%m-%d 19:00:00')
start2 = (datetime.now(tz=gettz('Asia/Kolkata'))).strftime('%Y-%m-%d 09:00:00')
logger.info("start1 {}".format(start1))
logger.info("start2 {}".format(start2))
logger.info("")
else:
if env == 'dev':
def ceil_dt(dt, delta):
return dt + (datetime(1, 1, 1, 0, 0, tzinfo=gettz('Asia/Calcutta')) - dt) % delta
else:
def ceil_dt(dt, delta):
return dt + (datetime(1, 1, 1, 0, 0, 0, tzinfo=gettz('Asia/Calcutta')) - dt) % delta + timedelta(
minutes=23, seconds=20)
start2 = (ceil_dt(datetime.now(tz=gettz('Asia/Kolkata')), timedelta(minutes=-30)))
        # If startup takes more than 6.40 minutes, ceil_dt returns the next time interval,
        # so roll the window back by 30 minutes in that case
if start2>code_started_at_datetime:
start2 = start2 + timedelta( minutes=-30, seconds=0)
start1 = (start2 - timedelta(minutes=30)).strftime('%Y-%m-%d %H:%M:%S')
start2 = start2.strftime('%Y-%m-%d %H:%M:%S')
logger.info("start1 {}".format(start1))
logger.info("start2 {}".format(start2))
logger.info("")
else:
start1 = start1
start2 = start2
logger.info("Read manual dates")
logger.info("start1 {}".format(start1))
logger.info("start2 {}".format(start2))
# start1 = '2022-07-07 08:00:00'
# start2 = '2022-07-07 10:00:00'
# Parse store/cluster ids irrespective of the input format of the parameter
def fetch_number(values):
    """Return the integers found in `values`, silently skipping non-numeric tokens."""
    numbers = []
    for i in values:
        try:
            numbers.append(int(i))
        except (ValueError, TypeError):
            pass
    return numbers
if cluster_to_exclude_if_blank_none == "NULL":
    logger.info('Missing parameter for cluster exclusion, taking all clusters')
    cluster_to_exclude_if_blank_none = []
else:
    cluster_to_exclude_if_blank_none = fetch_number(cluster_to_exclude_if_blank_none[1:-1].split(','))
    logger.info('Read parameter for cluster exclusion, cluster ids to exclude - {}'.format(
        cluster_to_exclude_if_blank_none))
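# e.g. passing --cluster_to_exclude_if_blank_none "[2,5]" excludes clusters 2 and 5:
# the surrounding brackets are stripped and each comma-separated token is parsed as an int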
# =============================================================================
# store clusters
# =============================================================================
if read_from_mysql:
qc = """
select
sf.`feature-id`,
f.feature,
sf.`store-id`,
sf.`is-active`,
sc.`cluster-id`
from
features f
join `store-features` sf on
f.id = sf.`feature-id`
join `store-clusters` sc on
sc.`store-id` = sf.`store-id`
where
sf.`feature-id` = 69
and sf.`is-active` = 1
and sc.`is-active` = 1
"""
store_clusters = pd.read_sql_query(qc, mysql_read.connection)
else:
qc = """
select
sf."feature-id",
f.feature,
sf."store-id",
sf."is-active",
sc."cluster-id"
from
"prod2-generico".features f
join "prod2-generico"."store-features" sf on
f.id = sf."feature-id"
join "prod2-generico"."store-clusters" sc on
sc."store-id" = sf."store-id"
where
sf."feature-id" = 69
and sf."is-active" = 1
and sc."is-active" = 1
"""
store_clusters = rs_db.get_df(qc)
cluster_fullfilment_final = pd.DataFrame()
orders_raw = pd.DataFrame()
cluster_list = list(set(store_clusters['cluster-id'].unique()) - set(cluster_to_exclude_if_blank_none))
for cluster in cluster_list:
logger.info("")
logger.info("cluster {}".format(cluster))
temp = store_clusters[store_clusters['cluster-id'] == cluster]
cluster_stores = tuple(map(int, list(temp['store-id'].unique())))
# cluster_stores = tuple(map(int, list([2, 4, 7, 8, 230, 244, 264])))
logger.info("cluster stores {}".format(cluster_stores))
logger.info("")
summ_data = pd.DataFrame()
for i in cluster_stores:
logger.info("running for store {}".format(i))
logger.info("")
# analysis_store = tuple(map(int, list([i])))
analysis_store = i
# analysis_cluster = tuple(map(int, [x for x in cluster_stores if x != i]))
analysis_cluster = cluster_stores
# for manual run
# i = 8
# analysis_store = tuple(map(int, list([i])))
# analysis_cluster = tuple(map(int, [x for x in cluster_stores if x != i]))
# =============================================================================
# Fetch open PSOs for selected time period
# =============================================================================
if read_from_mysql:
orde = """
select
pso.`order-number`,
pso.`patient-request-id`,
pso.`zeno-order-id` ,
pso.`patient-id` ,
pso.id as `pso-id`,
pso.`order-source` ,
pso.`order-type` ,
pso.`status`,
pso.`created-at`,
pso.`store-id` ,
s.`name` as `store-name`,
pso.`drug-id` ,
pso.`drug-name` ,
pso.`requested-quantity`,
pso.`inventory-quantity` as `inventory-at-creation`,
pr.`required-quantity`,
pr.`quantity-to-order`
from
`prod2-generico`.`patients-store-orders` pso
left join `prod2-generico`.`patient-requests` pr on
pso.`patient-request-id` = pr.id
join `prod2-generico`.`stores` s on
s.`id` = pso.`store-id`
where
pr.`created-at` > '{start1}'
and pr.`created-at` <= '{start2}'
and pso.`store-id` = {analysis_store}
and pso.status not in ('billed', 'completed')
order by
pso.`created-at` DESC
""".format(start1=start1, start2=start2, analysis_store=analysis_store)
orders = pd.read_sql_query(orde, mysql_read.connection)
else:
orde = """
select
pso."order-number",
pso."patient-request-id",
pso."zeno-order-id" ,
pso."patient-id" ,
pso.id as "pso-id",
pso."order-source" ,
pso."order-type" ,
pso."status",
pso."created-at",
pso."store-id" ,
s."name" as "store-name",
pso."drug-id" ,
pso."drug-name" ,
pso."requested-quantity",
pso."inventory-quantity" as "inventory-at-creation",
pr."required-quantity",
pr."quantity-to-order"
from
"prod2-generico"."patients-store-orders" pso
left join "prod2-generico"."patient-requests" pr on
pso."patient-request-id" = pr.id
join "prod2-generico"."stores" s on s."id" = pso."store-id"
where
pr."created-at" > '{start1}'
and pr."created-at" <= '{start2}'
and pso."store-id" = {analysis_store}
and pso.status not in ('billed', 'completed')
order by pso."created-at" DESC;
""".format(start1=start1, start2=start2, analysis_store=analysis_store)
orders = rs_db.get_df(orde)
orders = orders[~orders['drug-id'].isnull()]
# =============================================================================
# cluster inventory
# =============================================================================
drugs = tuple(map(int, list(orders['drug-id'].unique())))
if len(drugs) < 2:
drugs = drugs + (0, 0)
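        # an empty or one-element Python tuple renders as "()" or "(x,)", which breaks the SQL IN clause,
        # so pad with dummy drug-id 0 to keep the generated query syntactically valid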
if read_from_mysql:
q_inv = """
select
i.`store-id`,
s.`name` as `store-name`,
i.`drug-id`,
sum(i.`quantity`) as `ci`
from
`prod2-generico`.`inventory-1` i
join `prod2-generico`.stores s on
i.`store-id` = s.`id`
where
i.`store-id` in {cluster_stores}
and i.`drug-id` in {drugs}
and i.`quantity` > 0
and i.`expiry` > (NOW() + INTERVAL 90 DAY)
group by
i.`store-id`,
s.`name`,
i.`drug-id`;
""".format(cluster_stores=cluster_stores, drugs=drugs)
df_inv = pd.read_sql_query(q_inv, mysql_read.connection)
else:
q_inv = """
select
i."store-id",
s."name" as "store-name",
i."drug-id",
sum(i."quantity") as "ci"
from
"prod2-generico"."inventory-1" i
join "prod2-generico".stores s on
i."store-id" = s."id"
where
i."store-id" in {cluster_stores}
and i."drug-id" in {drugs}
and i."quantity" > 0
and i."expiry" > dateadd(day,90,getdate())
group by
i."store-id",
s."name",
i."drug-id";
""".format(cluster_stores=cluster_stores, drugs=drugs)
df_inv = rs_db.get_df(q_inv)
clus_inv = df_inv[df_inv['store-id'].isin(analysis_cluster)]
# =============================================================================
        # Start - Pilot - Mulund West (store-id 4) can receive items but must not transfer items out
# =============================================================================
clus_inv = clus_inv[~clus_inv['store-id'].isin([4])]
# =============================================================================
        # End - Pilot - Mulund West can receive items but must not transfer items out
# =============================================================================
# cluster store inventory sum
clus_inv_st_sum = clus_inv.groupby(['drug-id', 'store-id', 'store-name'],
as_index=False).agg({
'ci': ['sum']
}).reset_index(drop=True)
clus_inv_st_sum.columns = ["-".join(x) for x in clus_inv_st_sum.columns.ravel()]
clus_inv_st_sum.rename(columns={'store-name-': 'clus-store-name',
'store-id-': 'store-id',
'drug-id-': 'drug-id',
'ci-sum': 'clus-store-inv'}, inplace=True)
# cluster inventory sum
clus_inv_all_sum = clus_inv.groupby(['drug-id'],
as_index=False).agg({
'ci': ['sum']
}).reset_index(drop=True)
clus_inv_all_sum.columns = ["-".join(x) for x in clus_inv_all_sum.columns.ravel()]
clus_inv_all_sum.rename(columns={'drug-id-': 'drug-id',
'ci-sum': 'clus-inv'}, inplace=True)
orders_clus_inv = pd.merge(left=orders,
right=clus_inv_st_sum,
how='left', on=['drug-id'], suffixes=('-x', '-y')).rename(
columns={'store-id-y': 'clus-store-id'})
orders_clus_inv = pd.merge(left=orders_clus_inv,
right=clus_inv_all_sum,
how='left', on=['drug-id'], suffixes=('-x', '-y')).rename(
columns={'store-id-y': 'clus-store-id'})
summ_data = summ_data.append(orders_clus_inv)
summ_data['clus-inv'].fillna(0, inplace=True)
summ_data['clus-store-inv'].fillna(0, inplace=True)
summ_data['required-quantity'].fillna(0, inplace=True)
summ_data['quantity-to-order'].fillna(0, inplace=True)
summ_data['clus-inv-diff'] = (summ_data['clus-store-inv'] -
summ_data['quantity-to-order'])
# remove same store transfer due to partial inventory
summ_data = summ_data[~(summ_data['store-id-x'] ==
summ_data['clus-store-id'])]
# for QC later
orders_raw = orders_raw.append([summ_data])
# =============================================================================
# MOST CRITICAL: tagging where to fulfill from
# the logic can be cleaner
# =============================================================================
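        # Tag meanings (derived from the conditions below):
        #   ff-using-self-store   - quantity-to-order is 0, the ordering store can serve from its own stock
        #   ff-using-single-store - a single cluster store holds enough stock to cover the full quantity-to-order
        #   ff-using-multi-store  - no single store suffices but the combined cluster inventory does
        #   ff-using-DC-WH        - cluster inventory cannot cover the quantity, falls back to DC/warehouse
        # Rows first tagged ff-using-DC-WH are re-tagged single/multi further below, based on how many
        # cluster stores hold (partial) stock for the PSO.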
conditions = [
(summ_data['quantity-to-order'] > 0) & (summ_data['clus-inv-diff'] >= 0),
(summ_data['quantity-to-order'] == 0)
]
choices = ['ff-using-single-store', 'ff-using-self-store']
summ_data['color'] = np.select(conditions, choices)
summ_data1 = summ_data[summ_data['color'].isin(['ff-using-single-store',
'ff-using-self-store'])]
summ_data1 = summ_data1[['order-number', 'patient-id',
'pso-id', 'drug-id',
'color']].drop_duplicates().rename(
columns={'color': 'tag'})
summ_data = pd.merge(left=summ_data, right=summ_data1,
how='left', on=['order-number', 'patient-id',
'pso-id', 'drug-id'])
conditions = [
(
(summ_data['quantity-to-order'] > 0) &
(summ_data['clus-inv'] >= summ_data['quantity-to-order']) &
(summ_data['tag'].isnull())
)
]
choices = ['ff-using-multi-store']
summ_data['tag'] = np.select(conditions, choices,
default=summ_data['tag'])
summ_data['tag'].fillna('ff-using-DC-WH', inplace=True)
        # handle partial fulfilment cases: count how many cluster stores hold stock rows for each PSO
summ_data_temp = summ_data.groupby(['order-number',
'pso-id']).size().reset_index().rename(
columns={0: 'cumsum'})
summ_data['order-number'] = summ_data['order-number'].astype(object)
summ_data_temp['order-number'] = summ_data_temp['order-number'].astype(object)
summ_data['pso-id'] = summ_data['pso-id'].astype(int)
summ_data_temp['pso-id'] = summ_data_temp['pso-id'].astype(int)
summ_data = pd.merge(left=summ_data, right=summ_data_temp,
how='left', on=['order-number', 'pso-id'])
conditions = [
(
(summ_data['quantity-to-order'] > 0) &
(summ_data['tag'] == 'ff-using-DC-WH') &
(summ_data['cumsum'] == 1)
),
(
(summ_data['quantity-to-order'] > 0) &
(summ_data['tag'] == 'ff-using-DC-WH') &
(summ_data['cumsum'] > 1)
)
]
choices = ['ff-using-single-store', 'ff-using-multi-store']
summ_data['tag'] = np.select(conditions, choices,
default=summ_data['tag'])
# =============================================================================
# distance calculation
# =============================================================================
strs = """
select
*
from
(
select
sd."store-id-x" as "store-id-x",
sd."store-id-y" as "clus-store-id",
sd."distance-on-road" as "distance"
from
"prod2-generico"."store-distance" sd
where
sd."store-id-x" in {})x
where
x."clus-store-id" in {}
""".format(cluster_stores, cluster_stores)
str_info_cross = rs_db.get_df(strs)
summ_data = pd.merge(summ_data,
str_info_cross[['store-id-x', 'clus-store-id', 'distance']],
how='left',
left_on=['store-id-x', 'clus-store-id'],
right_on=['store-id-x', 'clus-store-id'])
summ_data_clean = summ_data.drop(summ_data[
# ((summ_data['tag'] == 'ff_using_single_store') &
# (summ_data.clus_inv_diff < 0)) |
(summ_data['tag'] == 'ff-using-self-store') |
(summ_data['tag'] == 'ff-using-DC-WH')].index)
# this is likely redundant
str_avail_cnt = summ_data_clean.groupby(['clus-store-id'])['pso-id'].count().reset_index().rename(
columns={'pso-id': 'drug-availability-cnt'})
summ_data_clean = pd.merge(summ_data_clean,
str_avail_cnt,
how='left',
left_on=['clus-store-id'],
right_on=['clus-store-id'])
# =============================================================================
# ff_using_single_store
# =============================================================================
ff_using_single_store = summ_data_clean[
summ_data_clean['tag'] == 'ff-using-single-store']
ff_using_single_store_best = ff_using_single_store.sort_values(
['clus-store-inv', 'distance'],
ascending=[False, True]).groupby(['order-number',
'pso-id']).head(1)
ff_using_single_store_best = ff_using_single_store_best[['order-number',
'pso-id',
'clus-store-id',
'clus-store-name']]. \
rename(columns={'clus-store-id': 'best-store-id',
'clus-store-name': 'best-store-name'})
ff_using_single_store_best_all = pd.merge(ff_using_single_store,
ff_using_single_store_best,
how='left',
left_on=['order-number', 'pso-id'],
right_on=['order-number', 'pso-id'])
ff_using_single_store_final = ff_using_single_store_best_all[
ff_using_single_store_best_all['clus-store-id'] ==
ff_using_single_store_best_all['best-store-id']]
# =============================================================================
# ff_using_multi_store
# =============================================================================
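        # Greedy allocation: for each PSO, cluster stores are taken in descending order of available
        # inventory and their quantities accumulated until the running total first covers
        # quantity-to-order; stores beyond that cut-off stay 'red' and are dropped, the rest
        # ('green') become transfer lines.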
ff_using_multi_store = summ_data_clean[
summ_data_clean['tag'] == 'ff-using-multi-store']
ff_using_multi_store.sort_values(['order-number',
'pso-id',
'clus-store-inv'],
ascending=[True, True, False], inplace=True)
ff_using_multi_store['cumsum'] = ff_using_multi_store.groupby(['order-number',
'pso-id'])['clus-store-inv'].cumsum()
ff_using_multi_store['cond'] = np.where(
ff_using_multi_store['cumsum'] >= ff_using_multi_store['quantity-to-order'],
'red', 'green')
ff_using_multi_store['temp'] = ff_using_multi_store['cumsum'].mask(ff_using_multi_store['cond'] != 'red').groupby(
ff_using_multi_store['pso-id']).transform('first').astype(int, errors='ignore')
ff_using_multi_store['cond'] = np.where(((ff_using_multi_store['cumsum'] ==
ff_using_multi_store['temp']) &
(ff_using_multi_store['cond'] == 'red')),
'green',
ff_using_multi_store['cond'])
del ff_using_multi_store['temp']
ff_using_multi_store_final = ff_using_multi_store[
ff_using_multi_store['cond'] == 'green']
ff_using_multi_store_final['best-store-id'] = ff_using_multi_store_final['clus-store-id']
ff_using_multi_store_final['best-store-name'] = ff_using_multi_store_final['clus-store-name']
ff_using_single_multi_store = ff_using_single_store_final.append(
[ff_using_multi_store_final])
# =============================================================================
# final dataset
# =============================================================================
ff_using_single_multi_store['to-pick'] = np.where((
(ff_using_single_multi_store['clus-store-inv'] >=
ff_using_single_multi_store['quantity-to-order'])),
ff_using_single_multi_store['quantity-to-order'],
ff_using_single_multi_store['clus-store-inv'])
ff_using_single_multi_store['cluster-id'] = cluster
cluster_fullfilment_final = (cluster_fullfilment_final.append([ff_using_single_multi_store]))
# =============================================================================
# check whether algorithm missed any PSOs
# =============================================================================
check_final = cluster_fullfilment_final.groupby(['pso-id'])['to-pick'].sum().reset_index()
check_first = orders_raw[
orders_raw['quantity-to-order'] > 0][[
'pso-id', 'drug-name',
'quantity-to-order',
'clus-inv']].drop_duplicates().reset_index(drop=True)
check_first = check_first.groupby(['pso-id']).agg(
{'quantity-to-order': [np.sum],
'drug-name': [max],
'clus-inv': [np.max]}).reset_index()
check_first.columns = ["-".join(x) for x in check_first.columns.ravel()]
check_first.rename(columns={'pso-id-': 'pso-id',
'drug-name-max': 'drug-name',
'quantity-to-order-sum': 'quantity-to-order',
'clus-inv-amax': 'clus-inv'},
inplace=True)
check_first1 = check_first
check_first = check_first[check_first['clus-inv'] > 0]
check_first_final = pd.merge(check_first1,
check_final,
how='left',
left_on=['pso-id'],
right_on=['pso-id'])
logger.info("")
logger.info("missed {}".format((len(check_first) - len(check_final))))
logger.info("missed PSOs {}".
format(list(sorted(set(check_first['pso-id']) -
set(check_final['pso-id'])))))
logger.info("")
# =============================================================================
# for pushing to DSS/PROD
# =============================================================================
output_df = cluster_fullfilment_final[['pso-id',
'best-store-id',
'best-store-name',
'store-id-x',
'store-name',
'drug-id',
'drug-name',
'to-pick',
'created-at',
'cluster-id']].rename(columns={
'best-store-name': 'from-store',
'store-name': 'to-store',
'to-pick': 'item-quantity',
'created-at': 'pso-created_at'
})
output_df['slot-date'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d')
output_df['is-active'] = 1
output_df['created-by'] = '[email protected]'
output_df['updated-at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
output_df.sort_values(by=['from-store'], ascending=False,
inplace=True)
# for DSS
output_df.rename(columns={'pso-id': 'patient-store-order-id',
'best-store-id': 'from-store-id',
'best-store-name': 'from-store-name',
'store-id-x': 'to-store-id',
'store-name': 'to-store-name'}, inplace=True)
# for MySQL
output_df_mysql = output_df[['patient-store-order-id',
'item-quantity',
'from-store-id',
'to-store-id',
'slot-date',
'is-active',
'created-by',
'updated-at']]
output_df_mysql.rename(columns={
'patient-store-order-id': 'patient-store-order-id',
'item-quantity': 'item-quantity',
'from-store-id': 'from-store-id',
'to-store-id': 'to-store-id',
'slot-date': 'slot-date',
'is-active': 'is-active',
'created-by': 'created-by',
'updated-at': 'updated-at'}, inplace=True)
logger.info("")
logger.info("completed for cluster {}".format(cluster_stores))
logger.info("")
logger.info("{} PSOs created from {} to {}".format(len(output_df),
start1, start2))
logger.info("")
pso_cluster_fulfillment = output_df
pso_cluster_fulfillment['pso-created-at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
# output_df_json = json.loads(output_df.to_json(orient='records'))
pso_cluster_fulfillment[['from-store-id', 'to-store-id', 'drug-id', 'item-quantity']] = pso_cluster_fulfillment[
['from-store-id', 'to-store-id', 'drug-id', 'item-quantity']].apply(pd.to_numeric, errors='ignore').astype('Int64')
#
# =============================================================================
# writing to PG
# =============================================================================
# pushing pso_cluster_fulfillment table to redshift table
status2 = False
number_of_writing_attempts = 0
if int(write_to_mysql) == 1:
try:
number_of_writing_attempts = number_of_writing_attempts + 1
schema = 'prod2-generico'
table_name = 'pso-cluster-fulfillment'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
s3.write_df_to_db(df=pso_cluster_fulfillment[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(' ')
logger.info('table appended to Redshift')
# pushing to mysql prod
output_df_mysql.to_sql(
name='pso-stock-transfer-mapping', con=mysql_write.engine,
if_exists='append',
chunksize=500, method='multi', index=False)
logger.info(' ')
logger.info('table appended to MySQL')
status2 = True
# =============================================================================
# Sending Notification to stores
# =============================================================================
if int(api_hit) == 1:
logger.info('sleep for 10 second')
time.sleep(10)
mysql_read2 = MySQL()
mysql_read2.open_connection()
                # read the newly inserted pso-stock-transfer-mapping rows
if env == 'dev':
mysql_schema = '`test-generico`'
else:
mysql_schema = '`prod2-generico`'
mysql_inserted_items_query = """
SELECT
pstm.id ,
pstm.`to-store-id` ,
pstm.`from-store-id`
FROM
{schema}.`pso-stock-transfer-mapping` pstm
WHERE
pstm.`created-at` >= '{code_started_at}'
""".format(code_started_at=code_started_at, schema=mysql_schema)
inserted_items = pd.read_sql_query(mysql_inserted_items_query, mysql_read2.connection)
# logger.info(inserted_items)
# logger.info(mysql_inserted_items_query)
mysql_read2.close()
for index, row in inserted_items.iterrows():
payload = {
"destinations": [
row['from-store-id'].astype(str)
],
"message": "cluster-request",
"payload": f"{row['id']}-{row['to-store-id']}"
}
response = ws.send(payload=payload)
logger.info('API hit successful for Notification in billing panel')
else:
logger.info('No API hit - Parameter is set as 0')
except Exception as error:
logger.exception(error)
logger.info(f'writing to mysql failed - attempt - {number_of_writing_attempts}')
status2 = False
if status2 == False:
logger.info('Writing to mysql table failed, Mostly it is due to deadlock issue, sleep for 3 mins')
time.sleep(180)
logger.info('slept for 3 mins')
try:
number_of_writing_attempts = number_of_writing_attempts + 1
logger.info(f'attempt number - {number_of_writing_attempts}')
# pushing to mysql prod
output_df_mysql.to_sql(
name='pso-stock-transfer-mapping', con=mysql_write.engine,
if_exists='append',
chunksize=500, method='multi', index=False)
logger.info(' ')
logger.info('table appended to MySQL')
status2 = True
# =============================================================================
# Sending Notification to stores
# =============================================================================
if int(api_hit) == 1:
logger.info('sleep for 10 second')
time.sleep(10)
mysql_read3 = MySQL()
mysql_read3.open_connection()
                # read the newly inserted pso-stock-transfer-mapping rows
if env == 'dev':
mysql_schema = '`test-generico`'
else:
mysql_schema = '`prod2-generico`'
mysql_inserted_items_query = """
SELECT
pstm.id ,
pstm.`to-store-id` ,
pstm.`from-store-id`
FROM
{schema}.`pso-stock-transfer-mapping` pstm
WHERE
pstm.`created-at` >= '{code_started_at}'
""".format(code_started_at=code_started_at, schema=mysql_schema)
inserted_items = pd.read_sql_query(mysql_inserted_items_query, mysql_read3.connection)
mysql_read3.close()
for index, row in inserted_items.iterrows():
payload = {
"destinations": [
row['from-store-id'].astype(str)
],
"message": "cluster-request",
"payload": f"{row['id']}-{row['to-store-id']}"
}
response = ws.send(payload=payload)
logger.info('API hit successful for Notification in billing panel')
else:
logger.info('No API hit - Parameter is set as 0')
except Exception as error:
logger.exception(error)
logger.info(f'writing to mysql failed - attempt - {number_of_writing_attempts}')
status2 = False
if status2 is True:
status = 'Success'
else:
status = 'Failed'
else:
status = 'test'
pso_added_uri = s3.save_df_to_s3(df=check_first_final,
file_name='pso_transfer_details_{}_{}.csv'.format(start1, start2))
end_time = datetime.now(tz=gettz('Asia/Kolkata'))
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject='{} - {} - {} PSOs, {} clus_missed, {} algo_missed from {} to {}'.format(
env, status,
len(check_first1),
(len(check_first1) - len(check_final)),
(len(check_first) - len(check_final)),
start1, start2),
mail_body=f" pso-stock-transfer-mapping table update - {status}\n"
f"Time for job completion - {min_to_complete} mins\n"
f" Number of writing attempts - {number_of_writing_attempts}",
to_emails=email_to, file_uris=[pso_added_uri])
rs_db.close_connection()
mysql_write.close()
mysql_read.close() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/cluster/pso-cluster-fulfillment.py | pso-cluster-fulfillment.py |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
from datetime import datetime, timedelta
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-rn', '--refresh_for_n_days', default="10", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
refresh_for_n_days = args.refresh_for_n_days
os.environ['env'] = env
logger = get_logger(level = 'INFO')
logger.info(f"env: {env}")
#secrets = config.secrets
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
start_time = datetime.now()
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("refresh_for_n_days - " + str(refresh_for_n_days))
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
# =============================================================================
# set parameters
# =============================================================================
#pso_date1 = "2022-01-07 00:00:00" # launch date of 3 hour delivery # Use this when you want to refresh whole table
pso_date1 = (datetime.now(tz=gettz('Asia/Kolkata')) - timedelta(days=int(refresh_for_n_days))).strftime('%Y-%m-%d %H:%M:%S')  # Use this to refresh only the last refresh_for_n_days days
pso_date2 = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
# =============================================================================
# store clusters
# =============================================================================
qc = """
select
sf."feature-id",
f.feature,
sf."store-id",
sf."is-active",
sc."cluster-id",
c.name as "cluster-name",
sc."is-active" as "sc-is-active"
from
"prod2-generico".features f
join "prod2-generico"."store-features" sf on
f.id = sf."feature-id"
join "prod2-generico"."store-clusters" sc on
sc."store-id" = sf."store-id"
join "prod2-generico".clusters c on
c.id = sc."cluster-id"
where
sf."feature-id" = 69
and sf."is-active" = 1
and sc."is-active" = 1
"""
store_clusters = rs_db.get_df(qc)
# =============================================================================
# fetching all stores
# =============================================================================
stores_query = """
SELECT
"id" AS "store-id"
FROM
"prod2-generico"."stores"
"""
stores = rs_db.get_df(stores_query)
all_stores = stores.merge(store_clusters, on = 'store-id', how = 'left')
all_stores['cluster-id'] = all_stores['cluster-id'].fillna(0)
all_stores['cluster-id'] = all_stores['cluster-id'].astype(int)
clusterlist = sorted(all_stores['cluster-id'].unique())
orders_transfers_all = pd.DataFrame()
for cluster in clusterlist :
logger.info("")
logger.info("cluster {}".format(cluster))
temp = all_stores[all_stores['cluster-id'] == cluster]
cluster_stores = tuple(map(int, list(temp['store-id'].unique())))
# =============================================================================
# GET PRs
# =============================================================================
q1 = """
select
pso."order-number",
pso.id as "patient-store-order-id",
pso."patient-request-id",
pso."zeno-order-id" ,
pso."patient-id" ,
pso."order-source" ,
pso."order-type" ,
pso."status" as "pso-status",
pso."created-at" as "pso-created-at",
pso."store-id" ,
s."name" as "store-name",
pso."drug-id" ,
pso."drug-name" ,
pso."requested-quantity",
pso."inventory-quantity" as "inventory-at-creation",
pr."required-quantity",
pr."quantity-to-order",
pso."bill-id",
b."created-at" as "bill-date",
dt."delivered-at",
ss."type" as "slot-type",
pso."slot-date" ,
ss."start-time" as "slot-start-time",
ss."end-time" as "slot-end-time",
s."franchisee-id",
pso."slot-recommendation-status"
from
"prod2-generico"."patients-store-orders" pso
left join "prod2-generico"."patient-requests" pr on
pso."patient-request-id" = pr.id
join "prod2-generico"."stores" s on
s."id" = pso."store-id"
left join "prod2-generico"."bills-1" b on
b."id" = pso."bill-id"
left join "prod2-generico"."delivery-tracking" dt on
dt."patient-store-order-id" = pso."id"
left join "prod2-generico"."store-slots" ss on
pso."slot-id" = ss.id
where
pso."created-at" >= '{pso_date1}'
and pso."created-at" <= '{pso_date2}'
and pso."store-id" in {cluster_stores}
order by
pso."created-at" desc;
""".format(pso_date1=pso_date1, pso_date2=pso_date2, cluster_stores=cluster_stores)
orders = rs_db.get_df(q1)
orders['required-quantity'].fillna(0, inplace=True)
orders['quantity-to-order'].fillna(0, inplace=True)
orders['availability-tag'] = np.where(
orders['quantity-to-order'] > 0, "pr-short", np.nan)
orders['availability-tag'] = orders.fillna('').sort_values(
['order-number',
'availability-tag']).groupby('order-number')['availability-tag'].transform('last')
orders['availability-tag'] = np.where(
orders['availability-tag'] != 'pr-short',
"pr-not-short", orders['availability-tag'])
# =============================================================================
# Initial Slot
# =============================================================================
qslot = """
select
t."order-number",
t."recommended-slot-date",
t."recommended-slot-id",
t."selected-slot"
from
(SELECT
pso."order-number",
pso."slot-date",
psr."recommended-slot-date",
pso."slot-id",
psr."recommended-slot-id",
(case WHEN pso."slot-date" = psr."recommended-slot-date" AND pso."slot-id" = psr."recommended-slot-id" THEN 'recommended_slot'
ELSE
'not_recommended_slot'
END) "selected-slot",
ROW_NUMBER() OVER(PARTITION BY pso."order-number" ORDER BY pso."slot-date", pso."slot-id" desc) AS "row-value"
FROM
"prod2-generico"."patients-store-orders" pso
LEFT JOIN "prod2-generico"."pso-slot-recommendation" psr ON
pso."order-number" = psr."order-number"
LEFT JOIN "prod2-generico"."store-slots" ss ON
pso."slot-id" = ss.id
WHERE
pso."created-at" >='{pso_date1}'
and pso."created-at" <= '{pso_date2}'
and pso."store-id" in {cluster_stores}) t
where "row-value" = 1;
""".format(pso_date1=pso_date1, pso_date2=pso_date2, cluster_stores=cluster_stores)
orders_slot_recommendation = rs_db.get_df(qslot)
orders = orders.merge(orders_slot_recommendation,on ='order-number',how='left' )
# =============================================================================
# Get transfers
# =============================================================================
if cluster!= 0:
trnfrs = tuple(map(int, list(orders['patient-store-order-id'].unique())))
q2 = """
select
pso."order-number",
pstm."patient-store-order-id",
pstm."from-store-id",
pstm."to-store-id",
pstm."item-quantity" as "to-be-transferred-qty",
sti."quantity" as "actual-transferred-qty",
st."total-items",
pstm."status" as "tn-status",
st."status" as "transfer-status",
st."initiated-at",
st."transferred-at",
st."received-at",
DATEDIFF(min,st."transferred-at",st."received-at") as "transfer-minutes",
zo."created-at" as "zeno-created-at"
from
"prod2-generico"."pso-stock-transfer-mapping" pstm
left join "prod2-generico"."patients-store-orders" pso on
pso.id = pstm."patient-store-order-id"
left join "prod2-generico"."stock-transfers-1" st on
st.id = pstm."stock-transfer-id"
left join "prod2-generico"."stock-transfer-items-1" sti on
sti.id = pstm."stock-transfer-item-id"
left join "prod2-generico"."zeno-order" zo on
zo.id = pso."zeno-order-id"
where
pstm."patient-store-order-id" in {}
""".format(trnfrs)
transfers = rs_db.get_df(q2)
transfers['received-at'] = pd.to_datetime(transfers['received-at'],
format='%Y-%m-%d %H:%M:%S',
errors='coerce')
transfers_summ = transfers.groupby(['order-number',
'patient-store-order-id']).agg(
{'initiated-at': [np.max],
'transferred-at': [np.max],
'received-at': [np.max],
'zeno-created-at': [np.max],
'to-be-transferred-qty': [np.sum],
'actual-transferred-qty': [np.sum]}).reset_index()
transfers_summ.columns = ["-".join(x) for x in transfers_summ.columns.ravel()]
transfers_summ.rename(columns={'initiated-at-amax': 'initiated-at',
'transferred-at-amax': 'transferred-at',
'received-at-amax': 'received-at',
'to-be-transferred-qty-sum': 'to-be-transferred-qty',
'actual-transferred-qty-sum': 'actual-transferred-qty',
'transfer-status-': 'transfer-status',
'order-number-': 'order-number',
'patient-store-order-id-': 'patient-store-order-id',
'zeno-created-at-amax': 'zeno-created-at'},
inplace=True)
orders_transfers = pd.merge(left=orders, right=transfers_summ,
how='left', on=['order-number',
'patient-store-order-id'])
orders_transfers['to-be-transferred-qty'].fillna(0, inplace=True)
orders_transfers['actual-transferred-qty'].fillna(0, inplace=True)
orders_transfers['zeno-created-at'] = pd.to_datetime(orders_transfers['zeno-created-at'])
# lead to pso creation
orders_transfers['lead-to-pso-creation-hours'] = (
(orders_transfers['pso-created-at'] - orders_transfers['zeno-created-at'])
/ np.timedelta64(1, 'h'))
        # PSO to transfer initiated
orders_transfers['pso-to-transfer-initiate-hours'] = (
(orders_transfers['initiated-at'] - orders_transfers['pso-created-at'])
/ np.timedelta64(1, 'h'))
# PSO to transfer transferred
orders_transfers['pso-to-transfer-transfer-hours'] = (
(orders_transfers['transferred-at'] - orders_transfers['pso-created-at'])
/ np.timedelta64(1, 'h'))
        # PSO to transfer received
orders_transfers['pso-to-transfer-received-hours'] = (
(orders_transfers['received-at'] - orders_transfers['pso-created-at'])
/ np.timedelta64(1, 'h'))
if cluster == 0:
orders_transfers= orders
# PSO to bill
orders_transfers['pso-to-bill-hours'] = ((orders_transfers['bill-date'] - orders_transfers['pso-created-at'])
/ np.timedelta64(1, 'h'))
orders_transfers['pso-to-bill-hours'] = np.where(
orders_transfers['pso-to-bill-hours'] < 0, 0, orders_transfers['pso-to-bill-hours'])
# PSO to delivered
    conditions = [
        orders_transfers['delivered-at'] == '0101-01-01 00:00:00',
        orders_transfers['delivered-at'] == '101-01-01 00:00:00',
        orders_transfers['delivered-at'] != '0101-01-01 00:00:00'
    ]
    choices = [None, None, orders_transfers['delivered-at']]
    orders_transfers['delivered-at'] = np.select(conditions, choices)
orders_transfers['delivered-at'] = pd.to_datetime(orders_transfers['delivered-at'], errors = 'coerce')
orders_transfers['pso-to-delivered-hours'] = (
(orders_transfers['delivered-at'] - orders_transfers['pso-created-at'])
/ np.timedelta64(1, 'h'))
orders_transfers['cluster-id'] = cluster
# =============================================================================
# Cluster Name
# =============================================================================
qc1 = """
select
c.id AS "cluster-id" ,
c.name AS "cluster-name"
from
"prod2-generico".clusters c
"""
cluster_info = rs_db.get_df(qc1)
orders_transfers = pd.merge(orders_transfers, cluster_info, on='cluster-id', how='left')
if cluster == 0:
orders_transfers['cluster-name'] = 'not-in-any-cluster'
# =============================================================================
# OTIF calculation -- In Full Flag
# =============================================================================
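    # In-full definitions used below:
    #   in-full     - count of distinct drugs billed >= count of distinct drugs ordered
    #   qty-in-full - total quantity billed >= total requested quantity of the order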
bills = tuple(map(int, list(orders_transfers[orders_transfers['bill-id'].notna()]['bill-id'].unique())))
qc2 = """
select
bi."bill-id",
count(distinct i."drug-id") as "drug-billed-cnt",
sum(bi.quantity) as "quantity-billed-sum"
from
"prod2-generico"."bills-1" b
join "prod2-generico"."bill-items-1" bi on
b.id = bi."bill-id"
join "prod2-generico"."inventory-1" i on
i.id = bi."inventory-id"
where
bi."bill-id" in {}
group by
bi."bill-id"
""".format(bills)
billed = rs_db.get_df(qc2)
orders_transfers1 = pd.merge(left=orders_transfers,
right=billed,
how='left', on=['bill-id'])
orders_transfers_d_infull1 = orders_transfers.groupby(
['order-number'])['drug-id'].nunique().reset_index().rename(
columns={'drug-id': 'drug-ordered-cnt'})
orders_transfers_q_infull1 = orders_transfers.groupby(
['order-number']).agg(
{'requested-quantity': [np.sum]}).reset_index().rename(
columns={'requested-quantity': 'requested-quantity-ordered-sum'})
orders_transfers_q_infull1.columns = ["-".join(x) for x in orders_transfers_q_infull1.columns.ravel()]
orders_transfers_q_infull1.rename(columns={'requested-quantity-ordered-sum-sum': 'requested-quantity-ordered-sum',
'order-number-': 'order-number'},
inplace=True)
orders_transfers_infull1 = pd.merge(orders_transfers_d_infull1, orders_transfers_q_infull1,
on='order-number', how='inner')
orders_transfers2 = pd.merge(left=orders_transfers1,
right=orders_transfers_infull1,
how='left', on=['order-number'])
orders_transfers2['in-full-flag'] = np.where(
orders_transfers2['drug-billed-cnt'] >= orders_transfers2['drug-ordered-cnt'],
"in-full", "not-in-full")
orders_transfers2['qty-in-full-flag'] = np.where(
orders_transfers2['quantity-billed-sum'] >= orders_transfers2['requested-quantity-ordered-sum'],
"qty-in-full", "qty-not-in-full")
orders_transfers2['drug-billed-cnt'].fillna(0, inplace=True)
orders_transfers2['quantity-billed-sum'].fillna(0, inplace=True)
orders_transfers2['drug-ordered-cnt'].fillna(0, inplace=True)
orders_transfers2['requested-quantity-ordered-sum'].fillna(0, inplace=True)
# del orders_transfers2['drug_ordered_cnt']
# del orders_transfers2['drug_billed_cnt']
# del orders_transfers2['quantity_billed_sum']
# del orders_transfers2['requested_quantity_ordered_sum']
# =============================================================================
# OTIF calculation -- on_time_flag
# =============================================================================
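    # On-time definition: delivery orders must be delivered, and non-delivery orders billed,
    # on or before the end of the promised slot (slot-date + slot-end-time).
    # otif = on-time AND in-full; qty-otif = on-time AND qty-in-full.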
def string_to_time(x):
try:
return datetime.strptime(x, "%I:%M %p").time()
except:
try:
return datetime.strptime(x, "%I:%M%p").time()
except:
return "Can't convert"
orders_transfers2['slot-end-time-format'] = orders_transfers2['slot-end-time'].apply(lambda x: string_to_time(x))
orders_transfers2['slot-end-date-time'] = orders_transfers2.apply(lambda x:
datetime.combine(x['slot-date'],
x['slot-end-time-format']), 1)
conditions = [
(orders_transfers2['order-type']== 'delivery') & (orders_transfers2['delivered-at'] <= orders_transfers2['slot-end-date-time']),
(orders_transfers2['order-type'] != 'delivery') & (orders_transfers2['bill-date'] <= orders_transfers2['slot-end-date-time']),
(orders_transfers2['delivered-at'] > orders_transfers2['slot-end-date-time'])
]
choices = ['on-time','on-time','not-on-time']
orders_transfers2['on-time-slot-basis-flag'] = np.select(conditions, choices, default='not-on-time')
orders_transfers2['otif-flag'] = np.where(
((orders_transfers2['in-full-flag'] == 'in-full') &
(orders_transfers2['on-time-slot-basis-flag'] == 'on-time')),
"otif", "not-otif")
orders_transfers2['qty-otif-flag'] = np.where(
((orders_transfers2['qty-in-full-flag'] == 'qty-in-full') &
(orders_transfers2['on-time-slot-basis-flag'] == 'on-time')),
"qty-otif", "qty-not-otif")
logger.info("")
logger.info(
"length is same {}".format(len(orders) == len(orders_transfers2)))
logger.info("")
orders_transfers_all = orders_transfers_all.append(orders_transfers2)
pso_cluster_fulfillment_board = orders_transfers_all
# pso_cluster_fulfillment_board.to_csv(r"D:\3 hours delivery\ClusterFulfillment\Quantity_OTIF\data1.csv")
pso_cluster_fulfillment_board['updated-at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
# Convert integer-like columns (which may contain nulls) to the nullable Int64 dtype
int_cols = ['patient-request-id', 'zeno-order-id', 'drug-id', 'required-quantity',
            'quantity-to-order', 'bill-id', 'drug-billed-cnt', 'quantity-billed-sum',
            'recommended-slot-id', 'inventory-at-creation']
if cluster != 0:
    int_cols = int_cols + ['to-be-transferred-qty', 'actual-transferred-qty']
pso_cluster_fulfillment_board[int_cols] = pso_cluster_fulfillment_board[int_cols].apply(
    pd.to_numeric, errors='ignore').astype('Int64')
# =============================================================================
# writing to Redshift
# =============================================================================
schema = 'prod2-generico'
table_name = 'pso-cluster-fulfillment-board-temp'
table_name2 = 'pso-cluster-fulfillment-board'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
table_info2 = helper.get_table_info(db=rs_db_write, table_name=table_name2, schema=schema)
status2 = False
status1 = False
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' delete
from "{schema}"."{table_name}" '''
rs_db_write.execute(truncate_query)
logger.info(str(table_name) + ' table deleted')
s3.write_df_to_db(df=pso_cluster_fulfillment_board[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
status2 = True
if status2:
if isinstance(table_info2, type(None)):
raise Exception(f"table: {table_name2} do not exist, create the table first")
else:
logger.info(f"Table:{table_name2} exists")
delete_main_query = f'''
delete
from
"{schema}"."{table_name2}"
where
"{schema}"."{table_name2}"."pso-created-at" >= '{pso_date1}' '''
rs_db_write.execute(delete_main_query)
logger.info(str(table_name2) + ' table deleted')
insert_main_query = f'''
insert
into
"{schema}"."{table_name2}"
select
*
from
"{schema}"."{table_name}"
'''
rs_db_write.execute(insert_main_query)
status1 = True
logger.info(str(table_name2) + ' table uploaded')
if status1 is True:
status = 'Success'
else:
status = 'Failed'
#logger.close()
end_time = datetime.now()
difference = end_time - start_time
min_to_complete = round(difference.total_seconds()/60 , 2)
email = Email()
email.send_email_file(subject=f"{env}-{status} : {table_name2} table updated",
mail_body=f"{table_name2} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/cluster/pso-cluster-fulfillment-board.py | pso-cluster-fulfillment-board.py |
# created by - saurav maskar
# Objective - push all failed transfer notes into store audit every day.
# Transfer notes fail because the system shows inventory at the store
# but it is not found locally, so those store-drug combinations need to be audited.
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
from datetime import datetime, timedelta
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected],[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
s3 = S3()
start_time = datetime.now()
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
# =============================================================================
# set parameters
# =============================================================================
# Change pso_date1 to the date whose failed transfers should be added to audit; defaults to yesterday
pso_date1 = (datetime.now() - timedelta(days = 1)).date()
pso_date2 = (datetime.now() - timedelta(days = 1)).date()
# =============================================================================
# expired pso
# =============================================================================
q1 = """
Select
pstm."from-store-id" AS "store-id" ,
pso."drug-id"
FROM
"prod2-generico"."pso-stock-transfer-mapping" pstm
Left JOIN "prod2-generico"."pso-stock-transfer-inventory-mapping" pstim ON
pstm.id = pstim."pso-stock-transfer-mapping-id"
Left join "prod2-generico"."patients-store-orders" pso
ON
pstm."patient-store-order-id" = pso.id
WHERE
DATE(pso."created-at") >= '{}'
and DATE(pso."created-at") <= '{}'
and pstm."status" = 'expired'
order by
pso."created-at" DESC
""".format(pso_date1, pso_date2)
expired_pso = rs_db.get_df(q1)
expired_pso['store-drug'] = expired_pso['store-id'].astype(str) + "-" +expired_pso['drug-id'].astype(str)
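# 'store-drug' is a composite key of the form '<store-id>-<drug-id>' (e.g. '4-12345'),
# used to de-duplicate against combinations already active in audit-extra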
logger.info("")
logger.info("fetched expired pso's data, total items - {}".format(len(expired_pso['store-drug'])) )
logger.info("")
#=============================================================================
# store-drug combinations already active in audit-extra (not yet scanned) must be excluded
#=============================================================================
q2 = """
select
ae."drug-id" ,
ae."store-id"
FROM
"prod2-generico"."audit-extra" ae
WHERE
ae.status = 'active'
"""
store_drugs_in_audit = rs_db.get_df(q2)
store_drugs_in_audit['store-drug-audit'] = store_drugs_in_audit['store-id'].astype(str) + "-" +store_drugs_in_audit['drug-id'].astype(str)
logger.info("")
logger.info("fetched active store-drug combinations in audit extra - {}".format(len(store_drugs_in_audit['store-drug-audit'])))
logger.info("")
#Checking if store-drug combination is already in audit
expired_pso_audit_check = pd.merge(expired_pso ,store_drugs_in_audit['store-drug-audit'],left_on = 'store-drug',
right_on = 'store-drug-audit', how ='left')
expired_pso_after_audit_check = expired_pso_audit_check[expired_pso_audit_check['store-drug-audit'].isna()]
unique_store_drug_series = pd.Series(expired_pso_after_audit_check['store-drug'].unique())
unique_store_drug_1 = unique_store_drug_series.str.split(pat= '-', expand = True)
logger.info("")
logger.info("Removed drugs which are already in audit extra and status is saved, unique store-drug-combination - {}".format(len(unique_store_drug_1)))
logger.info("")
# =============================================================================
# creating output table
# =============================================================================
unique_store_drug = pd.DataFrame()
if len(unique_store_drug_1)>0:
unique_store_drug['drug-id'] = unique_store_drug_1[1].astype(int)
unique_store_drug['store-id'] = unique_store_drug_1[0].astype(int)
unique_store_drug['created-by'] = '[email protected]'
logger.info("")
logger.info("Table to append created, items to add - {}".format(len(unique_store_drug['drug-id'])))
logger.info("")
else:
logger.info("")
logger.info("Table to append created, items to add - {}".format(len(unique_store_drug)))
logger.info("")
# =============================================================================
# writing to audit_extra
# =============================================================================
# prod mysql
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
status2 = False
try:
unique_store_drug.to_sql(
name='audit-extra', con=mysql_write.engine,
if_exists='append',
chunksize=500, method='multi', index=False)
logger.info('')
logger.info('audit-extra' + ' table appended')
status2 = True
except Exception as e:
    logger.info(' ')
    logger.info(str('audit-extra') + ' table not appended correctly')
    logger.info(f"error: {e}")
    status2 = False
if status2 is True:
status = 'Success'
else:
status = 'Failed'
end_time = datetime.now()
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
# email.send_email_file(subject='{} {} : Cluster Audit extra table'.format(
# env, status),
# mail_body=f" audit-extra table updated, Time for job completion - {min_to_complete} mins ",
# to_emails=email_to, file_uris=[pso_added_uri])
rs_db.close_connection()
mysql_write.close()

# ==== end of file: glue-jobs/src/scripts/cluster/cluster-audit-extra.py ====
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB, PostGreWrite
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import pandas as pd
import numpy as np
import datetime
import gc
from dateutil.relativedelta import relativedelta
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
# Redshift
rs_db = DB()
rs_db.open_connection()
# PostGre
pg = PostGreWrite()
pg.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name1 = 'dexter-medicine-suggestion'
table_info1 = helper.get_table_info(db=rs_db, table_name=table_name1, schema=schema)
# =============================================================================
# Retrieve recent patient entries
# =============================================================================
mdate ="""
select
max(bill_date::date) as max_date
from
dexter_medicine_suggestion
"""
m_date = pd.read_sql_query(mdate, pg.connection)
max_date = m_date.max_date[0]
if not pd.isna(max_date):
date1 = max_date.strftime('%Y-%m-%d')
else:
date1= (datetime.datetime.today() + relativedelta(months=-12)).replace(day=1).strftime('%Y-%m-%d')
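# Incremental window: if dexter_medicine_suggestion already has rows, only patients who
# billed on or after its latest bill_date are refreshed; otherwise the window falls back
# to the first day of the month twelve months ago.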
mquery = f""" (select
"patient-id"
from
"prod2-generico"."sales"
where
"created-date" >= '{date1}'
group by
"patient-id") """
#########################################################
# Billing data (lengthy dataset)
########################################################
bills_q = f"""
select
s1."created-date" as "bill-date",
s1."patient-id" ,
s1."bill-id" ,
s1."drug-id",
sum(s1."revenue-value") as "spend",
sum(s1."net-quantity") as "quantity"
from
"{schema}"."sales" s1
inner join {mquery} s2
on s1."patient-id" = s2."patient-id"
where
s1."bill-flag" = 'gross'
and s1."created-date" >= date(date_trunc('month', current_date) - interval '12 month')
group by
s1."created-date" ,
s1."patient-id" ,
s1."bill-id" ,
s1."drug-id"
"""
data_bill = rs_db.get_df(bills_q)
data_bill.columns = [c.replace('-', '_') for c in data_bill.columns]
data_bill['bill_date'] = pd.to_datetime(data_bill['bill_date'])
logger.info('Column names of data_bill table {}'.format(str(data_bill.columns)))
drugs_q = f"""
SELECT
"id" as drug_id,
"composition",
"drug-name",
"type",
"category",
"repeatability-index",
"front-img-url"
FROM
"{schema}"."drugs"
"""
data_drugs = rs_db.get_df(drugs_q)
data_drugs.columns = [c.replace('-', '_') for c in data_drugs.columns]
logger.info("Data for drugs fetched")
# Merge these data-frames
data_raw = data_bill.merge(data_drugs, how='left', on=['drug_id'])
data_raw[['quantity', 'spend']] = data_raw[['quantity', 'spend']].fillna(0)
data_raw['quantity']=data_raw['quantity'].astype('int64')
data_raw['spend']=data_raw['spend'].astype('float64')
logger.info('Column names of data_raw table {}'.format(str(data_raw.columns)))
# Delete temp data-frames
del data_bill
gc.collect()
data_bill = pd.DataFrame()
# Fill NA
logger.info("Composition NA is {}".format(data_raw['composition'].isnull().sum()))
data_raw['composition'] = data_raw['composition'].fillna('')
logger.info("Front image url NA is {}".format(data_raw['front_img_url'].isnull().sum()))
data_raw['front_img_url'] = data_raw['front_img_url'].fillna('')
logger.info("Raw data length - {}".format(len(data_raw)))
logger.info("Raw data info - {}".format(str(data_raw.info())))
# Grp on unique columns
data_bill_base = data_raw.groupby(['patient_id', 'composition',
'bill_date', 'bill_id', 'drug_id',
'drug_name', 'type',
'category', 'repeatability_index',
'front_img_url'])[['spend', 'quantity']].sum().reset_index()
logger.info(str(data_bill_base.columns))
# Avg rate again
data_bill_base['rate'] = data_bill_base['spend'] / data_bill_base['quantity']
# Delete temp dataframes
del data_raw
gc.collect()
data_raw = pd.DataFrame()
logger.info("Data length after grouping at unique level - "
"{}".format(len(data_bill_base)))
# Last bill date and NOB
data_bill_base_grp = data_bill_base.groupby(['patient_id']).agg({'bill_date': 'max',
'bill_id': 'nunique'}
).reset_index()
data_bill_base_grp = data_bill_base_grp.rename(columns={'bill_date': 'overall_last_bill_date',
'bill_id': 'overall_num_orders'})
logger.info("Length of patient level last bill date and nob is "
"{}".format(len(data_bill_base_grp)))
# Drug level Last bill date and NOB
data_bill_base_grp2 = data_bill_base.groupby(['patient_id', 'drug_id']).agg(
{'bill_date': 'max', 'bill_id': 'nunique'}).reset_index()
data_bill_base_grp2 = data_bill_base_grp2.rename(columns={'bill_date': 'last_bill_date',
'bill_id': 'num_orders'})
logger.info("Length of patient drug level last bill date and nob is "
"{}".format(len(data_bill_base_grp2)))
# Sort the base data and make unique on patient-drug
data_bill_base_unique = data_bill_base.sort_values(by=['patient_id',
'drug_id',
'bill_date'],
ascending=[True, True, False])
data_bill_base_unique = data_bill_base_unique.drop_duplicates(subset=['patient_id',
'drug_id'])
logger.info("Patient drug level unique base data length is "
"{}".format(len(data_bill_base_unique)))
# Merge with patient-drug metadata
data_bill_base2 = data_bill_base_unique.merge(data_bill_base_grp2,
how='left',
on=['patient_id', 'drug_id'])
logger.info("After merging with patient drug metadata, length is "
"{}".format(len(data_bill_base2)))
# Merge with patient-metadata
data_bill_base2 = data_bill_base2.merge(data_bill_base_grp,
how='left',
on=['patient_id'])
logger.info("After merging with patient metadata, length is "
"{}".format(len(data_bill_base2)))
# Recency
data_bill_base2['recency'] = (data_bill_base2['overall_last_bill_date'] -
data_bill_base2['last_bill_date']).dt.days
# Recency flag
data_bill_base2['recency_flag'] = np.where(data_bill_base2['recency'] <= 90, 1, 0)
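# recency = days between the patient's overall last bill and the last bill containing this
# drug; recency_flag = 1 when the drug was bought within 90 days of that latest bill.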
# Sort on recency and drug nob and last bill date
data_bill_base2 = data_bill_base2.sort_values(by=['recency_flag',
'num_orders',
'last_bill_date'],
ascending=[False, False, False])
# Rank
data_bill_base2['recommendation_rank'] = data_bill_base2.groupby(['patient_id']
).cumcount() + 1
# Filter top 12
data_bill_base_f = data_bill_base2[data_bill_base2['recommendation_rank'] <= 12].copy()
logger.info("After rank filtering length is {}".format(len(data_bill_base_f)))
##############################################
# Necessary columns
##############################################
data_bill_base_f['is_chronic'] = np.where(data_bill_base_f['category'] == 'chronic', 1, 0)
data_bill_base_f['is_repeatable'] = np.where((
((data_bill_base_f['category'] == 'chronic') &
(data_bill_base_f['repeatability_index'] >= 40))
| (data_bill_base_f['repeatability_index'] >= 80)), 1, 0)
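# Repeatability rule as coded above: a drug counts as repeatable when it is chronic with
# repeatability-index >= 40, or when repeatability-index >= 80 irrespective of category.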
data_bill_base_f = data_bill_base_f.rename(columns={'type': 'drug_type',
'category': 'drug_category',
'num_orders': 'drug_nob',
'front_img_url': 'drug_front_image_url'})
data_bill_base_f['last_bill_quantity'] = data_bill_base_f['quantity']
data_bill_base_f['refill_date'] = data_bill_base_f['bill_date'] + datetime.timedelta(days=15)
data_bill_base_f['price_rank'] = data_bill_base_f.sort_values(
by=['patient_id', 'rate'], ascending=[True, True]).groupby(
['patient_id']).cumcount() + 1
data_bill_base_f['refill_date_rank'] = data_bill_base_f.sort_values(
by=['patient_id', 'refill_date'], ascending=[True, True]).groupby(
['patient_id']).cumcount() + 1
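# refill_date assumes a 15-day consumption window after the last bill. price_rank orders a
# patient's recommendations from the cheapest rate upwards, refill_date_rank from the
# earliest expected refill onwards.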
data_bill_base_f = data_bill_base_f[~data_bill_base_f['drug_id'].isnull()]
logger.info("After non-null drug-id length is {}".format(len(data_bill_base_f)))
data_export = data_bill_base_f[['patient_id', 'composition', 'drug_id',
'drug_name', 'drug_category', 'drug_type',
'bill_date', 'quantity', 'refill_date',
'price_rank', 'refill_date_rank', 'is_repeatable',
'is_chronic', 'recommendation_rank', 'last_bill_date',
'last_bill_quantity', 'drug_nob',
                                'drug_front_image_url', 'recency_flag']].copy()
# Convert to date-month-year format
for i in ['bill_date', 'refill_date', 'last_bill_date']:
data_export[i] = data_export[i].dt.strftime("%d-%b-%Y")
data_export['drug_id'] = data_export['drug_id'].astype('int64')
logger.info("Data export length is - {}".format(len(data_export)))
data_update = data_export[['patient_id']].drop_duplicates()
# Write to PostGre
#update table
table_update = 'dexter_medicine_suggestion_update'
truncate_u = f""" DELETE FROM {table_update} """
pg.engine.execute(truncate_u)
data_update.to_sql(name=table_update,
con=pg.engine, if_exists='append',
index=False, method='multi', chunksize=500)
# if one wants to reset index then use this in query -> RESTART IDENTITY
table_name1_pg = table_name1.replace("-", "_")
truncate_q = f""" DELETE FROM {table_name1_pg} m1 using (select patient_id from dexter_medicine_suggestion_update) m2
where m1.patient_id = m2.patient_id"""
pg.engine.execute(truncate_q)
for rank_number in range(1, 13):
final_data_rank = data_export[data_export['recommendation_rank'] == rank_number]
logger.info("Uploading for rank number: {}".format(rank_number))
final_data_rank.to_sql(name=table_name1_pg,
con=pg.engine, if_exists='append',
index=False, method='multi', chunksize=500)
logger.info("Successful with length: {}".format(len(final_data_rank)))
#
# # Write to Redshift DB
# data_export['created_at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
# '%Y-%m-%d %H:%M:%S')
# data_export['updated_at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
# '%Y-%m-%d %H:%M:%S')
# data_export['created_by'] = 'etl-automation'
# data_export['updated_by'] = 'etl-automation'
#
# data_export.columns = [c.replace('_', '-') for c in data_export.columns]
# if isinstance(table_info1, type(None)):
# raise Exception(f"table: {table_name1} do not exist, create the table first")
# else:
# logger.info(f"Table:{table_name1} exists")
#
# truncate_query = f''' DELETE FROM "{schema}"."{table_name1}" '''
# rs_db.execute(truncate_query)
#
# s3.write_df_to_db(df=data_export[table_info1['column_name']], table_name=table_name1, db=rs_db,
# schema=schema)
# Closing the DB Connection
rs_db.close_connection()
pg.close_connection()

# ==== end of file: glue-jobs/src/scripts/dexter-medicine-recommendation/dexter-medicine-recommendation.py ====
import csv
import math
# importing libraries
from datetime import datetime
from warnings import filterwarnings
import numpy as np
import pandas as pd
import xlsxwriter
filterwarnings("ignore")
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from datetime import timedelta
st_dt1 = (datetime.now() - timedelta(days=2)).strftime("%Y-%m-%d")
ed_dt1 = (datetime.now() - timedelta(days=2)).strftime("%Y-%m-%d")
st_dt2 = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d")
ed_dt2 = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d")
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-sd1', '--start_date1', default=None, type=str, required=False)
parser.add_argument('-ed1', '--end_date1', default=None, type=str, required=False)
parser.add_argument('-sd2', '--start_date2', default=None, type=str, required=False)
parser.add_argument('-ed2', '--end_date2', default='', type=str, required=False)
parser.add_argument('-ip', '--initial_param', default='all', type=str, required=False)
parser.add_argument('-ipv', '--initial_param_value', default='all', type=str, required=False)
parser.add_argument('-p', '--param', default='all_params', type=str, required=False)
parser.add_argument('-pl', '--param_limit', default=5, type=int, required=False)
parser.add_argument('-ml', '--max_level', default=5, type=int, required=False)
parser.add_argument('-dt', '--data', default='filter', type=str, required=False)
# parser.add_argument('-fc', '--filter_cutoff',default= 5, type= float, required=False )
parser.add_argument('-ad', '--additional_data', default='both', type=str, required=False)
parser.add_argument('-hpl', '--hidden_param_list', default=None, type=str, required=False)
parser.add_argument('-msl', '--manual_sort_list', default=None, type=str, required=False)
parser.add_argument('-tp', '--top_parameter', default=5, type=int, required=False)
parser.add_argument('-ms', '--manual_sorting', default='no', type=str, required=False)
parser.add_argument('-sb', '--sorting_basis', default='param_value', type=str, required=False)
parser.add_argument('-fb', '--filter_basis', default='percentage', type=str, required=False)
# parser.add_argument('-dr', '--impact_direction',default= '', type= str, required=False )
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
# parameters
email_to = args.email_to
start_date1 = args.start_date1
end_date1 = args.end_date1
start_date2 = args.start_date2
end_date2 = args.end_date2
initial_param = args.initial_param
initial_param_value = args.initial_param_value
param = args.param
param_limit = args.param_limit
max_level = args.max_level
data = args.data
# filter_cutoff = args.filter_cutoff
additional_data = args.additional_data
hidden_param_list = args.hidden_param_list
manual_sort_list = args.manual_sort_list
top_parameter = args.top_parameter
manual_sorting = args.manual_sorting
# impact_direction = args.impact_direction
sorting_basis = args.sorting_basis
filter_basis = args.filter_basis
if start_date1 is None and end_date1 is None:
    start_date1 = st_dt1
    end_date1 = ed_dt1
if start_date2 is None and end_date2 == '':
    start_date2 = st_dt2
    end_date2 = ed_dt2
if hidden_param_list is None:
    hidden_param_list = ['abo', 'line_manager', 'cluster_name', 'store_staff']
if manual_sort_list is None:
    manual_sort_list = ['old_new_customer', 'drug_name', 'drug_type', 'drug_category', 'drug_company',
                        'drug_composition', 'pr_flag', 'hd_flag', 'ecom_flag', 'payment_method',
                        'promo_code', 'city', 'franchisee_name', 'cluster_name',
                        'store_name', 'store_staff', 'abo', 'line_manager']
elif manual_sort_list is not None:
    input_list = list(manual_sort_list.split(','))
    manual_sort_list = []
    manual_sort_list.extend(input_list)
logger = get_logger()
logger.info(f"env: {env}")
logger.info(f"print the env again: {env}")
schema = 'prod2-generico'
rs_db = DB()
rs_db.open_connection()
s3 = S3()
read_schema = 'prod2-generico'
# Query to fetch sales data
query1 = f"""
select
s."bill-id" ,
s."patient-id" ,
s."store-id" ,
s."drug-id" ,
s."drug-name",
s."type" as "drug-type" ,
s.category as "drug-category",
(case when s.composition = '' then 'null_composition' else s.composition end) as "drug-composition",
s.company as "drug-company",
(case when s."bill-flag" = 'gross' then s.quantity
when s."bill-flag" = 'return' then (-1*s.quantity)
else 0
end) as quantity,
date(s."created-at" ),
s.rate ,
s.ptr ,
s."substitution-status" ,
s."created-by" as "store-staff",
s."bill-flag" ,
s."old-new" as "old-new-customer",
s."payment-method" ,
s."promo-code" ,
s."pc-type" ,
(case when s."pr-flag" = true then 'PR' else 'Non-PR' end) as "PR-flag" ,
(case when s."hd-flag" = true then 'HD' else 'Non-HD' end) as "HD-flag" ,
(case when s."ecom-flag" = true then 'Ecom' else 'store' end) as "Ecom-flag" ,
s."store-name" ,
s."line-manager" ,
s.city ,
s.abo ,
s."franchisee-id" ,
s."franchisee-name" ,
s."cluster-id" ,
s."cluster-name" ,
s."drug-grade" ,
s."doctor-id" ,
(case when s."bill-flag" = 'gross' then (s.rate * s.quantity)
when s."bill-flag" = 'return' then (-1*(s.rate * s.quantity))
else 0
end) as sales
from
"{read_schema}".sales s
where date(s."created-at" ) between '{start_date1}' and '{end_date1}';
"""
# sales data for period 1
query = query1
mos1 = rs_db.get_df(query=query)
mos1.columns = [c.replace('-', '_') for c in mos1.columns]
# initial filter
if initial_param == 'all':
mos1 = mos1
else:
mos1 = mos1[mos1[initial_param] == initial_param_value]
# for period 2
query2 = f"""
select
s."bill-id" ,
s."patient-id" ,
s."store-id" ,
s."drug-id" ,
s."drug-name",
s."type" as "drug-type" ,
s.category as "drug-category",
(case when s.composition = '' then 'null_composition' else s.composition end) as "drug-composition",
s.company as "drug-company",
(case when s."bill-flag" = 'gross' then s.quantity
when s."bill-flag" = 'return' then (-1*s.quantity)
else 0
end) as quantity,
date(s."created-at" ),
s.rate ,
s.ptr ,
s."substitution-status" ,
s."created-by" as "store-staff",
s."bill-flag" ,
s."old-new" as "old-new-customer",
s."payment-method" ,
s."promo-code" ,
s."pc-type" ,
(case when s."pr-flag" = true then 'PR' else 'Non-PR' end) as "PR-flag" ,
(case when s."hd-flag" = true then 'HD' else 'Non-HD' end) as "HD-flag" ,
(case when s."ecom-flag" = true then 'Ecom' else 'store' end) as "Ecom-flag" ,
s."store-name" ,
s."line-manager" ,
s.city ,
s.abo ,
s."franchisee-id" ,
s."franchisee-name" ,
s."cluster-id" ,
s."cluster-name" ,
s."drug-grade" ,
s."doctor-id" ,
(case when s."bill-flag" = 'gross' then (s.rate * s.quantity)
when s."bill-flag" = 'return' then (-1*(s.rate * s.quantity))
else 0
end) as sales
from
"{read_schema}".sales s
where date(s."created-at" ) between '{start_date2}' and '{end_date2}';
"""
# sales data for period 2
mos2 = rs_db.get_df(query=query2)
mos2.columns = [c.replace('-', '_') for c in mos2.columns]
# initial filter
if initial_param == 'all':
mos2 = mos2
else:
mos2 = mos2[mos2[initial_param] == initial_param_value]
# defining change function (absolute change from A to B)
def change(A, B):
    if A is None and B is None:
        return float(0)
    elif A is None:
        return float(np.round(B, 2))
    elif B is None:
        return float(np.round((-1 * A), 2))
    else:
        return float(np.round((B - A), 2))
# Defining function to calculate percentage change
def per_change(A, B):
if (A == 0):
return float((B - A))
elif (A == B):
return float((B - A))
else:
return float(((B - A) / A) * 100)
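# Illustrative values for the two helpers above (hypothetical numbers):
# change(100.0, 120.0) -> 20.0 (absolute change)
# per_change(100.0, 120.0) -> 20.0 (i.e. +20%)
# per_change(0, 50.0) -> 50.0 (falls back to the absolute change to avoid division by zero)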
# Returns the total (net) sale of the given sales dataframe
def sale(table):
return float(table['sales'].sum())
def break_sale(table, sale_type):
if sale_type == 'gross':
mos_gs_local = table[table['bill_flag'] == 'gross']
return mos_gs_local
if sale_type == 'return':
        mos_ret_local = table[table['bill_flag'] == 'return'].copy()
mos_ret_local['sales'] = np.where(mos_ret_local['sales'] <= 0, -1 * mos_ret_local['sales'],
mos_ret_local['sales'])
mos_ret_local['quantity'] = np.where(mos_ret_local['quantity'] <= 0, -1 * mos_ret_local['quantity'],
mos_ret_local['quantity'])
return mos_ret_local
# Defining functions for all required metrics
def num_cust(table):
num = table.patient_id.nunique()
return float(num)
def avg_gs_per_customer(table):
gs = sale(table)
num = num_cust(table)
return (gs / num) if (num) != 0 else 0
def num_bills(table):
num = table.bill_id.nunique()
return float(num)
def avg_gs_per_bill(table):
gs = sale(table)
num = num_bills(table)
return (gs / num) if (num) != 0 else 0
def num_drugs(table):
num = table.drug_id.nunique()
return float(num)
def avg_gs_per_drug(table):
gs = sale(table)
num = num_drugs(table)
return (gs / num) if (num) != 0 else 0
def num_quantity(table):
num = table['quantity'].sum()
return float(num)
def rate(table):
gs = sale(table)
num = num_quantity(table)
return (gs / num) if (num) != 0 else 0
def num_bills_per_customer(table):
num1 = num_bills(table)
num2 = num_cust(table)
return (num1 / num2) if (num2) != 0 else 0
def num_quantity_per_bill(table):
num1 = num_quantity(table)
num2 = num_bills(table)
return (num1 / num2) if (num2) != 0 else 0
# taking num of unique drug-bill combination
def num_bills_drugs(table):
num = len(table[['bill_id', 'drug_id']].drop_duplicates())
return float(num)
def num_drugs_per_bill(table):
num1 = num_bills_drugs(table)
num2 = num_bills(table)
return (num1 / num2) if (num2) != 0 else 0
def num_quantity_per_drug(table):
num1 = num_quantity(table)
num2 = num_drugs(table)
return (num1 / num2) if (num2) != 0 else 0
def avg_gs_per_drug_per_bill(table):
gs = sale(table)
num = num_bills_drugs(table)
return (gs / num) if (num) != 0 else 0
def num_quantity_per_drug_per_bill(table):
num1 = num_quantity(table)
num2 = num_bills_drugs(table)
return (num1 / num2) if (num2) != 0 else 0
# Defining function to find the contribution factor of a metric
def metric_factor(gs2_sim, ret_per2, gs2_local, ns2_local, ns2_global):
ret2_sim = float(gs2_sim) * float(ret_per2)
ns2_sim = float(gs2_sim) - float(ret2_sim)
metric_fact = np.divide((ns2_local - ns2_sim), ns2_global) * 100
return float(metric_fact)
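# Decomposition identity behind metric() / metric_factor() (a reading aid, not extra logic):
# gross_sale = customers * bills_per_customer * drugs_per_bill * qty_per_drug_per_bill * rate
# net_sale = gross_sale * (1 - return%)
# Each caller builds gs2_sim by holding ONE metric at its period-1 value; metric_factor()
# then applies the period-2 return% and reports (ns2_local - ns2_sim) as a percentage of
# ns2_global, i.e. that metric's contribution to the overall net-sale change.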
# Function to store metric values in dictionary
def metric(t1, t2, ns2_local, gs1_local, gs2_local, ret_per1, ret_per2, ns2_global):
# defining dictionary to store the metrics
d = {}
# No. of customers
nc1 = num_cust(t1)
nc2 = num_cust(t2)
# Avg gs per customer:
agpc1 = avg_gs_per_customer(t1)
agpc2 = avg_gs_per_customer(t2)
# Cont of Num of cust
gs2_sim = float(agpc2) * float(nc1)
d['nc'] = metric_factor(gs2_sim, ret_per2, gs2_local, ns2_local, ns2_global)
# Cont of ACV
gs2_sim = float(agpc1) * float(nc2)
d['ACV'] = metric_factor(gs2_sim, ret_per2, gs2_local, ns2_local, ns2_global)
# No. of bills per customer
nbpc1 = num_bills_per_customer(t1)
nbpc2 = num_bills_per_customer(t2)
# avg_gs_per_bill
agpb1 = avg_gs_per_bill(t1)
agpb2 = avg_gs_per_bill(t2)
# cont. of number of bills
agpc2_sim = float(nbpc1) * float(agpb2)
gs2_sim = agpc2_sim * float(nc2)
d['nbpc'] = metric_factor(gs2_sim, ret_per2, gs2_local, ns2_local, ns2_global)
# cont. of ABV
agpc2_sim = float(nbpc2) * float(agpb1)
gs2_sim = agpc2_sim * float(nc2)
d['ABV'] = metric_factor(gs2_sim, ret_per2, gs2_local, ns2_local, ns2_global)
# num of drugs per bill
ndpb1 = num_drugs_per_bill(t1)
ndpb2 = num_drugs_per_bill(t2)
# avg gs per drug per bill
agpdpb1 = avg_gs_per_drug_per_bill(t1)
agpdpb2 = avg_gs_per_drug_per_bill(t2)
# cont of num of drugs per bill
agpb2_sim = float(ndpb1) * float(agpdpb2)
agpc2_sim = agpb2_sim * float(nbpc2)
gs2_sim = agpc2_sim * float(nc2)
d['ndpb'] = metric_factor(gs2_sim, ret_per2, gs2_local, ns2_local, ns2_global)
# cont. of avg gs per drug per bill
agpb2_sim = float(ndpb2) * float(agpdpb1)
agpc2_sim = agpb2_sim * float(nbpc2)
gs2_sim = agpc2_sim * float(nc2)
d['agpdpb'] = metric_factor(gs2_sim, ret_per2, gs2_local, ns2_local, ns2_global)
# number of quantities per drug per bill
nqpdpb1 = num_quantity_per_drug_per_bill(t1)
nqpdpb2 = num_quantity_per_drug_per_bill(t2)
# Avg gs per quantity
agpq1 = rate(t1)
agpq2 = rate(t2)
# cont by number of quantities per drug per bill
agpdpb2_sim = float(nqpdpb1) * float(agpq2)
agpb2_sim = float(ndpb2) * agpdpb2_sim
agpc2_sim = agpb2_sim * float(nbpc2)
gs2_sim = agpc2_sim * float(nc2)
d['nqpdpb'] = metric_factor(gs2_sim, ret_per2, gs2_local, ns2_local, ns2_global)
# cont by Avg gs per quantity
agpdpb2_sim = float(nqpdpb2) * float(agpq1)
agpb2_sim = float(ndpb2) * agpdpb2_sim
agpc2_sim = agpb2_sim * float(nbpc2)
gs2_sim = agpc2_sim * float(nc2)
d['rate'] = metric_factor(gs2_sim, ret_per2, gs2_local, ns2_local, ns2_global)
    # returning the dictionary containing all metric values
return d
# Function to store the metric in a dataFrame
def store_table(df, d, ret_fact, ns2_global):
# gross sale
# level 1
df['Return%'] = ret_fact
# level 2
df['Number_cust'] = d['nc']
df['ACV'] = d['ACV']
# Level 3
df['Nob_per_cust'] = d['nbpc']
df['ABV'] = d['ABV']
# Level 4
df['Drugs_per_bill'] = d['ndpb']
# df['Avg. spend per drug per bill'] = d['agpdpb']
# Level 5
df['Quantity_per_drug'] = d['nqpdpb']
df['Avg_rate_per_quantity'] = d['rate']
return df
# Function to calculate change factor of any parameter
def factor_param(d1, d2, ns1, ns2, calc_type):
ns1_param = sale(d1)
ns2_param = sale(d2)
if calc_type == 'per':
ns1_param_fact = ns1_param / ns1
ns2_sim_param = ns2 * ns1_param_fact
elif calc_type == 'abs':
ns2_sim_param = ns1_param
ch_ns2_param = ns2_param - ns2_sim_param
ns_factor_param = (ch_ns2_param / ns2) * 100
return ns_factor_param
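# Contribution of a parameter value: for "abs" parameters its period-1 sale is carried
# forward as the simulated period-2 baseline; for "per" parameters its period-1 share of net
# sale is applied to the period-2 total. The gap to the actual period-2 sale, as a % of the
# overall period-2 net sale, is that value's contribution.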
# Function to control level of output columns i.e level of decomposition
def level(table, max_level, local_list):
if max_level == 1:
df = table.loc[:, local_list[0:2]]
return df
if max_level == 2:
df = table.loc[:, local_list[0:4]]
return df
if max_level == 3:
df = table.loc[:, local_list[0:6]]
return df
if max_level == 4:
df = table.loc[:, local_list[0:7]]
return df
if max_level == 5:
df = table.loc[:, local_list[0:9]]
return df
# Function which returns the final table containing the metric contributions of total change of net sale
def decomposition(table1, table2, df, ns2_global):
# period1 net sale
ns1_local = sale(table1)
# period2 net sale
ns2_local = sale(table2)
# defining gross sale metrics
mos1_gs_local = break_sale(table1, 'gross')
mos2_gs_local = break_sale(table2, 'gross')
gs1_local = sale(mos1_gs_local)
gs2_local = sale(mos2_gs_local)
# defining return metrics
mos1_ret_local = break_sale(table1, 'return')
mos2_ret_local = break_sale(table2, 'return')
ret1_local = sale(mos1_ret_local)
ret2_local = sale(mos2_ret_local)
# return to be calculate as return %
ret_per1 = np.divide(ret1_local, gs1_local)
ret_per2 = np.divide(ret2_local, gs2_local)
ret2_sim = ret_per1 * gs2_local
ns2_sim = gs2_local - ret2_sim
ret_fact = np.divide((ns2_local - ns2_sim), ns2_global) * 100
# calling metrics
d = metric(mos1_gs_local, mos2_gs_local, ns2_local, gs1_local, gs2_local, ret_per1, ret_per2, ns2_global)
# final data frame
df_final = store_table(df, d, ret_fact, ns2_global)
return df_final
# Main driver: runs the decomposition for the chosen parameter(s) with the given limit and level
def my_func(param, param_limit, max_level):
# period1 net sale
ns1 = sale(mos1)
# period2 net sale
ns2 = sale(mos2)
ns2_global = ns2
# Abs list
abs_list = ['drug_name', 'drug_composition', 'store_staff', 'store_name', 'line_manager', 'city', 'abo',
'franchisee_name',
'cluster_name', 'old_new_customer']
pct_list = ['drug_type', 'drug_category', 'drug_company', 'payment_method', 'promo_code', 'pr_flag', 'hd_flag',
'ecom_flag']
# If parameters are set
# sorting the parameter values in descending order of change of net sale from period1 to period2
df1_sort = mos1.groupby(param)['sales'].sum().reset_index()
df2 = pd.DataFrame()
df2[param] = df1_sort[param]
df3 = pd.DataFrame()
df3['sales'] = df1_sort['sales']
    df3 = df3.applymap(float)
df1_sort = pd.concat([df2, df3], axis=1)
df1_sort['fraction'] = (df1_sort['sales'] / ns1) * 100
df2_sort = mos2.groupby(param)['sales'].sum().reset_index()
df4 = pd.DataFrame()
df4[param] = df2_sort[param]
df5 = pd.DataFrame()
df5['sales'] = df2_sort['sales']
    df5 = df5.applymap(float)
df2_sort = pd.concat([df4, df5], axis=1)
df2_sort['fraction'] = (df2_sort['sales'] / ns2) * 100
df_sort = pd.merge(df1_sort, df2_sort, on=param, how='outer')
df_sort.fillna(0, inplace=True)
# sales diff
df_sort['s_diff'] = df_sort['sales_y'] - df_sort['sales_x']
# fraction diff
df_sort['f_diff'] = df_sort['fraction_y'] - df_sort['fraction_x']
# sorting absolute values
df_sort1 = df_sort[param]
df_sort2 = df_sort[['sales_x', 'fraction_x', 'sales_y', 'fraction_y', 's_diff', 'f_diff']]
df_sort2 = np.abs(df_sort2)
df_sort3 = pd.concat([df_sort1, df_sort2], axis=1)
# sorting
if param in abs_list:
df_sort = df_sort3.sort_values('s_diff', ascending=False)
elif param in pct_list:
df_sort = df_sort3.sort_values('f_diff', ascending=False)
# listing the sorted parameters
sort_list = list(df_sort[param])
# choosing the parameter values from set limit
if len(sort_list) <= param_limit:
param_list = sort_list
else:
param_list = sort_list[0:param_limit]
# creating dataframe with rows as parameter values
df_temp = pd.DataFrame()
# Iterating through each parameter value
for c in param_list:
# Filtering base table based on set parameter
p1 = mos1[mos1[param] == c]
p2 = mos2[mos2[param] == c]
# calculating contribution factor by calling the factor_param function
if param in abs_list:
ns_factor_param = factor_param(p1, p2, ns1, ns2, 'abs')
elif param in pct_list:
ns_factor_param = factor_param(p1, p2, ns1, ns2, 'per')
# printing the contribution of parameters in total change of net sale
df_op = pd.DataFrame(index=[c])
df_op['Net sale'] = ns_factor_param
        # Calling the decomposition function for set parameters and level
df2 = decomposition(p1, p2, df_op, ns2_global)
df_final = pd.concat([df_temp, df2])
df_temp = df_final
# Arranging column names in a relevant way
local_list = ['Net sale', 'Return%',
'Number_cust', 'ACV',
'Nob_per_cust', 'ABV',
'Drugs_per_bill',
'Quantity_per_drug', 'Avg_rate_per_quantity']
# return final df
return level(df_final, max_level, local_list)
# Function to store output for all param in the list
def all_param(param_list, param_limit, max_level):
df_param_dict = {}
for param in param_list:
df_local = my_func(param, param_limit, max_level)
df_local['params'] = param
df_param_dict[param] = df_local
return df_param_dict
# Sorting param on the basis of contribution to change
def sort_param(df_param_dict):
params = []
cont_value = []
for key in df_param_dict:
params.append(key)
cont_value.append(abs(np.abs(df_param_dict[key]['Net sale']).sum()))
df_local = pd.DataFrame(data={'param': params, 'contribution': cont_value})
df_sorted = df_local.sort_values('contribution', ascending=False)
sorted_param_list = list(df_sorted['param'])
return sorted_param_list
# Concatenating all parameter dataframes in descending order of contribution
def concat(sorted_param_list, df_param_dict):
p = 0
df_final = pd.DataFrame()
for param in sorted_param_list:
df_temp = df_param_dict[param]
df_final = pd.concat([df_final, df_temp])
p = p + 1
index = list(df_final.index)
df_final.set_index(['params', index], inplace=True)
return df_final
# Function to filter data based on larger contribution
def filtered(df, ch_ns, upper_value=0.95, lower_value=0.05):
uv = np.quantile(df, upper_value)
lv = np.quantile(df, lower_value)
if ch_ns > 0:
df = df.applymap(lambda x: np.nan if x <= uv else x)
elif ch_ns < 0:
df = df.applymap(lambda x: np.nan if x >= lv else x)
df = df.applymap(lambda x: np.round(x, 2))
df = df.dropna(axis=0, how='all')
df = df.dropna(axis=1, how='all')
return df
# Defining function to calculate absolute difference
def difference(p, pv, metric):
local_df1 = mos1[mos1[p] == pv]
local_df2 = mos2[mos2[p] == pv]
if metric == 'Net sale':
ns_local1 = sale(local_df1)
ns_local2 = sale(local_df2)
abs_ch = change(ns_local1, ns_local2)
elif metric == 'Return%':
mos1_gs_local = break_sale(local_df1, 'gross')
mos2_gs_local = break_sale(local_df2, 'gross')
gs1_local = sale(mos1_gs_local)
gs2_local = sale(mos2_gs_local)
mos1_ret_local = break_sale(local_df1, 'return')
mos2_ret_local = break_sale(local_df2, 'return')
ret1_local = sale(mos1_ret_local)
ret2_local = sale(mos2_ret_local)
ret_per1 = np.divide(ret1_local, gs1_local) * 100
ret_per2 = np.divide(ret2_local, gs2_local) * 100
abs_ch = change(ret_per1, ret_per2)
elif metric == 'Number_cust':
nc1 = num_cust(local_df1)
nc2 = num_cust(local_df2)
abs_ch = change(nc1, nc2)
elif metric == 'ACV':
agpc1 = avg_gs_per_customer(local_df1)
agpc2 = avg_gs_per_customer(local_df2)
abs_ch = change(agpc1, agpc2)
elif metric == 'Nob_per_cust':
nbpc1 = num_bills_per_customer(local_df1)
nbpc2 = num_bills_per_customer(local_df2)
abs_ch = change(nbpc1, nbpc2)
elif metric == 'ABV':
agpb1 = avg_gs_per_bill(local_df1)
agpb2 = avg_gs_per_bill(local_df2)
abs_ch = change(agpb1, agpb2)
elif metric == 'Drugs_per_bill':
ndpb1 = num_drugs_per_bill(local_df1)
ndpb2 = num_drugs_per_bill(local_df2)
abs_ch = change(ndpb1, ndpb2)
elif metric == 'Quantity_per_drug':
nqpdpb1 = num_quantity_per_drug_per_bill(local_df1)
nqpdpb2 = num_quantity_per_drug_per_bill(local_df2)
abs_ch = change(nqpdpb1, nqpdpb2)
elif metric == 'Avg_rate_per_quantity':
agpq1 = rate(local_df1)
agpq2 = rate(local_df2)
abs_ch = change(agpq1, agpq2)
return abs_ch
# Defining function to create dataframe of differences
def diff_df(df):
df_diff = df.copy()
params_list = list(df_diff['params'])
param_value_list = list(df_diff['parameter value'])
column_list = list(df_diff.columns)[2:]
# Iterating through all the entries in the final dataframe
for c in column_list:
for (p, pv) in zip(params_list, param_value_list):
df_diff[c] = np.where((df_diff['params'] == p) & (df_diff['parameter value'] == pv), difference(p, pv, c),
df_diff[c])
return df_diff
# Defining function to find top parameter
def top_param(df_filtered, ch_ns, top_parameter, filter_basis):
df_array = df_filtered.iloc[:, 2:].values
list1 = []
for l in df_array:
for m in l:
list1.append(m)
# removing nan and inf
list1 = [v for v in list1 if not (math.isinf(v) or math.isnan(v))]
# Top N values
if ch_ns < 0:
# Ascending order
sort_list = list(np.sort(list1))
else:
# Descending order
sort_list = list(np.sort(list1))
sort_list.reverse()
if filter_basis == 'percentage':
length = len(sort_list)
per = int((length * top_parameter) / 100)
selected_list = sort_list[0:per]
else:
selected_list = sort_list[0:top_parameter]
df1 = df_filtered.iloc[:, :2]
df2 = df_filtered.iloc[:, 2:]
df3 = df2.applymap(lambda x: np.nan if x not in selected_list else x)
df_filtered = pd.concat([df1, df3], axis=1)
df_index = df_filtered.set_index(['params', 'parameter value'])
df_filtered = df_index.dropna(axis=0, how='all')
df_filtered = df_filtered.dropna(axis=1, how='all')
df_filtered.reset_index(inplace=True)
return df_filtered
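# top_param keeps only the strongest signed impacts: the most negative cells when net sale
# fell (ch_ns < 0), the most positive when it grew, trimmed to a percentage of all cells or
# a fixed count depending on filter_basis, and then drops fully empty rows/columns.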
# defining summary function
def summary(df_local):
p_list = list(df_local['params'].values)
pv_list = list(df_local['parameter value'].values)
metrics = (list(df_local.columns))[2:]
df_array = df_local.iloc[:, 2:].values
value_list = []
for l in df_array:
value_list.append(list(l))
zip_list = list(zip(p_list, pv_list, value_list))
n = len(metrics)
# Adding corresponding data
final_list = []
for m in zip_list:
for i in range(n):
            if not math.isnan(m[2][i]):
final_list.append([m[0], m[1], metrics[i], m[2][i]])
return final_list
# Final run function
def run(ch_ns, hidden_param_list, manual_sort_list, param='all_params', param_limit=5, max_level=5, data='filter',
filter_basis='percentage', top_parameter=5, manual_sorting='no', sorting_basis='param_value'):
# threshold = filter_cutoff / 100
if param != 'all_params':
input_list = list(param.split(','))
param_list = []
param_list.extend(input_list)
df_param_dict = all_param(param_list, param_limit, max_level)
sorted_param_list = sort_param(df_param_dict)
df_required = concat(sorted_param_list, df_param_dict)
df_pr = df_required.copy()
if data == 'full':
df_pr = df_pr.applymap(lambda x: np.round(x, 2))
df_pr.reset_index(inplace=True)
df_pr.rename(columns={'level_1': 'parameter value'}, inplace=True)
df_filtered = df_pr.copy()
# df_filtered = df_filtered[~(df_filtered['params'].isin(hidden_param_list))]
# return df_filtered
elif data == 'filter':
# df_pr = df_pr.applymap(lambda x: 0 if x == np.nan else x)
# df_pr = df_pr.fillna(0)
# df = filtered(df_pr,ch_ns, (1 - threshold), threshold)
df_pr = df_pr.applymap(lambda x: np.round(x, 2))
df_pr.reset_index(inplace=True)
df_pr.rename(columns={'level_1': 'parameter value'}, inplace=True)
# local_hidden_list = [].append(initial_param)
# Hiding the initial param from impact factor
# df_pr = df_pr[~(df_pr['params'].isin(local_hidden_list))]
df = top_param(df_pr, ch_ns, top_parameter, filter_basis)
df_filtered = df.copy()
# df_filtered = df_filtered[~(df_filtered['params'].isin(hidden_param_list))]
# return df_filtered
if param == 'all_params':
param_list = ['drug_name', 'drug_type', 'drug_category', 'drug_composition', 'drug_company', 'store_staff',
'old_new_customer', 'payment_method',
'promo_code', 'pr_flag', 'hd_flag', 'ecom_flag', 'store_name', 'line_manager', 'city', 'abo',
'franchisee_name', 'cluster_name']
df_param_dict = all_param(param_list, param_limit, max_level)
sorted_param_list = sort_param(df_param_dict)
df_required = concat(sorted_param_list, df_param_dict)
df_pr = df_required.copy()
if data == 'full':
df_pr = df_pr.applymap(lambda x: np.round(x, 2))
df_pr.reset_index(inplace=True)
df_pr.rename(columns={'level_1': 'parameter value'}, inplace=True)
# hidden values
hidden_param_value = ['acute', 'store', 'cash', 'ZIPPIN PHARMA PVT. LTD', 'MAH-MUM']
df_filtered = df_pr.copy()
df_filtered = df_filtered[~(df_filtered['parameter value'].isin(hidden_param_value))]
df_filtered = df_filtered[~(df_filtered['params'].isin(hidden_param_list))]
# return df_filtered
elif data == 'filter':
df_pr = df_pr.applymap(lambda x: np.round(x, 2))
df_pr.reset_index(inplace=True)
df_pr.rename(columns={'level_1': 'parameter value'}, inplace=True)
# hidden values
hidden_param_value = ['acute', 'store', 'cash', 'ZIPPIN PHARMA PVT. LTD', 'MAH-MUM']
df_filtered = df_pr.copy()
df_filtered = df_filtered[~(df_filtered['parameter value'].isin(hidden_param_value))]
hidden_param_list.append(initial_param)
df_filtered = df_filtered[~(df_filtered['params'].isin(hidden_param_list))]
df_filtered = top_param(df_filtered, ch_ns, top_parameter, filter_basis)
# return df_filtered
if data == 'full':
if manual_sorting == 'no':
# hidden_param = hidden_param_list
if sorting_basis == 'param':
df_filtered = df_filtered
elif sorting_basis == 'param_value':
if ch_ns > 0:
sort_col = list(df_filtered.columns)[2]
df_filtered = df_filtered.sort_values(sort_col, ascending=False)
else:
sort_col = list(df_filtered.columns)[2]
df_filtered = df_filtered.sort_values(sort_col, ascending=True)
# dropping null rows and columns
df_filtered = df_filtered.dropna(axis=0, how='all')
df_filtered = df_filtered.dropna(axis=1, how='all')
elif manual_sorting == 'yes':
# taking the sorted parameters into a list
param_list = list(df_filtered['params'].values)
manual_sort_list = manual_sort_list
manual_sort = []
for p in manual_sort_list:
if p in param_list:
manual_sort.append(p)
# sorting by sorted param
df_concat = pd.DataFrame()
for c in manual_sort:
df_temp = df_filtered[df_filtered['params'] == c]
df_concat = pd.concat([df_concat, df_temp])
df_filtered = df_concat
return df_filtered
elif data == 'filter':
if manual_sorting == 'no':
df_filtered = df_filtered
# sorting in asc/desc order
if ch_ns > 0:
sort_col = list(df_filtered.columns)[2]
df_filtered = df_filtered.sort_values(sort_col, ascending=False)
else:
sort_col = list(df_filtered.columns)[2]
df_filtered = df_filtered.sort_values(sort_col, ascending=True)
elif manual_sorting == 'yes':
df_filtered = df_filtered
# taking the sorted parameters into a list
param_list = list(df_filtered['params'].values)
manual_sort_list = list(manual_sort_list)
manual_sort = []
for p in manual_sort_list:
if p in param_list:
manual_sort.append(p)
# sorting by sorted param
df_concat = pd.DataFrame()
for c in manual_sort:
df_temp = df_filtered[df_filtered['params'] == c]
df_concat = pd.concat([df_concat, df_temp])
df_filtered = df_concat
return df_filtered
# percent change in net sale from p1 to p2
ns1 = sale(mos1)
ns2 = sale(mos2)
ch_ns = change(ns1, ns2)
pc_ns = np.round(per_change(ns1, ns2), 2)
# Running final function to get output
df_filtered = run(ch_ns=ch_ns, hidden_param_list=hidden_param_list, manual_sort_list=manual_sort_list, param=param,
param_limit=param_limit, max_level=max_level, data=data, filter_basis=filter_basis,
top_parameter=top_parameter, manual_sorting=manual_sorting, sorting_basis=sorting_basis)
# Running function to get summary for top 5 parameters
df_top = run(ch_ns, hidden_param_list, manual_sort_list, param, param_limit, max_level, 'filter', 'abs', 5, 'no',
'param_value')
summary_list = summary(df_top)
df_top_diff = diff_df(df_top)
summary_list_diff = summary(df_top_diff)
# keeping only those differences whose corresponding impact entries are present
summary_list_filtered = []
for m in summary_list:
for n in summary_list_diff:
if m[0:3] == n[0:3]:
if n not in summary_list_filtered:
summary_list_filtered.append(n)
# Writing summary
if ch_ns > 0:
summary_text = f'Net Sale is increased by {pc_ns}% \n'
else:
summary_text = f'Net Sale is decreased by {pc_ns * -1}% \n'
x = min(len(summary_list), len(summary_list_filtered))  # guard against length mismatch between the two lists
for i in range(x):
m = summary_list[i]
n = summary_list_filtered[i]
if n[3] > 0:
change_word = 'is increased'
else:
change_word = 'is decreased'
if n[2] == 'Number_cust':
if n[0] == 'old_new_customer':
summary_text = summary_text + f" \n {i + 1}. The number of {n[1]} customers {change_word} by {np.abs(int(n[3]))} and contributing to estimated {np.round(((m[3] * ns2 * 0.01) / 100000), 2)} Lakhs revenue "
elif n[0] in ['drug_type', 'drug_category']:
summary_text = summary_text + f" \n {i + 1}. The number of customers for {n[1]} {n[0].replace('_', ' ')} {change_word} by {np.abs(int(n[3]))} and contributing to estimated {np.round(((m[3] * ns2 * 0.01) / 100000), 2)} Lakhs revenue "
elif n[0] in ['pr_flag', 'hd_flag', 'ecom_flag']:
summary_text = summary_text + f" \n {i + 1}. The number of customers for {n[1]} {change_word} by {np.abs(int(n[3]))} and contributing to estimated {np.round(((m[3] * ns2 * 0.01) / 100000), 2)} Lakhs revenue "
elif n[0] in ['payment_method']:
summary_text = summary_text + f" \n {i + 1}. The number of customers for {n[1]} payment {change_word} by {np.abs(int(n[3]))} and contributing to estimated {np.round(((m[3] * ns2 * 0.01) / 100000), 2)} Lakhs revenue "
else:
summary_text = summary_text + f" \n {i + 1}. The number of customers for the {n[0].replace('_', ' ')} {n[1]} {change_word} by {np.abs(int(n[3]))} and contributing to estimated {np.round(((m[3] * ns2 * 0.01) / 100000), 2)} Lakhs revenue "
else:
if n[0] == 'old_new_customer':
summary_text = summary_text + f" \n {i + 1}. The {n[2].replace('_', ' ')} of {n[1]} customers {change_word} by {np.abs(n[3])} and contributing to estimated {np.round(((m[3] * ns2 * 0.01) / 100000), 2)} Lakhs revenue "
elif n[0] in ['drug_type', 'drug_category']:
summary_text = summary_text + f" \n {i + 1}. The {n[2].replace('_', ' ')} for {n[1]} {n[0].replace('_', ' ')} {change_word} by {np.abs(n[3])} and contributing to estimated {np.round(((m[3] * ns2 * 0.01) / 100000), 2)} Lakhs revenue "
elif n[0] in ['pr_flag', 'hd_flag', 'ecom_flag']:
summary_text = summary_text + f" \n {i + 1}. The {n[2].replace('_', ' ')} for {n[1]} {change_word} by {np.abs(n[3])} and contributing to estimated {np.round(((m[3] * ns2 * 0.01) / 100000), 2)} Lakhs revenue "
elif n[0] in ['payment_method']:
summary_text = summary_text + f" \n {i + 1}. The {n[2].replace('_', ' ')} for {n[1]} payment {change_word} by {np.abs(n[3])} and contributing to estimated {np.round(((m[3] * ns2 * 0.01) / 100000), 2)} Lakhs revenue "
else:
summary_text = summary_text + f" \n {i + 1}. The {n[2].replace('_', ' ')} for the {n[0].replace('_', ' ')} {n[1]} {change_word} by {np.abs(n[3])} and contributing to estimated {np.round(((m[3] * ns2 * 0.01) / 100000), 2)} Lakhs revenue "
# Finally dropping null rows and columns
df_index = df_filtered.set_index(['params', 'parameter value'])
df_filtered = df_index.dropna(axis=0, how='all')
df_filtered = df_filtered.dropna(axis=1, how='all')
df_filtered.reset_index(inplace=True)
# Additional data
if additional_data == 'difference' or additional_data == 'both':
# Absolute difference
df_diff = diff_df(df_filtered)
    # transpose of the difference dataframe
df_diff.rename(columns={'parameter value': ''}, inplace=True)
df_diff_rev = df_diff.T
df_diff_rev.to_csv('/tmp/final_output_diff.csv', index=True, header=False)
if additional_data == 'absolute_impact' or additional_data == 'both':
df1 = df_filtered.iloc[:, :2]
df2 = df_filtered.iloc[:, 2:]
    df3 = df2.applymap(lambda x: round(np.divide((x * ns2), 100), 2) if not pd.isna(x) else x)
df_abs = pd.concat([df1, df3], axis=1)
# reverse of absolute dataframe
df_abs.rename(columns={'parameter value': ''}, inplace=True)
df_abs_rev = df_abs.T
df_abs_rev.to_csv('/tmp/final_output_abs.csv', index=True, header=False)
# saving reverse (transposed) dataframes
df_filtered.rename(columns={'parameter value': ''}, inplace=True)
df_rev = df_filtered.T
df_rev.to_csv('/tmp/final_output.csv', index=True, header=False)
# Formatting Excel
path = "/".join(os.getcwd().split("/")[:-2]) + "/tmp/"
if not os.path.exists(path):
os.mkdir(path, 0o777)
time_now = datetime.now().strftime('%Y_%m_%d_%H%M%S')
file_name = "sales_decomposition_{}_{}.xlsx".format(email_to, time_now)
local_file_full_path = path + file_name
# writing to excel
book = xlsxwriter.Workbook(local_file_full_path, {'strings_to_numbers': True})
ws = book.add_worksheet("SD")
bold = book.add_format({'bold': True})
cell_format_bg_yl = book.add_format({'bold': True})
cell_format_bg_yl.set_bg_color('yellow')
cell_format_bg_gr = book.add_format()
cell_format_bg_gr.set_bg_color('green')
cell_format_bg_rd = book.add_format()
cell_format_bg_rd.set_bg_color('red')
ws.write(0, 0, "Sales Decomposition", cell_format_bg_yl)
if initial_param != 'all':
ws.write(2, 0, "Overall filter applied for", bold)
ws.write(2, 1, initial_param, bold)
ws.write(2, 2, initial_param_value, bold)
else:
ws.write(2, 0, "Overall filter applied for all parameters", bold)
ws.write(1, 5, "Period 1")
ws.write(2, 5, "Period 2")
ws.write(0, 6, "Start Date")
ws.write(0, 7, "End Date")
ws.write(1, 6, start_date1)
ws.write(1, 7, end_date1)
ws.write(2, 6, start_date2)
ws.write(2, 7, end_date2)
ws.write(1, 8, "Net Sale1")
ws.write(1, 9, ns1)
ws.write(2, 8, "Net Sale2")
ws.write(2, 9, ns2)
ws.write(4, 5, "Net Sale Change ")
if ch_ns > 0:
ws.write(4, 6, ch_ns, cell_format_bg_gr)
elif ch_ns < 0:
ws.write(4, 6, ch_ns, cell_format_bg_rd)
ws.write(5, 5, "Net Sale Ch% ")
if pc_ns > 0:
ws.write(5, 6, pc_ns, cell_format_bg_gr)
elif pc_ns < 0:
ws.write(5, 6, pc_ns, cell_format_bg_rd)
# Adding csv data to excel
ws.write(7, 0, "Percentage Impact", cell_format_bg_yl)
limit2 = df_rev.shape[1] + 1
row_index = 8
with open("/tmp/final_output.csv") as csvfile:
csv_reader = csv.reader(csvfile)
for row in csv_reader:
for l in range(limit2):
if l == 0:
ws.write(row_index, l, row[l], bold)
else:
ws.write(row_index, l, row[l])
row_index += 1
ws.conditional_format('B10:DX19', {'type': '3_color_scale'})
if additional_data == 'difference':
# adding difference data
ws.write(20, 0, "Difference", cell_format_bg_yl)
limit3 = df_diff_rev.shape[1] + 1
row_index = 21
with open("/tmp/final_output_diff.csv") as csvfile:
csv_reader = csv.reader(csvfile)
for row in csv_reader:
for l in range(limit3):
if l == 0:
ws.write(row_index, l, row[l], bold)
else:
ws.write(row_index, l, row[l])
row_index += 1
ws.conditional_format('B23:DX36', {'type': '3_color_scale'})
if additional_data == 'absolute_impact':
# adding absolute change data
ws.write(20, 0, "Absolute Impact", cell_format_bg_yl)
limit4 = df_abs_rev.shape[1] + 1
row_index = 21
with open("/tmp/final_output_abs.csv") as csvfile:
csv_reader = csv.reader(csvfile)
for row in csv_reader:
for l in range(limit4):
if l == 0:
ws.write(row_index, l, row[l], bold)
else:
ws.write(row_index, l, row[l])
row_index += 1
ws.conditional_format('B23:DX36', {'type': '3_color_scale'})
if additional_data == 'both':
    # adding difference data
ws.write(20, 0, "Difference", cell_format_bg_yl)
limit3 = df_diff_rev.shape[1] + 1
row_index = 21
with open("/tmp/final_output_diff.csv") as csvfile:
csv_reader = csv.reader(csvfile)
for row in csv_reader:
for l in range(limit3):
if l == 0:
ws.write(row_index, l, row[l], bold)
else:
ws.write(row_index, l, row[l])
row_index += 1
ws.conditional_format('B23:DX36', {'type': '3_color_scale'})
# adding absolute change data
ws.write(33, 0, "Absolute Impact on Net Sale", cell_format_bg_yl)
limit4 = df_abs_rev.shape[1] + 1
row_index = 34
with open("/tmp/final_output_abs.csv") as csvfile:
csv_reader = csv.reader(csvfile)
for row in csv_reader:
for l in range(limit4):
if l == 0:
ws.write(row_index, l, row[l], bold)
else:
ws.write(row_index, l, row[l])
row_index += 1
ws.conditional_format('B36:DX50', {'type': '3_color_scale'})
# changing width of column
if limit2 >= 12:
limit = limit2
else:
limit = 12
for l in range(limit):
ws.set_column(0, l, 15)
# ws2 for summary
ws2 = book.add_worksheet("Summary")
ws2.write(0, 0, "Summary for top contributing factors", cell_format_bg_yl)
ws2.write(3, 0, summary_text)
ws2.set_column(0, 0, 60)
# worksheet3 to store parameters
ws3 = book.add_worksheet("Parameters")
ws3.write(0, 0, "Parameters Applied", cell_format_bg_yl)
ws3.write(2, 0, "Overall Filter Selector", bold)
ws3.write(2, 1, initial_param)
ws3.write(3, 0, "Overall Filter Value", bold)
ws3.write(3, 1, initial_param_value)
ws3.write(4, 0, "Impact Parameters", bold)
if param != 'all_params':
# input_list = list(param.split(','))
ws3.write(4, 1, param)
else:
param_list = "['drug_name', 'drug_type', 'drug_category', 'drug_composition', 'drug_company', 'store_staff','old_new_customer', 'payment_method','promo_code', 'pr_flag', 'hd_flag', 'ecom_flag', 'store_name', 'line_manager', 'city', 'abo','franchisee_name', 'cluster_name']"
ws3.write(4, 1, param_list)
ws3.write(5, 0, "Max Top Values in each Inpact Parameter", bold)
ws3.write(5, 1, param_limit)
ws3.write(6, 0, "Output Values Filter", bold)
ws3.write(6, 1, data)
ws3.write(7, 0, "Select Top %", bold)
ws3.write(7, 1, top_parameter)
ws3.write(8, 0, "Manual Sorting", bold)
ws3.write(8, 1, manual_sorting)
ws3.set_column(0, 0, 30)
# closing Excel
book.close()
# uploading to s3
s3.s3_client.upload_file(
Filename=local_file_full_path,
Bucket=s3.bucket_name,
Key=file_name)
# sending email to user
Email().send_email_file(
subject=f"sales decomposition analysis with overall filter {initial_param} as {initial_param_value} ",
mail_body=summary_text,
to_emails=email_to, file_paths=[local_file_full_path])
# closing the connection
rs_db.close_connection()

# ==== end of file: glue-jobs/src/scripts/sales-decomposition/sales-decomposition.py ====
import os
import sys
import argparse
from datetime import datetime
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB, PostGreWrite, MySQL
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.utils.general_funcs import nearest_store, month_diff
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
logger = get_logger()
logger.info(f"env: {env}")
""" DB connections """
pg_db = PostGreWrite()
pg_db.open_connection()
ms_db = MySQL()
ms_db.open_connection()
run_date = datetime.today().strftime("%Y-%m-%d")
stores_q = """
SELECT
st.id AS 'store_id',
st.name AS 'store',
sa.`name` as `line_manager`,
abo.name AS 'abo',
sm.name AS 'store_manager',
st.category AS 'store_type',
DATE(`opened-at`) AS 'opened_at',
CAST(st.`lat` AS DECIMAL(10,6)) AS `latitude`,
CAST(st.`lon` AS DECIMAL(10,6)) AS `longitude`,
st.`contact-number-1` AS `store-contact-1`,
st.`contact-number-2` AS `store-contact-2`,
st.`address` AS `store-address`,
sg.name as `city`,
case when lower(SUBSTRING(st.name, 1, 3))='b2b' then 'B2B' else 'Store' end as `store_b2b`,
st.`franchisee-id` as franchisee_id,
fc.`name` as `franchisee_name`
FROM
stores st
LEFT JOIN
(
SELECT
us.`store-id`, u.`name`, MAX(u.`created-at`) AS `date`
FROM
`users-stores` AS us
LEFT JOIN `users` AS u ON u.`id` = us.`user-id`
WHERE
`type` = 'area-business-owner'
GROUP BY us.`store-id`
) AS abo
ON abo.`store-id` = st.id
LEFT JOIN
(
SELECT
us.`store-id`,
u.`name`,
MAX(u.`created-at`) AS `date`
FROM
`users-stores` AS us
LEFT JOIN `users` AS u ON u.`id` = us.`user-id`
WHERE
`type` = 'store-manager'
GROUP BY us.`store-id`
) AS sm
ON sm.`store-id` = st.id
LEFT JOIN
(
SELECT
us.`store-id`,
u.`name`,
MAX(u.`created-at`) AS `date`
FROM
`users-stores` AS us
LEFT JOIN `users` AS u ON u.`id` = us.`user-id`
WHERE
`type` = 'line-manager'
GROUP BY us.`store-id`
) AS sa
ON sa.`store-id` = st.id
LEFT JOIN `store-groups` sg
on st.`store-group-id` =sg.id
LEFT JOIN `franchisees` fc
on st.`franchisee-id` = fc.`id`
"""
data = pd.read_sql_query(stores_q, ms_db.connection)
# data['opened_at'] = pd.to_datetime(data['opened_at'], errors='coerce')
data['run_date'] = pd.to_datetime(run_date)
# data['date_diff'] = (data['run_date'] - data['opened_at']).dt.days
# Month diff
# data['month_diff'] = month_diff(data['run_date'], data['opened_at'])
# Filling Null values in lat lon
data['latitude'] = data['latitude'].fillna(0)
data['longitude'] = data['longitude'].fillna(0)
# Nearest store calc
data['nearest_store'] = data['store_id'].apply(lambda x: nearest_store(x,
data,
lat_lon_col_name=['latitude', 'longitude'],
from_distance=5)[1:])
# data['nearest_store'] = data['nearest_store'].apply(lambda x: str(x.tolist()))
data['nearest_store'] = data['nearest_store'].apply(lambda x: x.tolist())
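# nearest_store (zeno_etl_libs.utils.general_funcs) is assumed here to return store-ids
# within from_distance km ordered by proximity, with the store itself first, so [1:] drops
# it. Further below the Python list is rendered as a Postgres array literal,
# e.g. [2, 5] -> '{2, 5}', before being inserted into stores_master.nearest_store.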
data['line_store'] = data['store_id']
data['line'] = "NA"
data['landmark'] = "NA"
truncate_q = """ DELETE FROM stores_master """
pg_db.engine.execute(truncate_q)
# Data type correction
data['nearest_store'] = data['nearest_store'].apply(lambda x: str(x).replace("[", "{").replace("]", "}"))
for d in tuple(data[['store_id', 'nearest_store']].values):
query = f"""
INSERT INTO stores_master (store_id, nearest_store) VALUES ('%s', '%s')
""" % tuple(d)
pg_db.engine.execute(query)
# old method
# data.to_sql(name='stores_master', con=pg_db.engine, if_exists='append', index=False, method='multi', chunksize=500)
pg_db.close_connection()
ms_db.close()

# ==== end of file: glue-jobs/src/scripts/stores-master/nearest_store.py ====
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.helper.email.email import Email
import argparse
import pandas as pd
import datetime
import numpy as np
import math
import simplejson, urllib.request
from datetime import datetime, timedelta
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
logger = get_logger()
logger.info(f"env: {env}")
start_time = datetime.now()
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# Start
logger.info('Script Manager Initialized')
# =============================================================================
# Fetching store in clusters
# =============================================================================
store_clusters_query = f'''
select
sf."store-id",
sc."cluster-id"
from
"prod2-generico".features f
join "prod2-generico"."store-features" sf on
f.id = sf."feature-id"
join "prod2-generico"."store-clusters" sc on
sc."store-id" = sf."store-id"
where
sf."feature-id" = 69
and sf."is-active" = 1
and sc."is-active" = 1
'''
store_clusters = rs_db.get_df(store_clusters_query)
if store_clusters is None:
    store_clusters = pd.DataFrame(columns=['store-id', 'cluster-id'])
logger.info("")
logger.info("Fetched Store Clusters")
str_info_cross = pd.DataFrame()
for cluster in store_clusters['cluster-id'].unique():
temp = store_clusters[store_clusters['cluster-id'] == cluster]
cluster_stores = tuple(map(int, list(temp['store-id'].unique())))
strs = """
select
id as "store-id",
name as "store-name",
lat ,
lon
from
"prod2-generico".stores
where
id in {}
""".format(cluster_stores)
str_info = rs_db.get_df(strs)
str_info['key'] = 0
str_info_cross_cluster = str_info.merge(str_info, on='key', how='outer', suffixes=('-x', '-y'))
str_info_cross_cluster = str_info_cross_cluster[(str_info_cross_cluster['store-id-x'] !=
str_info_cross_cluster['store-id-y'])]
del str_info_cross_cluster['key']
str_info_cross = str_info_cross.append(str_info_cross_cluster, ignore_index=True)
if isinstance(str_info_cross, type(None)):
str_info_cross = pd.DataFrame(columns=['store-id-x', 'store-name-x', 'lat-x', 'lon-x', 'store-id-y',
'store-name-y', 'lat-y', 'lon-y'])
logger.info("")
logger.info("Created Store Mapping in Cluster")
# =============================================================================
# Checking If New Stores are added or not
# Distance calculation will only run if there is change in cluster stores
# =============================================================================
strs2 = """
select
sd."store-id-x" as "store-id-x-in-dss",
sd."store-id-y" as "store-id-y-in-dss"
from
"prod2-generico"."store-distance" sd
"""
str_info_in_DSS = rs_db.get_df(strs2)
if isinstance(str_info_in_DSS, type(None)):
str_info_in_DSS = pd.DataFrame(columns=['store-id-x-in-dss', 'store-id-y-in-dss'])
check_if_change_in_cluster_store = str_info_cross.merge(str_info_in_DSS, left_on=['store-id-x', 'store-id-y'],
                                                        right_on=['store-id-x-in-dss', 'store-id-y-in-dss'], how='left')
difference = len(check_if_change_in_cluster_store[check_if_change_in_cluster_store['store-id-x-in-dss'].isna()])
logger.info("")
logger.info("Changes in store clusters - {}".format(difference))
# default overall status; only the "no change" branch reassigns it explicitly below
status2 = True
if difference == 0:
logger.info("")
logger.info("No Changes in Cluster stores So not running GMAPS API part to fetch distance")
status2 = True
table_status = 'Unchanged'
api_status = 'No hit'
else:
logger.info("")
logger.info("Changes in Cluster stores So running GMAPS API part to fetch distance")
table_status = 'Updated'
api_status = 'hit'
# =========================================================================
# Calculating Distance in air
# =========================================================================
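# distance_cal computes the haversine great-circle distance between two (lat, lon) points
# given in degrees; R is the Earth's radius in km and the result is returned in meters.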
def distance_cal(a, b, c, d):
R = 6373.0
lat1 = math.radians(a)
lon1 = math.radians(b)
lat2 = math.radians(c)
lon2 = math.radians(d)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = R * c
distance = distance * 1000 # To convert in meters
distance = round(distance, 0)
distance = int(distance)
return distance
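# Illustrative sanity check with hypothetical coordinates (Mumbai -> Pune, ~120 km straight line):
# assert 100_000 < distance_cal(19.0760, 72.8777, 18.5204, 73.8567) < 140_000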
str_info_cross['lat-x'] = str_info_cross['lat-x'].astype('float')
str_info_cross['lon-x'] = str_info_cross['lon-x'].astype('float')
str_info_cross['lat-y'] = str_info_cross['lat-y'].astype('float')
str_info_cross['lon-y'] = str_info_cross['lon-y'].astype('float')
str_info_cross['distance-in-air'] = np.vectorize(distance_cal)(str_info_cross['lat-x'], str_info_cross['lon-x'],
str_info_cross['lat-y'], str_info_cross['lon-y'])
logger.info('')
logger.info('Calculated Distance in Air')
# =========================================================================
# Calculating Distance on road
# =========================================================================
str_info_cross['lat-x'] = str_info_cross['lat-x'].astype('str')
str_info_cross['lon-x'] = str_info_cross['lon-x'].astype('str')
str_info_cross['lat-y'] = str_info_cross['lat-y'].astype('str')
str_info_cross['lon-y'] = str_info_cross['lon-y'].astype('str')
str_info_cross['lat-lon-x'] = str_info_cross[['lat-x', 'lon-x']].apply(lambda x: ','.join(x[x.notnull()]), axis=1)
str_info_cross['lat-lon-y'] = str_info_cross[['lat-y', 'lon-y']].apply(lambda x: ','.join(x[x.notnull()]), axis=1)
configobj = Config.get_instance()
secrets = configobj.get_secrets()
api_key = secrets['GMAPS_API_KEY']
gmaps_op = pd.DataFrame()
distance_matrix = pd.DataFrame()
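# One Distance Matrix API request per store pair; the driving distance in meters is read from
# result["rows"][0]["elements"][0]["distance"]["value"] (a missing route would raise a KeyError
# and stop the job).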
for index, row in str_info_cross.iterrows():
    lat_lon_x = row['lat-lon-x']
    lat_lon_y = row['lat-lon-y']
    url = "https://maps.googleapis.com/maps/api/distancematrix/json?origins={0}&destinations={1}&mode=driving&language=en-EN&sensor=false&key={2}".format(
        str(lat_lon_x), str(lat_lon_y), api_key)
    result = simplejson.load(urllib.request.urlopen(url))
    distance = result["rows"][0]["elements"][0]["distance"]["value"]
    # build one row per store pair and accumulate into distance_matrix
    gmaps_op = pd.DataFrame([{'store-id-x': row['store-id-x'],
                              'store-id-y': row['store-id-y'],
                              'distance': distance}])
    distance_matrix = distance_matrix.append(gmaps_op, ignore_index=True)
str_info_cross = pd.merge(left=str_info_cross, right=distance_matrix,
how='left', on=['store-id-x', 'store-id-y'])
str_info_cross.rename(columns={'distance': 'distance-on-road'}, inplace=True)
del str_info_cross['lat-lon-x']
del str_info_cross['lat-lon-y']
logger.info('')
logger.info('Calculated Distance on road via GMAPS API')
# str_info_cross['distance-on-road']=10
str_info_cross['uploaded-at'] = datetime.now()
# =========================================================================
# Writing table in Redshift
# =========================================================================
schema = 'prod2-generico'
table_name = 'store-distance'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
if isinstance(table_info, type(None)):
    logger.info('')
    logger.info(f"Table:{table_name} table does not exist, cannot upload")
    raise Exception(f"Table:{table_name} does not exist in schema {schema}")
else:
logger.info('')
logger.info(f"Table:{table_name} table exist")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=str_info_cross[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
if status2 is True:
status = 'Success'
else:
status = 'Failed'
end_time = datetime.now()
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject='{} {} : store_distance table {} - GMAPS API - {}'.format(
env, status, table_status, api_status),
mail_body=f" pso-stock-transfer-mapping table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
rs_db.close_connection()

# ---- end of file: glue-jobs/src/scripts/stores-master/store_distance.py ----
# !/usr/bin/env python
# coding: utf-8
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from datetime import datetime
from datetime import timedelta
from dateutil.tz import gettz
import time
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
# pip install mutagen
import urllib
from mutagen.mp3 import MP3
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-l', '--limit', default=None, type=int, required=False)
parser.add_argument('-dw', '--db_write', default="yes", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
limit = args.limit
db_write = args.db_write
# env = 'stage'
# limit = 10
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
# Connections
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
# ALERT: read_only=False gives a connection that can write
# this is the mysql write connection
ms_connection = MySQL(read_only=False)
ms_connection.open_connection()
s3 = S3()
# Global variable
# Run date
# run_date = datetime.today().strftime('%Y-%m-%d')
# Timezone aware
run_date = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d")
# run_date = '2021-09-01'
logger.info("Running for {}".format(run_date))
####################################
# DND list update
####################################
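# dnd_list_update: finds patients whose calling-dashboard interactions carry comment-id 79
# ("Do not disturb") and appends them to the Redshift dnd-list table, skipping patients
# that are already listed.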
def dnd_list_update():
#########################################
# Connections start
#########################################
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
##########################################
# DND customers
##########################################
calling_q = """
SELECT
a.`patient-id`,
b.`comment-id`,
b.`comment`,
c.`call-status`
FROM
`calling-dashboard` a
INNER JOIN
`calling-history` b
on a.`id` = b.`calling-dashboard-id`
LEFT JOIN
`call-statuses` c
ON b.`comment-id` = c.`id`
WHERE b.`comment-id` in (79)
GROUP BY
a.`patient-id`, b.`comment-id`, b.`comment`, c.`call-status`
"""
calling_q = calling_q.replace('`', '"')
logger.info(calling_q)
data_c = rs_db.get_df(query=calling_q)
data_c.columns = [c.replace('-', '_') for c in data_c.columns]
logger.info("Length of DND list fetched is {}".format(len(data_c)))
logger.info("Unique".format(data_c.nunique()))
logger.info("Unique comment-id is {}".format(data_c['comment_id'].unique()))
logger.info("Unique comment is {}".format(data_c['comment'].unique()))
logger.info("Unique call-status is {}".format(data_c['call_status'].unique()))
# Final list
data = data_c[['patient_id']].drop_duplicates()
data['call_dnd'] = 1
data['reason'] = 'Calling dashboard - Do not disturb'
logger.info("DND list length is {}".format(len(data)))
logger.info("Export columns are {}".format(data.columns))
# Remove those that are already part
read_schema = 'prod2-generico'
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
dnd_q = """
SELECT
"patient-id"
FROM
"dnd-list"
WHERE
"patient-id" is not null
GROUP BY
"patient-id"
"""
dnd_q = dnd_q.replace('`', '"')
logger.info(dnd_q)
data_dss = rs_db_write.get_df(query=dnd_q)
data_dss.columns = [c.replace('-', '_') for c in data_dss.columns]
# Already dnd
already_dnd = data_dss['patient_id'].dropna().drop_duplicates().to_list()
data_export = data[~data['patient_id'].isin(already_dnd)]
logger.info("Data export length - after removing already in list, is {}".format(len(data_export)))
# Dummy column values
data_export['phone'] = "-1"
data_export['sms_dnd'] = 0
# Timestamp
data_export['created_at'] = pd.to_datetime(datetime.now())
# Created-by
data_export['created_by'] = 'etl-automation'
##########################################
# DANGER ZONE
##########################################
logger.info("Insert started for length {}".format(len(data_export)))
write_schema = 'prod2-generico'
write_table_name = 'dnd-list'
table_info = helper.get_table_info(db=rs_db, table_name=write_table_name, schema=write_schema)
table_info_clean = table_info[~table_info['column_name'].isin(['id', 'updated-at'])]
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
s3.write_df_to_db(df=data_export[table_info_clean['column_name']], table_name=write_table_name,
db=rs_db_write, schema=write_schema)
logger.info("Insert done")
def calling_history_metadata():
######################################################
# Check existing
######################################################
# Check data already in DSS
read_schema = 'prod2-generico'
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
call_ds_q = """
SELECT
"calling-history-id"
FROM
"calling-history-metadata"
"""
call_ds_q = call_ds_q.replace('`', '"')
logger.info(call_ds_q)
last_dss = rs_db_write.get_df(query=call_ds_q)
last_dss.columns = [c.replace('-', '_') for c in last_dss.columns]
already_present = tuple(last_dss['calling_history_id'].to_list())
logger.info("Interaction id's present in DSS are : "
"{}".format(len(already_present)))
########################################
# Check recording lengths to be inserted
########################################
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
limit_str = f" limit {limit} ; " if limit else ""
calling_h_q = f"""
SELECT
id AS `calling-history-id`,
`call-recording-url`
FROM
`calling-history`
WHERE
`call-recording-url` != ''
{limit_str}
"""
calling_h_q = calling_h_q.replace('`', '"')
# logger.info(calling_h_q)
data = rs_db.get_df(query=calling_h_q)
data.columns = [c.replace('-', '_') for c in data.columns]
logger.info("Interaction history present in mySQL : {}".format(len(data)))
# Removing already present in dss
data = data[~data['calling_history_id'].isin(already_present)]
logger.info("New interaction history present in mySQL : {}".format(len(data)))
# If testing
# data = data.sample(n=10)
########################
# Calculation
########################
def get_length(url):
try:
download_data = urllib.request.urlopen(url)
local_file_path = s3.download_file_from_s3(file_name='sample.mp3')
f = open(local_file_path, 'wb')
f.write(download_data.read())
f.close()
audio = MP3(local_file_path)
return audio.info.length
except:
return 0
data['call_duration'] = data.apply(lambda row: get_length(row['call_recording_url']),
axis=1)
data_insert_dss = data[['calling_history_id', 'call_duration']]
#########################################################
# INSERT
########################################################
# Insert using to_sql
data_insert_dss.columns = [c.replace('_', '-') for c in data_insert_dss.columns]
logger.info("DSS - insert length is {}".format(len(data_insert_dss)))
expected_data_length_insert = len(last_dss) + len(data_insert_dss)
logger.info("DSS - Resulted data length after insert should be is "
"{}".format(expected_data_length_insert))
# DSS insert
logger.info("DSS - Insert starting")
write_schema = 'prod2-generico'
write_table_name = 'calling-history-metadata'
table_info = helper.get_table_info(db=rs_db_write, table_name=write_table_name, schema=write_schema)
# table_info_clean = table_info[~table_info['column_name'].isin(['id', 'updated-at'])]
data_export = data_insert_dss.copy()
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
s3.write_df_to_db(df=data_export[table_info['column_name']], table_name=write_table_name,
db=rs_db_write, schema=write_schema)
logger.info("DSS - Insert ended")
def calling_dashboard_info_update(run_date_param=None):
######################################################
# Check Tickets
######################################################
s = """
SELECT
a.*,
b.`campaign-name`
FROM
`calling-dashboard` a
LEFT JOIN `calling-dashboard-campaigns` b
on a.`campaign-id` = b.id
"""
tickets_data = pd.read_sql_query(s, ms_connection.connection)
tickets_data.columns = [c.replace('-', '_') for c in tickets_data.columns]
# Convert date values to date type
tickets_data['list_date'] = pd.to_datetime(tickets_data['list_date'], errors='coerce')
tickets_data['call_date'] = pd.to_datetime(tickets_data['call_date'], errors='coerce')
logger.info("Tickets present in existing sheet : {}".format(len(tickets_data)))
#########################################################
# Check which tickets have call date earlier than today, but still in open or reopen status
########################################################
# fetch run_date from parameters
# if not, take current date as run date
if run_date_param is not None:
run_date = run_date_param
else:
run_date = datetime.today().strftime('%Y-%m-%d')
logger.info("Running for {}".format(run_date))
# Check open tickets
open_tickets = tickets_data[tickets_data['status'].isin(['open', 'reopen'])]
logger.info("Total open tickets are {}".format(len(open_tickets)))
# Check backlog tickets
backlog_tickets = open_tickets[open_tickets['list_date'] < run_date]
logger.info("Total backlog open tickets are {}".format(len(backlog_tickets)))
# Update call date and backlog count for backlog tickets
backlog_update = backlog_tickets[['id', 'list_date', 'call_date', 'backlog_days_count']]
backlog_update['call_date'] = run_date
backlog_update['backlog_days_count'] = (
pd.to_datetime(backlog_update['call_date']) - backlog_update['list_date']).dt.days
backlog_update_mysql = backlog_update[['id', 'call_date', 'backlog_days_count']]
# Out of this, how many do we actually need to update?
s = """
SELECT
`id`,
`call-date`,
`backlog-days-count`
FROM
`calling-dashboard`
"""
last_data_mysql = pd.read_sql_query(s, ms_connection.connection)
last_data_mysql.columns = [c.replace('-', '_') for c in last_data_mysql.columns]
# Join and check exactly which to update
# Data match with mySQL
common_cols = ['id', 'call_date', 'backlog_days_count']
# To merge, keep both dtypes same
backlog_update_mysql['call_date'] = pd.to_datetime(backlog_update_mysql['call_date']).dt.strftime("%Y-%m-%d")
last_data_mysql['call_date'] = pd.to_datetime(last_data_mysql['call_date']).dt.strftime("%Y-%m-%d")
data_update_mysql = backlog_update_mysql[common_cols].merge(
last_data_mysql[common_cols], how='outer', on=common_cols, indicator=True)
# To update
data_update_mysql = data_update_mysql[data_update_mysql['_merge'] == 'left_only']
data_update_mysql = data_update_mysql[['id', 'call_date', 'backlog_days_count']]
data_update_mysql.columns = [c.replace('_', '-') for c in data_update_mysql.columns]
logger.info("Update to be done for backlog tickets count is {}".format(len(data_update_mysql)))
data_to_be_updated_list_mysql = list(data_update_mysql.apply(dict, axis=1))
#################################
# DANGER ZONE start
#################################
# mySQL write engine
update_counter = 0
for i in data_to_be_updated_list_mysql:
update_q = """
UPDATE
`calling-dashboard`
SET
`call-date` = '{1}',
`backlog-days-count` = {2}
WHERE
`id` = {0}
""".format(i['id'], i['call-date'], i['backlog-days-count'])
# logger.info("Running update for ticket {}".format(i['id']))
# logger.info("id:", i['id'], "call-date:", i['call-date'],
# "backlog-days-count:", i['backlog-days-count'])
if db_write == 'yes':
ms_connection.engine.execute(update_q)
# logger.info("Update for ticket {} is successful".format(i['id']))
# Print success periodically
update_counter = update_counter + 1
if update_counter % 1000 == 0:
logger.info("mySQL - Update done till row {}".format(update_counter))
#################################
# DANGER ZONE END
#################################
logger.info("mySQL - Update for calling-dashboard successful")
# Verify updates
s = """
SELECT
`id`,
`call-date`,
`backlog-days-count`
FROM
`calling-dashboard`
"""
update_mysql_verify = pd.read_sql_query(s, ms_connection.connection)
# To merge, keep both dtypes same
update_mysql_verify['call-date'] = pd.to_datetime(update_mysql_verify['call-date']).dt.strftime("%Y-%m-%d")
# Inner join with existing data
update_mysql_check = update_mysql_verify.merge(data_update_mysql, how='inner',
on=['id', 'call-date', 'backlog-days-count'])
logger.info("mySQL - Update done for data entries length is {}".format(len(update_mysql_check)))
if len(update_mysql_check) != len(data_update_mysql):
logger.info("Warning: Error, update didn't happen for all entries")
############################################
# Create follow-up tickets
#############################################
# Check which tickets have requested follow-up
s = """
SELECT
a.`calling-dashboard-id`,
a.`follow-up-required`,
a.`follow-up-at`,
b.`original-reference-id`,
b.`store-id`,
b.`campaign-id`,
b.`callback-reason`,
b.`patient-id`,
b.`follow-up-count`
FROM
`calling-history` a
LEFT JOIN `calling-dashboard` b on
a.`calling-dashboard-id` = b.`id`
"""
history_data = pd.read_sql_query(s, ms_connection.connection)
history_data.columns = [c.replace('-', '_') for c in history_data.columns]
# Convert follow up time to timestamp
# Right now format is dd-mm-yy so putting dayfirst = True filter
history_data['follow_up_at'] = pd.to_datetime(history_data['follow_up_at'], dayfirst=True, errors='coerce')
# Keep date as string only
history_data['follow_up_date'] = history_data['follow_up_at'].dt.strftime("%Y-%m-%d")
logger.info("Follow up date converted to string")
# Take only those who have requested follow-up
# Check if flag is integer or string
if is_numeric_dtype(history_data['follow_up_required']):
follow_up_data = history_data[history_data['follow_up_required'] == 1]
logger.info(
"Follow up required (integer flag) is present in interactions - "
"length is {}".format(len(follow_up_data)))
else:
follow_up_data = history_data[history_data['follow_up_required'] == '1']
logger.info(
"Follow up required (string flag) is present in interactions - "
"length is {}".format(len(follow_up_data)))
# Sort on follow up dates, max is first
follow_up_data = follow_up_data.sort_values(by=['calling_dashboard_id', 'follow_up_at'], ascending=[True, False])
# Choose only maximum follow-up time
follow_up_data['rank'] = follow_up_data.groupby(['calling_dashboard_id']).cumcount() + 1
follow_up_data_latest = follow_up_data[follow_up_data['rank'] == 1]
logger.info("Follow up data (max date per ticket) length is {}".format(len(follow_up_data_latest)))
# Keep only future follow-ups
follow_up_data_latest_valid = follow_up_data_latest[
follow_up_data_latest['follow_up_at'] >= pd.to_datetime(run_date)]
logger.info("Valid (future) follow up date length is {}".format(len(follow_up_data_latest_valid)))
# New ticket information
new_ticket_data = follow_up_data_latest_valid.copy()
# If reference id already exists, then copy it, else take ticket id
new_ticket_data['new_ticket_reference_id'] = np.where(new_ticket_data['original_reference_id'] > 0,
new_ticket_data['original_reference_id'],
new_ticket_data['calling_dashboard_id'])
# Drop other ticket id columns
new_ticket_data.drop(['calling_dashboard_id', 'original_reference_id'], axis=1, inplace=True)
# New original reference id
new_ticket_data = new_ticket_data.rename(columns={
'new_ticket_reference_id': 'original_reference_id'})
# Keep only one new follow-up on unique original_reference_id
new_ticket_data2 = new_ticket_data.copy()
new_ticket_data2['follow_up_date'] = pd.to_datetime(new_ticket_data2['follow_up_date'])
# Sort on follow up dates, max is first
new_ticket_data2 = new_ticket_data2.sort_values(by=['original_reference_id', 'follow_up_date'],
ascending=[True, False])
# Choose only maximum follow-up time
new_ticket_data2['rank'] = new_ticket_data2.groupby(['original_reference_id']).cumcount() + 1
# Only one for one ticket
new_ticket_data3 = new_ticket_data2[new_ticket_data2['rank'] == 1]
# Keep date as string only
new_ticket_data3['follow_up_date'] = new_ticket_data3['follow_up_date'].dt.strftime("%Y-%m-%d")
logger.info("Follow up date converted to string")
# Since new ticket is generated, so add 1 to follow-up count
new_ticket_data3['follow_up_count'] = new_ticket_data3['follow_up_count'] + 1
# Ticket list date taken as follow up date
new_ticket_data3['list_date'] = new_ticket_data3['follow_up_date']
# Call date same as list date for now
new_ticket_data3['call_date'] = new_ticket_data3['list_date']
# Update data-type to follow up
new_ticket_data3['data_type'] = 'follow up'
logger.info("One follow up for one root ticket - upload to be done - length is {}".format(len(new_ticket_data3)))
# INSERT DATA
# Final columns
upload_cols = ['store_id', 'original_reference_id', 'list_date', 'call_date', 'patient_id', 'campaign_id',
'data_type', 'callback_reason', 'follow_up_count']
data_upload_mysql = new_ticket_data3[upload_cols]
unique_check_cols = ['store_id', 'list_date', 'campaign_id', 'callback_reason', 'patient_id']
# Assert uniqueness, for DB update
unique_data = data_upload_mysql[unique_check_cols].drop_duplicates()
if len(data_upload_mysql) != len(unique_data):
logger.info("Warning, duplicate entries for date {}".format(run_date))
# Check last data first
# Check on store, list date, campaign id, subtype id, patient id
# Don't check on data-type yet
s = """
SELECT
`store-id`,
`list-date`,
`campaign-id`,
`callback-reason`,
`patient-id`
FROM
`calling-dashboard`
"""
last_data_mysql = pd.read_sql_query(s, ms_connection.connection)
last_data_mysql.columns = [c.replace('-', '_') for c in last_data_mysql.columns]
logger.info("Last data in mySQL length {}".format(len(last_data_mysql)))
# Join and check which to insert and which to update
# To merge, keep both dtypes same
last_data_mysql['list_date'] = pd.to_datetime(last_data_mysql['list_date']).dt.strftime("%Y-%m-%d")
# Data match with mySQL
data_export_mysql = data_upload_mysql.merge(
last_data_mysql, how='outer', on=unique_check_cols, indicator=True)
# To upload
data_upload_mysql2 = data_export_mysql[data_export_mysql['_merge'] == 'left_only']
data_insert_mysql = data_upload_mysql2[upload_cols]
# Priority can be default, can be updated later on
# Don't do any update in DSS for now
# Check last data
s = """
SELECT
`id`
FROM
`calling-dashboard`
"""
last_data_mysql = pd.read_sql_query(s, ms_connection.connection)
# Insert using to_sql
data_insert_mysql.columns = [c.replace('_', '-') for c in data_insert_mysql.columns]
logger.info("mySQL - insert to be done - length is {}".format(len(data_insert_mysql)))
expected_data_length_insert = len(last_data_mysql) + len(data_insert_mysql)
logger.info("mySQL - Resulted data length after insert should be is {}".format(expected_data_length_insert))
# Upload to mySQL DB
logger.info("mySQL - Insert starting")
if db_write == 'yes':
data_insert_mysql.to_sql(name='calling-dashboard', con=ms_connection.engine, if_exists='append', index=False,
method='multi', chunksize=500)
logger.info("mySQL - Insert ended")
logger.info("Follow up data uploaded to mySQL for run date {} with length : "
"{}".format(run_date, len(data_insert_mysql)))
logger.info("Sleeping for 10 seconds")
time.sleep(10)
logger.info("Slept for 10 seconds")
# Verify the inserted data
s = """
SELECT
`id`
FROM
`calling-dashboard`
"""
insert_mysql_verify = pd.read_sql_query(s, ms_connection.connection)
logger.info("mySQL - After insert - calling dashboard length is : {}".format(len(insert_mysql_verify)))
if len(insert_mysql_verify) != expected_data_length_insert:
logger.info("Warning: Error, update didn't happen for all entries")
def calling_dashboard_feedback_loop(run_date_param=None, follow_up_limit_param=None,
transaction_goal_campaigns_list_param=[]):
######################################################
# Check Tickets
######################################################
s = """
SELECT
a.*,
b.`campaign-name`
FROM
`calling-dashboard` a
LEFT JOIN `calling-dashboard-campaigns` b
on a.`campaign-id` = b.id
"""
tickets_data = pd.read_sql_query(s, ms_connection.connection)
tickets_data.columns = [c.replace('-', '_') for c in tickets_data.columns]
# Convert date values to date type
tickets_data['list_date'] = pd.to_datetime(tickets_data['list_date'], errors='coerce')
tickets_data['call_date'] = pd.to_datetime(tickets_data['call_date'], errors='coerce')
logger.info("Tickets present in existing sheet : {}".format(len(tickets_data)))
#########################################################
# Check which tickets have list date earlier than today, and in closed state
########################################################
# fetch run_date from parameters
# if not, take current date as run date
if run_date_param is not None:
run_date = run_date_param
else:
run_date = datetime.today().strftime('%Y-%m-%d')
logger.info("Running for {}".format(run_date))
# Check closed tickets
close_tickets = tickets_data[tickets_data['status'].isin(['closed'])]
logger.info("Total closed tickets are {}".format(len(close_tickets)))
# Check only maximum timestamp of non-null comments
# First fetch calling-history
# Tickets prior to run date, and of run date, if exist
# Tickets till T-2 only, in order to avoid scheduling in advance
run_date_minus2 = (pd.to_datetime(run_date) - timedelta(days=2)).strftime("%Y-%m-%d")
close_tickets2 = close_tickets[close_tickets['list_date'] <= run_date_minus2]
logger.info("Total closed tickets prior to run date minus2 is {}".format(len(close_tickets2)))
tickets = tuple(close_tickets2['id'].to_list())
logger.info("Tickets to find in calling-history are : {}".format(len(tickets)))
############################################
# Create follow-up tickets - for Non-responders
#############################################
# Check which tickets actually require follow-up
s = """
SELECT
a.`calling-dashboard-id`,
a.`follow-up-required`,
a.`comment`,
a.`created-at`,
b.`original-reference-id`,
b.`store-id`,
b.`campaign-id`,
b.`callback-reason`,
b.`patient-id`,
b.`follow-up-count`
FROM
`calling-history` a
LEFT JOIN `calling-dashboard` b
on a.`calling-dashboard-id` = b.`id`
WHERE
b.`id` in {}
""".format(tickets)
history_data = pd.read_sql_query(s, ms_connection.connection)
history_data.columns = [c.replace('-', '_') for c in history_data.columns]
logger.info("History data - length is {0} and unique tickets length is "
"{1}".format(len(history_data), history_data['calling_dashboard_id'].nunique()))
# Change to datetime
history_data['created_at'] = pd.to_datetime(history_data['created_at'])
# ORIGINAL REFERENCE ID, if exists, then take it as reference
# If reference id already exists, then copy it, else take ticket id
history_data['org_calling_dashboard_id'] = np.where(history_data['original_reference_id'] > 0,
history_data['original_reference_id'],
history_data['calling_dashboard_id'])
logger.info("Original reference id, if exists, has been taken up as original calling dashboard id,"
"so now unique tickets are {0}".format(history_data['org_calling_dashboard_id'].nunique()))
# Remove the tickets which already have at least a one follow-up required flag,
# because they will already be counted
already_follow_up_data = history_data[history_data['follow_up_required'] == 1][
['org_calling_dashboard_id']].drop_duplicates()
logger.info("Already Follow up required tickets - length is {}".format(len(already_follow_up_data)))
already_follow_up_data_t = already_follow_up_data['org_calling_dashboard_id'].to_list()
history_data2 = history_data.query("org_calling_dashboard_id not in @already_follow_up_data_t")
logger.info(
"After removing redundant entries - History data - length is "
"{0} and unique tickets length is {1}".format(
len(history_data2),
history_data2['org_calling_dashboard_id'].nunique()))
non_response_cols = ['Ringing/Not answering',
'Call rejected/Busy',
'Not reachable/Switched off']
# First see how many of them are also tagged in call-statuses master
s = """
SELECT
id AS call_status_id,
`call-status`
FROM
`call-statuses`
"""
dropdown_master = pd.read_sql_query(s, ms_connection.connection)
dropdown_master.columns = [c.replace('-', '_') for c in dropdown_master.columns]
logger.info("Dropdown master currently has dropdowns - length {}".format(len(dropdown_master)))
dropdown_match = dropdown_master.query("call_status in @non_response_cols")
logger.info("Dropdown master match with non-response cols defined - "
"length {}".format(len(dropdown_match)))
# Latest non-null comment should be one-amongst the non-response columns
follow_up_data_dropna = history_data2[~history_data2['comment'].isnull()]
logger.info("Follow up data - non-null comment - length is "
"{}".format(len(follow_up_data_dropna)))
follow_up_data = follow_up_data_dropna[follow_up_data_dropna['comment'] != '']
logger.info("Follow up data - non-empty string comment - length is "
"{}".format(len(follow_up_data)))
# Sort on interaction timestamp, max is first
follow_up_data = follow_up_data.sort_values(by=['org_calling_dashboard_id', 'created_at'],
ascending=[True, False])
# Choose only maximum follow-up time
follow_up_data['rank'] = follow_up_data.groupby(['org_calling_dashboard_id']).cumcount() + 1
follow_up_data_latest = follow_up_data[follow_up_data['rank'] == 1]
logger.info("Follow up data (latest interaction per ticket) length is "
"{}".format(len(follow_up_data_latest)))
# Latest interaction was non-response
follow_up_data_latest_nr = follow_up_data_latest.query("comment in @non_response_cols")
logger.info("Follow up data (latest interaction per ticket) Non-response length is {}".format(
len(follow_up_data_latest_nr)))
follow_up_data_latest_nr['interaction_date'] = follow_up_data_latest_nr['created_at'].dt.strftime("%Y-%m-%d")
follow_up_data_latest_nr['day_diff_rundate'] = (pd.to_datetime(run_date) -
pd.to_datetime(
follow_up_data_latest_nr['interaction_date'])).dt.days
# Add 2 days to latest interaction date, parametrize it later
follow_up_data_latest_nr['latest_date_plus2'] = (pd.to_datetime(follow_up_data_latest_nr['interaction_date']) +
timedelta(days=2)).dt.strftime("%Y-%m-%d")
# If follow-up is after >=2 days, keep rundate
# If follow-up is for yesterdays' tickets, then add 2days
follow_up_data_latest_nr['follow_up_date'] = np.where(follow_up_data_latest_nr['day_diff_rundate'] >= 2,
run_date,
follow_up_data_latest_nr['latest_date_plus2'])
# Remove those who are already-followed-up 3times, parametrized
if follow_up_limit_param is not None:
follow_up_limit = follow_up_limit_param
else:
follow_up_limit = 1
logger.info("Follow up limit is {}".format(follow_up_limit))
follow_up_data_latest_nr_focus1 = follow_up_data_latest_nr[
follow_up_data_latest_nr['follow_up_count'] < follow_up_limit]
logger.info(
"Follow up data after removing those already follow up equal to "
"limit or more times - length is {}".format(
len(follow_up_data_latest_nr_focus1)))
# Remove those with >7 days, vintage, parametrize it later
follow_up_data_latest_nr_focus2 = follow_up_data_latest_nr_focus1[
follow_up_data_latest_nr_focus1['day_diff_rundate'] <= 7]
logger.info("Follow up data after removing those with 7+days passed "
"since last interaction - length is {}".format(
len(follow_up_data_latest_nr_focus2)))
# Transaction goal - campaigns - parametrized
if len(transaction_goal_campaigns_list_param) > 0:
transaction_goal_campaigns_list = transaction_goal_campaigns_list_param
else:
transaction_goal_campaigns_list = tickets_data['campaign_id'].drop_duplicates().to_list()
logger.info("Transaction goal campaigns are {}".format(transaction_goal_campaigns_list))
# For, transaction goal campaigns, if transaction done in last 15 days, then no call
# parametrize it later
bill_days_cutoff = 15
logger.info("Last bill date cutoff for transaction goal campaigns is "
"{}".format(bill_days_cutoff))
# Now join with patients-metadata
s = """
SELECT
`patient-id`,
max(date(`created-at`)) as `last-bill-date`
FROM
`bills-1`
GROUP BY
`patient-id`
"""
patients_lbd = pd.read_sql_query(s, ms_connection.connection)
patients_lbd.columns = [c.replace('-', '_') for c in patients_lbd.columns]
# Merge
follow_up_data_latest_nr_focus2 = follow_up_data_latest_nr_focus2.merge(patients_lbd, how='left', on=['patient_id'])
# Check recency
follow_up_data_latest_nr_focus2['day_diff_lbd_rundate'] = (pd.to_datetime(run_date) -
pd.to_datetime(
follow_up_data_latest_nr_focus2['last_bill_date'],
errors='coerce')).dt.days
# If campaign is in transaction goal campaigns then filter, else as it is
follow_up_data_latest_nr_focus2['exclude_goal_completed'] = np.where(
((follow_up_data_latest_nr_focus2['campaign_id'].isin(transaction_goal_campaigns_list)) &
(follow_up_data_latest_nr_focus2['day_diff_lbd_rundate'] <= bill_days_cutoff)), 1, 0)
to_be_removed = follow_up_data_latest_nr_focus2[follow_up_data_latest_nr_focus2['exclude_goal_completed'] == 1]
logger.info("To be removed due to transaction goal completed - in relevant campaigns - "
"length {}".format(len(to_be_removed)))
follow_up_data_final = follow_up_data_latest_nr_focus2[
follow_up_data_latest_nr_focus2['exclude_goal_completed'] == 0]
logger.info("Final follow up data after removing transaction goal completed tickets - "
"length {}".format(len(follow_up_data_final)))
# New ticket information
new_ticket_data = follow_up_data_final.copy()
# If reference id already exists, then copy it, else take ticket id
# org_calling_dashboard_id we have, which is combined
# Drop other ticket id columns
new_ticket_data.drop(['calling_dashboard_id', 'original_reference_id'], axis=1, inplace=True)
# New original reference id
new_ticket_data = new_ticket_data.rename(columns={
'org_calling_dashboard_id': 'original_reference_id'})
# Keep only one new follow-up on unique original_reference_id
new_ticket_data2 = new_ticket_data.copy()
new_ticket_data2['follow_up_date'] = pd.to_datetime(new_ticket_data2['follow_up_date'])
# Sort on follow up dates, max is first
new_ticket_data2 = new_ticket_data2.sort_values(by=['original_reference_id', 'follow_up_date'],
ascending=[True, False])
# Choose only maximum follow-up time
new_ticket_data2['rank'] = new_ticket_data2.groupby(['original_reference_id']).cumcount() + 1
# Only one for one ticket
new_ticket_data3 = new_ticket_data2[new_ticket_data2['rank'] == 1]
logger.info("Max for one original ticket - length {}".format(len(new_ticket_data3)))
# Remove those original reference id's which already have another ticket open
# Check open tickets
open_tickets = tickets_data[tickets_data['status'].isin(['open', 'reopen'])]
open_tickets_ref_id = open_tickets['original_reference_id'].drop_duplicates().to_list()
new_ticket_data3 = new_ticket_data3.query("original_reference_id not in @open_tickets_ref_id")
logger.info("After removing those with already open tickets in root, - "
"length {}".format(len(new_ticket_data3)))
# Keep date as string only
new_ticket_data3['follow_up_date'] = new_ticket_data3['follow_up_date'].dt.strftime("%Y-%m-%d")
logger.info("Follow up date converted to string")
# Since new ticket is generated, so add 1 to follow-up count
new_ticket_data3['follow_up_count'] = new_ticket_data3['follow_up_count'] + 1
# Ticket list date taken as follow up date
new_ticket_data3['list_date'] = new_ticket_data3['follow_up_date']
# If ticket date in negative then keep run-date
new_ticket_data3['list_date'] = np.where(pd.to_datetime(new_ticket_data3['list_date']) < run_date, run_date,
new_ticket_data3['list_date'])
# Call date same as list date for now
new_ticket_data3['call_date'] = new_ticket_data3['list_date']
# Update data-type to follow up
new_ticket_data3['data_type'] = 'follow up'
logger.info("One follow up for one root ticket - upload to be done - "
"length is {}".format(len(new_ticket_data3)))
#################################################
# Sanity check, if original reference id already has 2 follow ups in list
#################################################
reference_tickets = tuple(new_ticket_data3['original_reference_id'].dropna().drop_duplicates().to_list())
logger.info("Reference ticket length is {}".format(len(reference_tickets)))
s = """
SELECT
`original-reference-id`,
count(`id`) as already_ticket_count
FROM
`calling-dashboard`
WHERE
`original-reference-id` in {}
GROUP BY
`original-reference-id`
""".format(reference_tickets)
followup_already = pd.read_sql_query(s, ms_connection.connection)
followup_already.columns = [c.replace('-', '_') for c in followup_already.columns]
# Already follow ups done, as per limit
followup_already_limit = followup_already[followup_already['already_ticket_count'] >= follow_up_limit].copy()
logger.info('Already follow up done as per limit, or more times length is {}'.format(len(followup_already_limit)))
# Remove these from the list
followup_already_two_list = followup_already_limit['original_reference_id'].to_list()
new_ticket_data4 = new_ticket_data3.query("original_reference_id not in @followup_already_two_list")
logger.info('After removing those with already follow up done 2 or more times length is '
'{}'.format(len(new_ticket_data4)))
# INSERT DATA
# Final columns
upload_cols = ['store_id', 'original_reference_id',
'list_date', 'call_date',
'patient_id', 'campaign_id',
'data_type', 'callback_reason',
'follow_up_count']
data_upload_mysql = new_ticket_data4[upload_cols]
unique_check_cols = ['store_id', 'list_date', 'campaign_id',
'callback_reason', 'patient_id']
# Assert uniqueness, for DB update
unique_data = data_upload_mysql[unique_check_cols].drop_duplicates()
logger.info("Unique data should be - length is {}".format(len(unique_data)))
if len(data_upload_mysql) != len(unique_data):
logger.info("Warning, duplicate entries for date {}".format(run_date))
data_upload_mysql = data_upload_mysql.drop_duplicates(subset=unique_check_cols)
logger.info("Unique data after dropping duplicates - length is "
"{}".format(len(data_upload_mysql)))
# Check last data first
# Check on store, list date, campaign id, subtype id, patient id
# Don't check on data-type yet
s = """
SELECT
`store-id`,
`list-date`,
`campaign-id`,
`callback-reason`,
`patient-id`
FROM
`calling-dashboard`
GROUP BY
`store-id`,
`list-date`,
`campaign-id`,
`callback-reason`,
`patient-id`
"""
last_data_mysql = pd.read_sql_query(s, ms_connection.connection)
last_data_mysql.columns = [c.replace('-', '_') for c in last_data_mysql.columns]
logger.info("Last data in mySQL length {}".format(len(last_data_mysql)))
# Join and check which to insert and which to update
# To merge, keep both dtypes same
last_data_mysql['list_date'] = pd.to_datetime(last_data_mysql['list_date']
).dt.strftime("%Y-%m-%d")
# Data match with mySQL
data_export_mysql = data_upload_mysql.merge(
last_data_mysql, how='outer', on=unique_check_cols, indicator=True)
# To upload
data_upload_mysql2 = data_export_mysql[data_export_mysql['_merge'] == 'left_only'].copy()
logger.info("After removing same day duplicate tickets - length is {}".format(len(data_upload_mysql2)))
data_insert_mysql = data_upload_mysql2[upload_cols].copy()
# Priority can be default, can be updated later on
# Don't do any update in DSS for now
# Check last data
s = """
SELECT
`id`
FROM
`calling-dashboard`
"""
last_data_mysql = pd.read_sql_query(s, ms_connection.connection)
# Insert using to_sql
data_insert_mysql.columns = [c.replace('_', '-') for c in data_insert_mysql.columns]
logger.info("mySQL - insert to be done - length is {}".format(len(data_insert_mysql)))
expected_data_length_insert = len(last_data_mysql) + len(data_insert_mysql)
logger.info("mySQL - Resulted data length after insert should be is "
"{}".format(expected_data_length_insert))
# Upload to mySQL DB
logger.info("mySQL - Insert starting")
if db_write == 'yes':
data_insert_mysql.to_sql(name='calling-dashboard', con=ms_connection.engine,
if_exists='append', index=False,
method='multi', chunksize=500)
logger.info("mySQL - Insert ended")
logger.info(
"Follow up data uploaded to mySQL for run date {} with length : "
"{}".format(run_date, len(data_insert_mysql)))
logger.info("Sleeping for 10 seconds")
time.sleep(10)
logger.info("Slept for 10 seconds")
# Verify the inserted data
s = """
SELECT
`id`
FROM
`calling-dashboard`
"""
insert_mysql_verify = pd.read_sql_query(s, ms_connection.connection)
logger.info("mySQL - After insert - calling dashboard length is : "
"{}".format(len(insert_mysql_verify)))
if len(insert_mysql_verify) != expected_data_length_insert:
logger.info("Warning: Error, update didn't happen for all entries")
def calling_dashboard_profile_status():
######################################################
# Check existing tickets
######################################################
s = """
SELECT
id AS calling_dashboard_id,
`patient-id`,
`campaign-id` AS mysql_campaign_id,
`callback-reason`
FROM
`calling-dashboard`
"""
tickets_info = pd.read_sql_query(s, ms_connection.connection)
tickets_info.columns = [c.replace('-', '_') for c in tickets_info.columns]
logger.info("Tickets present in existing sheet : {}".format(len(tickets_info)))
# Check campaign in mySQL
s = """
SELECT
id AS mysql_campaign_id,
`is-active`,
`patient-billing-status-display`
FROM
`calling-dashboard-campaigns`
"""
mysql_campaigns = pd.read_sql_query(s, ms_connection.connection)
mysql_campaigns.columns = [c.replace('-', '_') for c in mysql_campaigns.columns]
logger.info("Campaigns present in mySQL are : {}".format(len(mysql_campaigns)))
campaign_metadata = mysql_campaigns.copy()
# To-do. Hard-coded. Evaluate later
campaign_metadata['patient_bill_status_cutoff_d'] = 30
# Now join with tickets data
# Only keep relevant ones, by doing inner join with status enabled campaigns
tickets_info2 = tickets_info.merge(campaign_metadata[['mysql_campaign_id',
'patient_billing_status_display',
'patient_bill_status_cutoff_d']],
how='inner',
on=['mysql_campaign_id'])
# bill status missing value input if any
bill_days_cutoff_default = 30
tickets_info2['patient_bill_status_cutoff_d'] = tickets_info2['patient_bill_status_cutoff_d'].fillna(
bill_days_cutoff_default)
# Now join with patients-metadata
# To-do change with RS ETL table patients-metadata-2
s = """
SELECT
`patient-id`,
max(date(`created-at`)) as `last-bill-date`
FROM
`bills-1`
GROUP BY
`patient-id`
"""
patients_lbd = pd.read_sql_query(s, ms_connection.connection)
patients_lbd.columns = [c.replace('-', '_') for c in patients_lbd.columns]
# Merge
tickets_info2 = tickets_info2.merge(patients_lbd, how='left', on=['patient_id'])
# Check recency
tickets_info2['day_diff'] = (pd.to_datetime(run_date)
- pd.to_datetime(tickets_info2['last_bill_date'], errors='coerce')).dt.days
# Check if days fall within the range
# Only bill status enabled campaigns
logger.info("DSS campaign metadata for status enabled - fetched")
tickets_info2['profile_status'] = np.where(tickets_info2['patient_billing_status_display'] == 1,
np.where(tickets_info2['day_diff'] <= tickets_info2[
'patient_bill_status_cutoff_d'],
'Active', 'Inactive'), 'NA')
#########################################################
# Profile status to update
########################################################
upload_profile_data = tickets_info2[['calling_dashboard_id', 'profile_status']]
logger.info("Upload profile data for these many tickets : "
"{}".format(len(upload_profile_data)))
# Check last data
s = """
SELECT
`calling-dashboard-id`,
`profile-status`
FROM `patient-profile-status`
"""
last_data_mysql = pd.read_sql_query(s, ms_connection.connection)
last_data_mysql.columns = [c.replace('-', '_') for c in last_data_mysql.columns]
# Join and check which to insert and which to update
# Data match with mySQL
data_export_mysql = upload_profile_data.merge(
last_data_mysql, how='outer', on=['calling_dashboard_id', 'profile_status'], indicator=True)
# To upload
data_upload_mysql = data_export_mysql[data_export_mysql['_merge'] == 'left_only']
data_upload_mysql = data_upload_mysql[['calling_dashboard_id', 'profile_status']]
#########################################################
# INSERT OR UPDATE
########################################################
# Out of this, how many need insert and how many update?
mysql_tickets = last_data_mysql['calling_dashboard_id'].to_list()
# Insert
data_insert_mysql = data_upload_mysql.query("calling_dashboard_id not in @mysql_tickets")
# How many do we need to update
data_update_mysql = data_upload_mysql.query("calling_dashboard_id in @mysql_tickets")
# Insert using to_sql
data_insert_mysql.columns = [c.replace('_', '-') for c in data_insert_mysql.columns]
logger.info("mySQL - insert length is {}".format(len(data_insert_mysql)))
expected_data_length_insert = len(last_data_mysql) + len(data_insert_mysql)
logger.info("mySQL - Resulted data length after insert should be is "
"{}".format(expected_data_length_insert))
# MySQL insert
logger.info("mySQL - Insert starting")
if db_write == 'yes':
data_insert_mysql.to_sql(name='patient-profile-status', con=ms_connection.engine,
if_exists='append', index=False,
method='multi', chunksize=500)
logger.info("mySQL - Insert ended")
logger.info("Sleeping for 10 seconds")
time.sleep(10)
logger.info("Slept for 10 seconds")
# Verify the inserted data
s = """
SELECT
`calling-dashboard-id`,
`profile-status`
FROM `patient-profile-status`
"""
insert_mysql_verify = pd.read_sql_query(s, ms_connection.connection)
logger.info("mySQL - After insert - patients profile status length is : "
"{}".format(len(insert_mysql_verify)))
if len(insert_mysql_verify) != expected_data_length_insert:
logger.info("Warning: Error, update didn't happen for all entries")
# Update existing entries
# Have to do one by one
data_update_mysql.columns = [c.replace('_', '-') for c in data_update_mysql.columns]
logger.info("mySQL - Update to be done for data entries length is "
"{}".format(len(data_update_mysql)))
# Try SQL engine
data_to_be_updated_list_mysql = list(data_update_mysql.apply(dict, axis=1))
logger.info("mySQL - Update to be done for data entries - converted into list - "
"length is {}".format(
len(data_to_be_updated_list_mysql)))
#################################
# DANGER ZONE start
#################################
# mySQL write engine
logger.info("mySQL - update for patients profile status started")
update_counter = 0
for i in data_to_be_updated_list_mysql:
update_q = """
UPDATE
`patient-profile-status`
SET
`profile-status` = '{1}'
WHERE
`calling-dashboard-id` = {0}
""".format(i['calling-dashboard-id'], i['profile-status'])
if db_write == 'yes':
ms_connection.engine.execute(update_q)
# Print success periodically
update_counter = update_counter + 1
if update_counter % 1000 == 0:
logger.info("mySQL - Update done till row {}".format(update_counter))
#################################
# DANGER ZONE END
#################################
logger.info("mySQL - update for patients profile status successful")
# Verify
s = """
SELECT
`calling-dashboard-id`,
`profile-status`
FROM
`patient-profile-status`
"""
update_mysql_verify = pd.read_sql_query(s, ms_connection.connection)
# Inner join with existing data
update_mysql_check = update_mysql_verify.merge(data_update_mysql,
how='inner',
on=["calling-dashboard-id", "profile-status"])
logger.info("mySQL - Update done for data entries length is {}".format(len(update_mysql_check)))
if len(update_mysql_check) != len(data_update_mysql):
logger.info("Warning: Error, update didn't happen for all entries")
####################################################
# Main block
###################################################
# Run DND List update()
logger.info("Running DND list update")
dnd_list_update()
# Run Calling history metadata
logger.info("Running Calling history metadata update")
try:
calling_history_metadata()
except:
logger.info("Error in calling_history_metadata")
# Run calling dashboard info update
logger.info("Running Calling dashboard info update")
calling_dashboard_info_update()
# Run calling dashboard feedback loop
logger.info("Running Calling feedback loop update")
calling_dashboard_feedback_loop()
# Run calling dashboard profile status
logger.info("Running Calling profile status update")
calling_dashboard_profile_status()
#################################################
# Closing the DB Connections
rs_db.close_connection()
rs_db_write.close_connection()
ms_connection.close()
logger.info("File ends") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/calling-dashboard-daily-update/calling-dashboard-daily-update.py | calling-dashboard-daily-update.py |
import argparse
import sys
import os
import time
sys.path.append('../../../..')
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.redshift import Redshift
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-ss', '--source_schema_name', default="prod2-generico", type=str, required=False)
parser.add_argument('-ts', '--target_schema_name', default="public", type=str, required=False)
parser.add_argument('-lot', '--list_of_tables', default="", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-st', '--snapshot_type', default="automated", type=str, required=False)
parser.add_argument('-ud', '--utc_date', default="NA", type=str, required=False)
parser.add_argument('-re', '--reason_code', default="mis", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
source_schema_name = args.source_schema_name
target_schema_name = args.target_schema_name
list_of_tables = args.list_of_tables
email_to = args.email_to
snapshot_type = args.snapshot_type
utc_date = args.utc_date
reason_code = args.reason_code
utc_dates = utc_date.split(",")
""" get the last snapshot, if not given """
rs = Redshift()
list_of_tables = [t.strip() for t in list_of_tables.split(",") if t.strip()]
logger.info(f"source_schema_name: {source_schema_name}")
logger.info(f"target_schema_name: {target_schema_name}")
logger.info(f"list_of_tables: {list_of_tables}")
logger.info(f"email_to: {email_to}")
logger.info(f"reason_code: {reason_code}")
for utc_date in utc_dates:
if utc_date == "NA":
utc_date = None
snapshot_identifier = rs.get_snapshot_identifier(snapshot_type=snapshot_type, utc_date=utc_date)
snapshot_date = snapshot_identifier[-19:-9]
logger.info(f"snapshot_date: {snapshot_date}")
logger.info(f"snapshot_identifier: {snapshot_identifier}")
if not list_of_tables:
raise Exception("Please provide list of tables")
source_database_name = rs.database_name
cluster_identifier = rs.cluster_identifier
""" since we have single database in redshift so keeping source and target db same """
target_database_name = source_database_name
client = rs.client
email = Email()
rs_db = DB(read_only=False)
rs_db.open_connection()
success_tables_list = []
failed_tables_list = []
for i in list_of_tables:
logger.info(f"started table: {i}")
new_table_name = i + '-' + str(reason_code) + '-' + str(snapshot_date)
table_info = helper.get_table_info(db=rs_db, table_name=new_table_name, schema=target_schema_name)
if isinstance(table_info, type(None)):
logger.info(f"Table: {new_table_name} is absent.")
else:
success_tables_list.append(i)
logger.info(f"Table already exists: {new_table_name}, moving to next table")
table_restore_status = "Already Present"
email.send_email_file(
subject=f"[Table Restoration], Table: {i}, Status: {table_restore_status}",
mail_body=f"Status: {table_restore_status} \nTable: {i} \nSnapshot Date: {snapshot_date} "
f"\nCluster Identifier: {cluster_identifier} \nMessage: None",
to_emails=email_to, file_uris=[], file_paths=[])
continue
response = client.restore_table_from_cluster_snapshot(
ClusterIdentifier=cluster_identifier,
SnapshotIdentifier=snapshot_identifier,
SourceDatabaseName=source_database_name,
SourceSchemaName=source_schema_name,
SourceTableName=i,
TargetDatabaseName=target_database_name,
TargetSchemaName=target_schema_name,
NewTableName=new_table_name
)
logger.info(f"response: {response}")
table_restore_status = response['TableRestoreStatus']['Status']
table_restore_request_id = response['TableRestoreStatus']['TableRestoreRequestId']
message = ""
while table_restore_status not in ('SUCCEEDED', 'FAILED', 'CANCELED'):
time.sleep(60)
response = client.describe_table_restore_status(
ClusterIdentifier=cluster_identifier,
TableRestoreRequestId=table_restore_request_id,
MaxRecords=20
)
for r in response['TableRestoreStatusDetails']:
table_restore_status = r['Status']
logger.info(f"Status: {r['Status']}")
message = r.get("Message")
if table_restore_status == 'SUCCEEDED':
success_tables_list.append(i)
else:
failed_tables_list.append(i)
email.send_email_file(
subject=f"[Table Restoration], Table: {i}, Status: {table_restore_status}",
mail_body=f"Status: {table_restore_status} \nTable: {i} \nSnapshot Date: {snapshot_date} "
f"\nCluster Identifier: {cluster_identifier} \nMessage:{message}",
to_emails=email_to, file_uris=[], file_paths=[])
email.send_email_file(
subject=f"[Table Restoration], Full Status: Success-{len(success_tables_list)}, Failed-{len(failed_tables_list)}",
mail_body=f"Table successfully restored: {success_tables_list} \nTable restore failed: {failed_tables_list} "
f"\nSnapshot Date: {snapshot_date} \nCluster Identifier: {cluster_identifier}",
    to_emails=email_to, file_uris=[], file_paths=[])

# ---- end of file: glue-jobs/src/scripts/restore-from-snapshot/restore_from_snapshot.py ----
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import MySQL
import argparse
import pandas as pd
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
mysql = MySQL()
mysql.open_connection()
# ALERT: read_only=False, if you want connection which writes
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
# select demo (uses the read-only connection)
query = "SELECT * from `prod2-generico`.`test` limit 1"
df = pd.read_sql(sql=query, con=mysql.connection)
logger.info(f"df: {df}")
# Insert demo
df[['col1']].to_sql(name='test', con=mysql_write.engine, if_exists='append', index=False,
method='multi', chunksize=500, schema='prod2-generico')
# single line update demo
update_q = """
UPDATE
`prod2-generico`.`test`
SET
`col2` = '{1}'
WHERE
`col1` = '{0}'
""".format('xyz', '123')
logger.info(update_q)
mysql_write.engine.execute(update_q)
# delete demo
delete_q = """
delete from
`prod2-generico`.`test`
WHERE
`col1` = '{0}'
""".format('xyz')
logger.info(delete_q)
mysql_write.engine.execute(delete_q)
# Bulk update / update many rows at the same time
values_list = [{"col1": "abc", "col2": "xyz"}]
values_tuple = []
for i in values_list:
values_tuple.append((i['col2'], i['col1']))
logger.info(values_tuple)
""" Query to bulk update """
query = """
UPDATE
`test`
SET
`col2` = %s
WHERE
`col1` = %s
"""
try:
a = mysql_write.cursor.executemany(query, values_tuple)
logger.info(a)
except Exception as e:  # DB-API exception classes are typically defined on the module/connection, not the cursor
try:
logger.info("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
except IndexError:
logger.info("MySQL Error: %s" % str(e))
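# NOTE: depending on how the MySQL wrapper configures autocommit, an explicit
# commit (e.g. mysql_write.connection.commit()) may be needed for the
# executemany() changes above to persist.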
mysql_write.close()
mysql.close() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/experiments/mysql-sql-alchemny-demo.py | mysql-sql-alchemny-demo.py |
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
def main(db):
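    """ Refresh the promo table: update rows whose source promo-codes / campaigns
    records changed after the last sync, and insert promo codes not yet present. """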
try:
table_name = "promo"
db.execute(query="begin ;")
# db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
update_query = f"""update "prod2-generico"."{table_name}" a
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"promo-code" = b."promo-code" ,
"promo-code-type" = b."code-type",
"promo-eligibility" = b."type",
"promo-discount-type" = b."discount-type",
"promo-min-purchase" = b."min-purchase",
"campaign-id" = b."campaign-id",
"campaign-name" = b."campaign"
from (
select pc.id, pc."promo-code", pc."code-type", pc."type", pc."discount-type", pc."min-purchase", pc."campaign-id", c."campaign"
from "prod2-generico"."{table_name}" a
inner join "prod2-generico"."promo-codes" pc
on a.id = pc.id
left join "prod2-generico".campaigns c
on pc."campaign-id" = c.id
where
pc."updated-at" > a."updated-at"
or
c."updated-at" > a."updated-at") b
where a.id = b.id;
"""
db.execute(query=update_query)
insert_query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"promo-code",
"promo-code-type",
"promo-eligibility",
"promo-discount-type",
"promo-min-purchase",
"campaign-id",
"campaign-name"
)
select
pc.id ,
pc."created-by",
pc."created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta', GETDATE()) as "updated-at" ,
pc."promo-code" as "promo-code" ,
pc."code-type" as "promo-code-type",
pc."type" as "promo-eligibility",
pc."discount-type" as "promo-discount-type",
pc."min-purchase" as "promo-min-purchase",
pc."campaign-id" as "campaign-id",
c."campaign" as "campaign-name"
from
"prod2-generico"."promo-codes" pc
left join "prod2-generico"."{table_name}" pr on
pc.id = pr.id
left join "prod2-generico".campaigns c on
pc."campaign-id" = c.id
where
pr.id IS NULL
"""
db.execute(query=insert_query)
""" committing the transaction """
db.execute(query=" end; ")
except Exception as error:
raise error
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
                        help="This is env(dev, stage, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
print(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/experiments/insert_update_exp.py | insert_update_exp.py |
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-ul', '--user_list', default="", type=str, required=False,
                    help="Comma separated list of Redshift user names whose open connections should be killed")
args, unknown = parser.parse_known_args()
env = args.env
user_list = args.user_list or ""
os.environ['env'] = env
user_list = user_list.split(",")
rs_db = DB()
rs_db.open_connection()
logger = get_logger()
def get_pids(user=""):
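    """ Return a dataframe of active Redshift sessions for the given user, including
    remote address, session start time, database and any in-flight query. """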
query = f"""
select
s.process as process_id,
c.remotehost || ':' || c.remoteport as remote_address,
s.user_name as username,
s.starttime as session_start_time,
s.db_name,
i.starttime as current_query_time,
i.text as query
from
stv_sessions s
left join pg_user u on
u.usename = s.user_name
left join stl_connection_log c
on
c.pid = s.process
and c.event = 'authenticated'
left join stv_inflight i
on
u.usesysid = i.userid
and s.process = i.pid
where
username = '{user}'
order by
session_start_time desc;
"""
df = rs_db.get_df(query=query)
return df
def kill_connection(pid):
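    """ Terminate the Redshift session/backend identified by the given process id. """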
query = f"""
select pg_terminate_backend({pid});
"""
rs_db.execute(query=query)
for user in user_list:
pids_df = get_pids(user=user)
""" extra filter to be 100% sure """
pids_df['username'] = pids_df['username'].apply(lambda x: x.strip())
pids_df1 = pids_df[pids_df['username'].isin(["ro_django_accounts"])]
for pid in pids_df1['process_id']:
kill_connection(pid=pid)
logger.info(f"Killed, pid: {pid}")
rs_db.close_connection()
logger.info("Redshift connection closed")
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.queries.sales import sales_config
import pandas as pd
def main(db, table_suffix):
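    """ Incrementally append to the sales table: read the current max bill-id and
    return-id already present, then run the configured insert query for newer records. """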
table_name = f"sales"
bill_table_name = "bill-flags"
stores_master_table_name = "stores-master"
if table_suffix:
table_name = f"sales_{table_suffix}"
bill_table_name = f"bill-flags-{table_suffix}"
stores_master_table_name = f"stores-master-{table_suffix}"
db.execute(query="begin ;")
db.execute(sales_config.max_bill_id.format(table_name), params=None)
    sales_intermediate: pd.DataFrame = db.cursor.fetch_dataframe()
max_bill_id = sales_intermediate.values[0][0]
db.execute(sales_config.max_return_id.format(table_name), params=None)
    returns_intermediate: pd.DataFrame = db.cursor.fetch_dataframe()
max_return_id = returns_intermediate.values[0][0]
db.execute(sales_config.insert_sales_record.format(
table_name, bill_table_name, stores_master_table_name, max_bill_id,
bill_table_name, stores_master_table_name, max_return_id))
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
                        help="This is env(dev, stage, prod)")
parser.add_argument('-ts', '--table_suffix', default="", type=str, required=False,
help="Table suffix for testing.")
args, unknown = parser.parse_known_args()
env = args.env
table_suffix = args.table_suffix
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db, table_suffix=table_suffix)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/experiments/insert_optimized_sales.py | insert_optimized_sales.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db, table_suffix):
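    """ Refresh derived columns (drug, patient, bill-flag and store-master attributes)
    on existing sales rows whose source rows changed, for gross and return records. """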
table_name = f"sales"
bill_table_name = "bill-flags"
stores_master_table_name = "stores-master"
if table_suffix:
table_name = f"sales_{table_suffix}"
bill_table_name = f"bill-flags-{table_suffix}"
stores_master_table_name = f"stores-master-{table_suffix}"
db.execute(query="begin ;")
gross_update_query = f"""
update
"prod2-generico"."{table_name}"
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"drug-name" = c."drug-name",
"type" = c."type",
"category" = c."category",
"patient-category" = p."patient-category",
"p-reference" = p."reference",
"pr-flag" = NVL(pso2."pr-flag", false),
"hd-flag" = NVL(pso2."hd-flag", false),
"ecom-flag" = NVL(pso2."ecom-flag", false),
"hsncode" = c."hsncode",
"is-repeatable" = c."is-repeatable",
"store-manager" = msm."store-manager",
"line-manager" = msm."line-manager",
abo = msm.abo,
city = msm.city,
"store-b2b" = msm."store-b2b",
composition = c.composition,
company = c.company
from
"prod2-generico"."{table_name}" sales
join "prod2-generico"."drugs" c on
c."id" = sales."drug-id"
join "prod2-generico"."patients" p on
sales."patient-id" = p."id"
join "prod2-generico"."{bill_table_name}" as pso2 on
sales."bill-id" = pso2."id"
join "prod2-generico"."{stores_master_table_name}" as msm on
sales."store-id" = msm."id"
where
( c."updated-at" > sales."updated-at"
or p."updated-at" > sales."updated-at"
or pso2."updated-at" > sales."updated-at"
or msm."updated-at" > sales."updated-at")
and sales."bill-flag" = 'gross';
"""
#TODO: Optimize the bills-flag table
db.execute(query=gross_update_query)
return_update_query = f"""update
"prod2-generico"."{table_name}"
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"drug-name" = c."drug-name",
"type" = c."type",
"category" = c."category",
"patient-category" = p."patient-category",
"p-reference" = p."reference",
"pr-flag" = NVL(pso2."pr-flag", false),
"hd-flag" = NVL(pso2."hd-flag", false),
"ecom-flag" = NVL(pso2."ecom-flag", false),
"hsncode" = c."hsncode",
"is-repeatable" = c."is-repeatable",
"store-manager" = msm."store-manager",
"line-manager" = msm."line-manager",
abo = msm.abo,
city = msm.city,
"store-b2b" = msm."store-b2b",
composition = c.composition,
company = c.company
from
"prod2-generico"."{table_name}" sales
join "prod2-generico"."drugs" c on
c."id" = sales."drug-id"
join "prod2-generico"."patients" p on
sales."patient-id" = p."id"
join "prod2-generico"."{bill_table_name}" as pso2 on
sales."bill-id" = pso2."id"
join "prod2-generico"."{stores_master_table_name}" as msm on
sales."store-id" = msm."id"
where
( c."updated-at" > sales."updated-at"
or p."updated-at" > sales."updated-at"
or pso2."updated-at" > sales."updated-at"
or msm."updated-at" > sales."updated-at")
and sales."bill-flag" = 'return';
"""
db.execute(query=return_update_query)
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
                        help="This is env(dev, stage, prod)")
parser.add_argument('-ts', '--table_suffix', default="", type=str, required=False,
help="Table suffix for testing.")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
table_suffix = args.table_suffix
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db, table_suffix=table_suffix)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/experiments/update_optimized_sales.py | update_optimized_sales.py |
import argparse
import sys
import os
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import MySQL
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-ss', '--source_schema', default="prod2-generico-14-08-22", type=str,
required=False)
parser.add_argument('-ts', '--target_schema', default="dev-3-9-22-generico", type=str,
required=False)
# parser.add_argument('-tn', '--table_name', default="molecule-master", type=str, required=False)
# parser.add_argument('-cns', '--column_names', default="name,molecule-group", type=str,
# required=False)
args, unknown = parser.parse_known_args()
env = args.env
source_schema = args.source_schema
target_schema = args.target_schema
def get_data(table=None, columns=None, db=None, count=None):
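    """ Fetch id plus the given columns from `table`, ordered by id, optionally
    limited to `count` rows. """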
column_str = ",".join(["`" + col + "`" for col in columns])
limit_str = f" limit {count} " if count else " "
# limit_str = f" limit 10 "
query = f"""
SELECT
id, {column_str}
from
{table}
order by
id
{limit_str};
"""
return pd.read_sql_query(con=db.connection, sql=query)
os.environ['env'] = env
logger = get_logger()
# DB source
mysql_db_source = MySQL(read_only=False)
mysql_db_source.open_connection()
# DB target
mysql_db_target = MySQL(read_only=False)
mysql_db_target.open_connection()
table_columns = {
"molecule-master": "name,molecule-group,hash",
"composition-master-molecules-master-mapping": "molecule-master-id,composition-master-id,unit-type,unit-type-value",
"composition-master": "composition,hash",
"release-pattern-master": "name,group,short-form",
"drug-molecule-release": "molecule-master-id,release,drug-id",
"drugs": "available-in,composition-master-id",
"available-in-group-mapping": "available-in,available-group"
}
for table_name, column_names in table_columns.items():
logger.info(f"table: {table_name}")
# table_name = args.table_name
# column_names = args.column_names
column_names = column_names.split(",")
source_table_name = f"`{source_schema}`.`{table_name}`"
target_table_name = f"`{target_schema}`.`{table_name}`"
# logger.info(f"source_table_name: {source_table_name}")
# logger.info(f"target_table_name: {target_table_name}")
df_source = get_data(table=source_table_name, columns=column_names, db=mysql_db_source)
length = len(df_source)
logger.info(f"df_source: {df_source.head(2)}")
logger.info(f"df_source length: {length}")
df_target = get_data(table=target_table_name, columns=column_names, db=mysql_db_target,
count=length)
logger.info(f"df_target: {df_target.head(2)}")
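    # Note: DataFrame.compare() only works on identically-labelled frames; if the two
    # tables end up with different row counts, this raises instead of reporting a diff.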
df = df_source.compare(df_target)
if df.empty:
logger.info(f"Matched Successfully!")
else:
logger.info(f"\n\nMatch Failed: {df}")
# logger.info("\n\n") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/experiments/compare_tables.py | compare_tables.py |
import argparse
import os
import sys
import dateutil
import geopy
import geopy.distance
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-bs', '--batch_size', default=10, type=int, required=False)
parser.add_argument('-fr', '--full_run', default=0, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
full_run = args.full_run
batch_size = args.batch_size
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = "patient-nearest-store"
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
def geo_distance(lat1, long1, lat2, long2):
geo_dist = geopy.distance.geodesic((lat1, long1), (lat2, long2)).km
return geo_dist
max_q = f""" select max("created-at") max_exp from "{schema}"."{table_name}" """
max_date = rs_db.get_df(max_q)
max_date = max_date['max_exp'][0]
logger.info(f'max date: {max_date}')
# params
if max_date is None:
start_date = '2017-05-13 00:00:00'
start_date = dateutil.parser.parse(start_date)
else:
start_date = max_date
logger.info(f'start_date: {start_date}')
def get_store_data():
"""
function returns the stores lat, long data
"""
store_query = f"""
select
id,
-- store as nearest_store,
latitude,
longitude
-- , "store-type"
from
"{schema}"."stores-master" sm
where
"store-type" != 'dc'
and sm.latitude != ''
and sm.latitude is not null
and sm.longitude != ''
and sm.longitude is not null
and sm."id" is not null
"""
_store_df = rs_db.get_df(store_query)
logger.info(f'store count: {_store_df.shape}')
return _store_df
def patient_nearest_store():
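    """ For every patient in the global patient_df, brute-force the geodesic distance
    to every store and record the closest store id in 'nearest-store-id'. """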
global patient_df
for ix, patient in patient_df.iterrows():
nearest_store_id = store_df['id'][0]
nearest_store_distance = 1000000
for iy, store in store_df.iterrows():
distance = geo_distance(patient['latitude'], patient['longitude'], store['latitude'],
store['longitude'])
if distance < nearest_store_distance:
nearest_store_distance = distance
nearest_store_id = store['id']
# logger.info(
# f"i: {ix}, patient id: {patient['id']}, nearest_store_id: {nearest_store_id}, "
# f"nearest_store_distance: {nearest_store_distance}")
patient_df.loc[ix, 'nearest-store-id'] = int(nearest_store_id)
# return patient_df
def get_patients_data(batch=1):
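    """ Return one batch (of size batch_size) of patients billed on/after start_date,
    with the latest address latitude/longitude per patient. """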
patient_query = f"""
select
pm.id
, pm."last-bill-date"
-- , pm."primary-store-id"
, zpa.latitude
, zpa.longitude
-- , "previous-store-id"
from
"{schema}"."patients-metadata-2" pm
inner join (
select
"patient-id",
latitude,
longitude,
rank() over (partition by "patient-id" order by "created-at" desc ) r
from
"{schema}"."zeno-patient-address"
) zpa on
zpa."patient-id" = pm.id
where
r = 1
and zpa.latitude is not null
and zpa.latitude != ''
and zpa.longitude is not null
and zpa.longitude != ''
and pm."last-bill-date" >= '{start_date}'
-- and pm.id is not null
-- and pm.id = 5
-- and pm."primary-store-id" in (16,2)
-- and pm."last-bill-date" >= CURRENT_DATE - 90
group by
1,2,3,4
order by
pm."last-bill-date" asc
LIMIT {batch_size} OFFSET {(batch - 1) * batch_size}
"""
_patient_df = rs_db.get_df(patient_query)
logger.info(f'patient batch count: {_patient_df.shape}')
return _patient_df
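# The commented-out block below is an earlier vectorised (cross-join) variant of the
# nearest-store computation, kept for reference.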
# pat['i'] = 1
# store['i'] = 1
# hash_join = pat.merge(store, how='left', on='i')
# hash_join['geo_dist'] = hash_join.apply(
# lambda x: geo_distance(x.latitude_x, x.longitude_x, x.latitude_y, x.longitude_y), axis=1)
# hash_join['rank'] = hash_join.sort_values(by=['geo_dist']).groupby(['patient-id']).cumcount() + 1
# hash_join = hash_join[hash_join['rank'] == 1].copy()
# hash_join.columns = [c.replace('_', '-').lower() for c in hash_join.columns]
# hash_join = hash_join[['patient-id', 'nearest-store-id']]
# hash_join['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
# return hash_join
try:
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where date("created-at") > '{start_date}' '''
logger.info(f'truncate query: {truncate_query}')
rs_db.execute(truncate_query)
logger.info(f'batch size type:{type(batch_size)}')
store_df = get_store_data()
batch = 1
patient_df = get_patients_data(batch=batch)
while not patient_df.empty:
logger.info(f'Starting batch number:{batch}')
patient_nearest_store()
""" rename the columns """
patient_df.rename(columns={"id": "patient-id", "last-bill-date": "created-at"},
inplace=True)
patient_df[['nearest-store-id']] = patient_df[['nearest-store-id']].astype(int)
s3.write_df_to_db(df=patient_df[table_info['column_name']],
table_name=table_name, db=rs_db,
schema=schema)
logger.info(f'End of batch number:{batch}')
if not full_run:
break
batch += 1
patient_df = get_patients_data(batch=batch)
except Exception as e:
logger.exception(e)
finally:
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/patient-nearest-store/patient-nearest-store.py | patient-nearest-store.py |
import argparse
import os
import sys
import geopy
import geopy.distance
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-bs', '--batch_size', default=10, type=int, required=False)
parser.add_argument('-fr', '--full_run', default=0, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
full_run = args.full_run
batch_size = args.batch_size
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = "lat-long-nearest-store"
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
def geo_distance(lat1, long1, lat2, long2):
geo_dist = geopy.distance.geodesic((lat1, long1), (lat2, long2)).km
return geo_dist
query = f"""
insert into "{schema}"."{table_name}" (
"created-at",
"latitude",
longitude,
"nearest-store-id" )
select
convert_timezone('Asia/Calcutta', GETDATE()),
ll."latitude",
ll.longitude,
0
from
( select
latitude,
longitude
from
"{schema}"."zeno-patient-address"
where
latitude != '' and latitude is not null
and longitude != '' and longitude is not null
group by 1, 2 ) ll
left join "{schema}"."{table_name}" llns on
(llns.latitude = ll.latitude
and llns.longitude = ll.longitude)
where
llns.latitude is null and llns.longitude is null;
"""
""" Insert the new lat long in the table """
rs_db.execute(query=query)
def get_store_data():
"""
function returns the stores lat, long data
"""
store_query = f"""
select
id,
-- store as nearest_store,
latitude,
longitude
-- , "store-type"
from
"{schema}"."stores-master" sm
where
"store-type" != 'dc'
and sm.latitude != ''
and sm.latitude is not null
and sm.longitude != ''
and sm.longitude is not null
and sm."id" is not null
"""
_store_df = rs_db.get_df(store_query)
logger.info(f'store count: {_store_df.shape}')
return _store_df
def lat_long_nearest_store():
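    """ For every lat/long pair in the global lat_long_df, compute the geodesic
    distance to every store and record the closest store id. """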
global lat_long_df
for ix, lat_long in lat_long_df.iterrows():
nearest_store_id = store_df['id'][0]
nearest_store_distance = 1000000
for iy, store in store_df.iterrows():
distance = geo_distance(lat_long['latitude'], lat_long['longitude'], store['latitude'],
store['longitude'])
if distance < nearest_store_distance:
nearest_store_distance = distance
nearest_store_id = store['id']
lat_long_df.loc[ix, 'nearest-store-id'] = int(nearest_store_id)
def get_lat_long_data(batch=1):
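    """ Return one batch (of size batch_size) of lat/long rows that still have
    nearest-store-id = 0, i.e. not yet resolved. """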
lat_long_query = f"""
select
latitude,
longitude ,
"nearest-store-id"
from
"prod2-generico"."lat-long-nearest-store"
where
"nearest-store-id" = 0
LIMIT {batch_size} OFFSET {(batch - 1) * batch_size}
"""
_lat_long_df = rs_db.get_df(lat_long_query)
logger.info(f'lat long batch count: {_lat_long_df.shape}')
return _lat_long_df
def update_nearest_store():
    """ Push the computed nearest-store-id values into the main table via a temp
    table: truncate the temp table, bulk-insert the batch, then update matching rows. """
    # clean the temp table
t_query = f""" TRUNCATE TABLE {lat_long_temp_table} ;"""
rs_db.execute(query=t_query)
# Fixing the data type
lat_long_df[['nearest-store-id']] = lat_long_df[['nearest-store-id']].astype(int)
# fill the temp table
s3.write_df_to_db(df=lat_long_df[temp_table_info['column_name']], db=rs_db,
table_name=lat_long_temp_table, schema=None)
# Updating the data in table
_query = f"""
update
"{schema}"."{table_name}" t
set
"nearest-store-id" = s."nearest-store-id"
from
{lat_long_temp_table} s
where
( t.latitude = s.latitude and t.longitude = s.longitude)
"""
rs_db.execute(query=_query)
try:
store_df = get_store_data()
batch = 1
lat_long_df = get_lat_long_data(batch=batch)
if not lat_long_df.empty:
# Create temp table and update the nearest store
lat_long_temp_table = table_name.replace("-", "_") + "_temp"
rs_db.execute(query=f"DROP table IF EXISTS {lat_long_temp_table};")
_query = f"""
CREATE TEMP TABLE {lat_long_temp_table}
(
latitude VARCHAR(765) ENCODE lzo
,longitude VARCHAR(765) ENCODE lzo
,"nearest-store-id" INTEGER NOT NULL ENCODE az64
);
"""
rs_db.execute(query=_query)
temp_table_info = helper.get_table_info(db=rs_db, table_name=lat_long_temp_table,
schema=None)
while not lat_long_df.empty:
logger.info(f'Starting batch number:{batch}')
lat_long_nearest_store() # calculate the nearest store
update_nearest_store() # update the nearest store
logger.info(f'End of batch number:{batch}')
if not full_run:
break
batch += 1
lat_long_df = get_lat_long_data(batch=batch)
except Exception as e:
logger.exception(e)
finally:
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/patient-nearest-store/lat-long-nearest-store.py | lat-long-nearest-store.py |
# =============================================================================
# author: [email protected]
# purpose: to populate store-group-drug-price
# =============================================================================
# Note - In case of removing hardcoded discount remove block1,block2,block3
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from dateutil.tz import gettz
import datetime
import argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-t', '--tolerance_percentage', default=20, type=int, required=False)
parser.add_argument('-ned', '--near_expiry_days', default=90, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
tolerance_percentage = args.tolerance_percentage
near_expiry_days = args.near_expiry_days
os.environ['env'] = env
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
s3 = S3()
start_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("tolerance_percentage- " + str(tolerance_percentage))
logger.info("near_expiry_days- " + str(near_expiry_days))
# date parameter
logger.info("code started at {}".format(datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')))
status2 = False
if env == 'dev':
logger.info('development env setting schema and table accordingly')
schema2 = '`test-generico`'
table2 = '`store-group-drug-price-data`'
temp_sales_table = '`store-group-drug-price-data`'
elif env == 'stage':
logger.info('staging env setting schema and table accordingly')
schema2 = '`test-generico`'
table2 = '`store-group-drug-price-data`'
temp_sales_table = '`store-group-drug-price-data`'
elif env == 'prod':
logger.info('prod env setting schema and table accordingly')
schema2 = '`prod2-generico`'
table2 = '`store-group-drug-price`'
temp_sales_table = '`store-group-drug-price-copy`'
# =============================================================================
# Fetching Price - where Inventory is available
# =============================================================================
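# Pricing logic: within each store-group + drug, inventory lots are ranked by MRP
# (descending). The top lot's MRP / selling-rate is used unless any lot deviates
# from the highest MRP by more than tolerance_percentage, in which case the
# quantity-weighted MRP / selling-rate across lots is used instead. Near-expiry
# (within near_expiry_days), franchisee and schedule-H1 stock is excluded.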
query_inv_price = """
select
b.*,
case
when b."max-per-deviation-from-max-mrp" > {tolerance_percentage} then b."weighted-mrp"
else b."mrp"
end as "final-mrp",
case
when b."max-per-deviation-from-max-mrp" > {tolerance_percentage} then b."weighted-selling-rate"
else b."selling-rate"
end as "final-selling-rate"
from
(
select
a.*,
ROUND(sum(a."quantity" * a."mrp") over (partition by a."store-group-id" ,
a."drug-id"
order by
a.mrp desc,
a."selling-rate" desc
rows between unbounded preceding and
unbounded following)/ sum(a."quantity") over (partition by a."store-group-id" ,
a."drug-id"
order by
a.mrp desc,
a."selling-rate" desc
rows between unbounded preceding and
unbounded following), 2) as "weighted-mrp",
ROUND(sum(a."quantity" * a."selling-rate") over (partition by a."store-group-id" ,
a."drug-id"
order by
a.mrp desc,
a."selling-rate" desc
rows between unbounded preceding and
unbounded following)/ sum(a."quantity") over (partition by a."store-group-id" ,
a."drug-id"
order by
a.mrp desc,
a."selling-rate" desc
rows between unbounded preceding and
unbounded following), 2) as "weighted-selling-rate",
max(a."per-deviation-from-max-mrp") over (partition by a."store-group-id" ,
a."drug-id"
order by
a.mrp desc,
a."selling-rate" desc
rows between unbounded preceding and
unbounded following) as "max-per-deviation-from-max-mrp"
from
(
select
row_number() over (partition by s."store-group-id" ,
i."drug-id"
order by
i.mrp desc,
i."selling-rate" desc) as "row",
s."store-group-id",
i."drug-id",
sum(i.quantity) as "quantity",
i.mrp,
i."selling-rate",
max(d."type") as "drug-type",
case
when i.mrp = 0 then 100
else
ROUND((1 - (i.mrp / (max(i.mrp) over (partition by s."store-group-id" ,
i."drug-id"
order by
i.mrp desc,
i."selling-rate" desc
rows between unbounded preceding and
unbounded following))))* 100, 2)
end as "per-deviation-from-max-mrp",
case
when i."selling-rate" = 0 then 100
else
ROUND((1 - (i."selling-rate" / (max(i."selling-rate") over (partition by s."store-group-id" ,
i."drug-id"
order by
i.mrp desc,
i."selling-rate"
rows between unbounded preceding and
unbounded following))))* 100, 2)
end as "per-deviation-from-max-selling-rate"
from
"prod2-generico"."inventory-1" i
inner join "prod2-generico".stores s
on
i."store-id" = s.id
inner join "prod2-generico".drugs d
on
i."drug-id" = d.id
where
i."franchisee-inventory" = 0
and d.schedule != 'h1'
and i.mrp >= i."selling-rate"
and (i.quantity >= 1 )
and i."expiry" > dateadd(day,{near_expiry_days},getdate())
group by
s."store-group-id" ,
i."drug-id" ,
i.mrp ,
i."selling-rate") a)b
""".format(tolerance_percentage=tolerance_percentage,near_expiry_days=near_expiry_days)
inventory_combination = rs_db.get_df(query_inv_price)
logger.info('Fetched current price data where inventory is available')
# =============================================================================
# block1 - Hardcoded discount for ethical and high-value-ethical - 15%
# =============================================================================
harcoded_discount = 15
multiply_mrp_by = ((100-harcoded_discount)/100)
logger.info(f'hardcoded discount - {harcoded_discount}')
logger.info(f'multiply_mrp_by - {multiply_mrp_by}')
inventory_combination.rename(columns = {'final-selling-rate':'final-selling-rate1'},inplace = True)
conditions = [inventory_combination['drug-type'].isin(['ethical','high-value-ethical'])]
choices = [f'{harcoded_discount}%']
inventory_combination['harcoded_discount'] = np.select(conditions, choices)
conditions = [inventory_combination['drug-type'].isin(['ethical','high-value-ethical']),~inventory_combination['drug-type'].isin(['ethical','high-value-ethical'])]
choices = [inventory_combination['final-mrp'].astype(float)*multiply_mrp_by,inventory_combination['final-selling-rate1']]
inventory_combination['final-selling-rate2'] = np.select(conditions, choices)
inventory_combination['final-selling-rate'] = inventory_combination['final-selling-rate2']
# =============================================================================
# In case of removing hardcoded discount remove block1,block2,block3
# =============================================================================
final_price_inventory = inventory_combination[inventory_combination['row']==1][['store-group-id','drug-id','final-mrp','final-selling-rate']]
final_price_inventory.rename(columns={'final-mrp':'mrp',
'final-selling-rate':'selling-rate'},inplace = True)
# =============================================================================
# flagged cases analysis
# =============================================================================
inventory_combination['store-group-drug'] = inventory_combination['store-group-id'].astype(str) + '-' + inventory_combination['drug-id'].astype(str)
problematic_store_group_drug = inventory_combination[inventory_combination['max-per-deviation-from-max-mrp']>tolerance_percentage]['store-group-drug'].unique().tolist()
inventory_available_total_cases = len(inventory_combination['store-group-drug'].unique())
inventory_available_flagged_cases = len(problematic_store_group_drug)
flag_percentage = round((inventory_available_flagged_cases/inventory_available_total_cases)*100,2)
logger.info(f'total inventory_available case - Store-group + Drug Combinations - {inventory_available_total_cases}')
logger.info(f'Flagged inventory_available case - Store-group + Drug Combinations - {inventory_available_flagged_cases}')
logger.info(f'flagged percentage - {flag_percentage}%')
flagged_inventory_combination = inventory_combination[inventory_combination['store-group-drug'].isin(problematic_store_group_drug)]
del flagged_inventory_combination['store-group-drug']
# flagged_inventory_combination.to_csv('D:\Store drug composition group\price_test_3.csv')
# =============================================================================
# fetching data for store-group sold but no current inventory
# =============================================================================
query_store_group_sold_but_no_inv= """
select
concat(invsale."store-group-id", concat('-', invsale."drug-id")) as "store-group-drug"
from
(
select
sale."store-group-id",
sale."drug-id" ,
inv."store-group-inventory"
from
(
select
st."store-group-id" ,
s."drug-id"
from
"prod2-generico"."prod2-generico".sales s
left join "prod2-generico".stores st
on
s."store-id" = st.id
where
date(s."created-at") > date(dateadd(d,
-30,
current_date))
and s."bill-flag" = 'gross'
and st."store-group-id" != 2
group by
st."store-group-id" ,
s."drug-id")sale
left join (
select
i."drug-id" ,
s."store-group-id" ,
sum(i.quantity) as "store-group-inventory"
from
"prod2-generico"."prod2-generico"."inventory-1" i
left join "prod2-generico".stores s
on
s.id = i."store-id"
where
s."store-group-id" != 2
and i."expiry" > dateadd(day,{near_expiry_days},getdate())
group by
s."store-group-id" ,
i."drug-id")inv
on
sale."drug-id" = inv."drug-id"
and sale."store-group-id" = inv."store-group-id")invsale
where
invsale."store-group-inventory" = 0
""".format(near_expiry_days=near_expiry_days)
store_group_sale_but_no_inv = rs_db.get_df(query_store_group_sold_but_no_inv)
logger.info('Fetched data for store-group sold but no current inventory')
store_group_drug = tuple(map(str,list(store_group_sale_but_no_inv['store-group-drug'].unique())))
logger.info(f'store-group sold but no current inventory cases - {len(store_group_drug)}')
# =============================================================================
# Fetching Price for store-group sold but no current inventory
# =============================================================================
query_sale_price = """
select
*
from
(
select
row_number() over (partition by s."store-group-id" ,
i."drug-id"
order by
i.mrp desc,
i."selling-rate" desc) as "row",
s."store-group-id",
i."drug-id",
sum(i.quantity) as "quantity",
i.mrp,
i."selling-rate",
max(d."type") as "drug-type"
from
"prod2-generico"."inventory-1" i
inner join "prod2-generico".stores s
on
i."store-id" = s.id
inner join "prod2-generico".drugs d
on
i."drug-id" = d.id
where
i."franchisee-inventory" = 0
and d.schedule != 'h1'
and i.mrp >= i."selling-rate"
-- and i."expiry" > dateadd(day,90,getdate())
and concat( s."store-group-id", concat('-', i."drug-id")) in {store_group_drug}
group by
s."store-group-id" ,
i."drug-id" ,
i.mrp ,
i."selling-rate")a
where
a."row" = 1
""".format(store_group_drug=store_group_drug)
non_invenotory_combination = rs_db.get_df(query_sale_price)
# =============================================================================
# block2 - Hardcoded discount for ethical and high-value-ethical - 15%
# =============================================================================
logger.info(f'hardcoded discount - {harcoded_discount}')
logger.info(f'multiply_mrp_by - {multiply_mrp_by}')
non_invenotory_combination.rename(columns = {'selling-rate':'selling-rate1'},inplace = True)
conditions = [non_invenotory_combination['drug-type'].isin(['ethical','high-value-ethical'])]
choices = [f'{harcoded_discount}%']
non_invenotory_combination['harcoded_discount'] = np.select(conditions, choices)
conditions = [non_invenotory_combination['drug-type'].isin(['ethical','high-value-ethical']),~non_invenotory_combination['drug-type'].isin(['ethical','high-value-ethical'])]
choices = [non_invenotory_combination['mrp'].astype(float)*multiply_mrp_by,non_invenotory_combination['selling-rate1']]
non_invenotory_combination['selling-rate2'] = np.select(conditions, choices)
non_invenotory_combination['selling-rate'] = non_invenotory_combination['selling-rate2']
# =============================================================================
# In case of removing hardcoded discount remove block1,block2,block3
# =============================================================================
logger.info('Fetched current price data for cases with no current store-group inventory')
logger.info(f'price to update for non-inventory combinations for - {len(non_invenotory_combination["store-group-id"])} cases')
# Difference analysis - Why prices are not to be updated for all sold but no inventory cases
# a = pd.DataFrame(store_group_sale_but_no_inv['store-group-drug'].unique(), columns=['total'])
# b=a['total'].str.split('-',expand=True)
# b.rename(columns={0:'store_group_id_total',
# 1:'drug_id_total'},inplace=True)
# b['store_group_id_total'] = b['store_group_id_total'].astype(int)
# b['drug_id_total'] = b['drug_id_total'].astype(int)
# c = non_invenotory_combination[['store-group-id','drug-id']]
# c.rename(columns = {'store-group-id':'store_group_id_cal',
# 'drug-id':'drug_id_cal'},inplace =True)
# b = b.merge(c,left_on=['store_group_id_total','drug_id_total'],right_on=['store_group_id_cal','drug_id_cal'],how='left')
del non_invenotory_combination['row']
del non_invenotory_combination['quantity']
union = pd.concat([final_price_inventory, non_invenotory_combination[['store-group-id','drug-id','mrp','selling-rate']]])
# =============================================================================
# Fetching store-group and cluster combination
# =============================================================================
qc = """
select
sg.id as "store-group-id",
cluster."cluster-id",
sg."is-active" as "sg-is-active"
from
"prod2-generico"."store-groups" sg
left join
(
select
s."store-group-id" as "store-group-id",
sc."cluster-id"
from
"prod2-generico".features f
join "prod2-generico"."store-features" sf on
f.id = sf."feature-id"
join "prod2-generico"."store-clusters" sc on
sc."store-id" = sf."store-id"
join "prod2-generico".stores s
on
sc."store-id" = s.id
where
-- sf."feature-id" = 69
-- and sf."is-active" = 1
-- and
sc."is-active" = 1
group by
s."store-group-id",
sc."cluster-id")cluster
on
sg.id = cluster."store-group-id"
"""
store_group_clusters = rs_db.get_df(qc)
store_group_clusters['cluster-id'] = store_group_clusters['cluster-id'].apply(pd.to_numeric,
errors='ignore').astype('Int64')
store_group_clusters['store-group-id'] = store_group_clusters['store-group-id'].apply(pd.to_numeric,
errors='ignore').astype(
'Int64')
logger.info('Fetched total store-group and cluster combinations')
# =============================================================================
# Expanding store groups to clusters
# =============================================================================
# Note - Selling rate can be made 0 where cluster inventory is not available (Meaning Out of Stock)
# But those cases will be - inventory is available in store-group but not in cluster
# Final Decision is yet to be made on this, till that time Out of stock will be based on Store-group inventory
store_group_clusters_without_ndd = store_group_clusters[store_group_clusters['store-group-id']!=2]
store_group_clusters_without_ndd['cluster-id'].fillna(-987125,inplace = True)
store_group_without_cluster = pd.DataFrame(store_group_clusters_without_ndd['store-group-id'].unique(), columns = ['store-group-id'])
store_group_without_cluster['cluster-id'] = -987125
store_group_clusters_without_ndd = store_group_clusters_without_ndd.merge(store_group_without_cluster, on=['store-group-id','cluster-id'], how='outer')
union = union.merge(store_group_clusters_without_ndd[['store-group-id','cluster-id']], on='store-group-id',how='inner')
union['cluster-id'] = union['cluster-id'].replace(-987125, np.nan)
# union.to_csv('D:\Store drug composition group\price_test_5.csv')
# =============================================================================
# updating Temp table with current data for clusters
# =============================================================================
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
try:
temp_table_name = '`store-group-drug-price-data-temp`'
truncate_query = '''
DELETE FROM {schema2}.{temp_table_name} '''.format(schema2=schema2,temp_table_name=temp_table_name)
mysql_write.engine.execute(truncate_query)
logger.info('Existing store-group-drug-price-data-temp table Truncated')
union.to_sql(
name='store-group-drug-price-data-temp', con=mysql_write.engine,
if_exists='append',
chunksize=500, method='multi', index=False)
logger.info(' ')
logger.info('store-group-drug-price-data-temp table appended to MySQL')
# =============================================================================
# Updating price where Mismatch in calculated and current table
# =============================================================================
logger.info(' ')
logger.info('Updating for clusters')
for store_groups in store_group_clusters['store-group-id'].unique():
clusters = store_group_clusters[store_group_clusters['store-group-id']==store_groups]['cluster-id'].unique()
if (len(clusters)==1 and pd.isna(clusters[0])) or store_groups==2:
pass
else:
for cluster_id in sorted(clusters):
logger.info('store group - {}, cluster {} - started '.format(store_groups,cluster_id))
update1_query = """
UPDATE
{schema2}.{table2} sgdp
INNER JOIN {schema2}.{temp_table_name} sgdp2
ON
sgdp.`store-group-id` = sgdp2.`store-group-id`
and sgdp.`cluster-id` = {cluster_id}
and sgdp2.`cluster-id` = {cluster_id}
and sgdp.`drug-id` = sgdp2.`drug-id`
SET
sgdp.mrp = sgdp2.mrp,
sgdp.`selling-rate` = sgdp2.`selling-rate`
WHERE
sgdp.`store-group-id` != 2
and ( sgdp.mrp != sgdp2.mrp
OR sgdp.`selling-rate` != sgdp2.`selling-rate`)
""".format(cluster_id=cluster_id, schema2=schema2, table2=table2, temp_table_name=temp_table_name)
mysql_write.engine.execute(update1_query)
logger.info('store group - {} cluster {} - Update 1 MRP And Selling price '.format(store_groups, cluster_id))
# =============================================================================
# Updating selling rate to 0 where inventory is not present
# =============================================================================
update2_query = """
UPDATE
{schema2}.{table2} sgdp
LEFT JOIN {schema2}.{temp_table_name} sgdp2
ON
sgdp.`store-group-id` = sgdp2.`store-group-id`
and sgdp.`cluster-id` = {cluster_id}
and sgdp2.`cluster-id` = {cluster_id}
and sgdp.`drug-id` = sgdp2.`drug-id`
left join drugs d
on d.id = sgdp.`drug-id`
SET
sgdp.`selling-rate` = 0
WHERE
sgdp.`store-group-id` != 2
and d.schedule != 'h1'
and sgdp.`selling-rate` != 0
and sgdp.`cluster-id` = {cluster_id}
and sgdp.`store-group-id` = {store_groups}
and sgdp2.id is NULL
""".format(cluster_id=cluster_id, schema2=schema2, table2=table2, store_groups=store_groups, temp_table_name=temp_table_name)
mysql_write.engine.execute(update2_query)
logger.info('store group - {} cluster {} - Update 2 Selling price=0'.format(store_groups,cluster_id))
# =============================================================================
# Inserting data where data is not present
# =============================================================================
insert_query = """
INSERT
Into
{schema2}.{table2}
(
`store-group-id` ,
`drug-id` ,
`cluster-id` ,
mrp ,
`selling-rate` ,
`is-express` ,
`is-active`
)
(
SELECT
sgdpdt.`store-group-id` ,
sgdpdt.`drug-id` ,
sgdpdt.`cluster-id` ,
sgdpdt.mrp ,
sgdpdt.`selling-rate` ,
sgdpdt.`is-express` ,
sgdpdt.`is-active`
FROM
{schema2}.{temp_table_name} sgdpdt
left join {schema2}.{table2} sgdp
on
sgdpdt.`store-group-id` = sgdp.`store-group-id`
and sgdpdt.`cluster-id` = {cluster_id}
and sgdp.`cluster-id` = {cluster_id}
and sgdpdt.`drug-id` = sgdp.`drug-id`
WHERE
sgdp.id is NULL
and sgdpdt.`store-group-id`= {store_groups}
and sgdpdt.`cluster-id`= {cluster_id})
""".format(cluster_id=cluster_id, schema2=schema2, table2=table2, temp_table_name=temp_table_name, store_groups=store_groups)
mysql_write.engine.execute(insert_query)
logger.info('store group - {} cluster {} - Inserted data'.format(store_groups,cluster_id))
# =============================================================================
# Updating for Non clusters
# Updating price where Mismatch in calculated and current table
# =============================================================================
logger.info(' ')
logger.info('Updating for non clusters')
for store_groups in store_group_clusters['store-group-id'].unique():
if store_groups==2:
pass
else:
logger.info(' ')
logger.info('store group- {} started'.format(store_groups))
nc_update1_query = """
UPDATE
{schema2}.{table2} sgdp
INNER JOIN {schema2}.{temp_table_name} sgdp2
ON
sgdp.`store-group-id` = {store_groups}
and sgdp2.`store-group-id` = {store_groups}
and sgdp.`cluster-id` is NULL
and sgdp2.`cluster-id` is NULL
and sgdp.`drug-id` = sgdp2.`drug-id`
SET
sgdp.mrp = sgdp2.mrp,
sgdp.`selling-rate` = sgdp2.`selling-rate`
WHERE
sgdp.`store-group-id` != 2
and sgdp.`store-group-id` = {store_groups}
and ( sgdp.mrp != sgdp2.mrp
OR sgdp.`selling-rate` != sgdp2.`selling-rate`)
""".format(schema2=schema2, table2=table2, temp_table_name=temp_table_name, store_groups=store_groups)
mysql_write.engine.execute(nc_update1_query)
logger.info('store group - {} cluster Null - Update 1 MRP And Selling price '.format(store_groups))
# =============================================================================
# Updating selling rate to 0 where inventory is not present
# =============================================================================
nc_update2_query = """
UPDATE
{schema2}.{table2} sgdp
LEFT JOIN {schema2}.{temp_table_name} sgdp2
ON
sgdp.`store-group-id` = {store_groups}
and sgdp2.`store-group-id` = {store_groups}
and sgdp.`cluster-id` is NULL
and sgdp2.`cluster-id` is NULL
and sgdp.`drug-id` = sgdp2.`drug-id`
left join drugs d
on d.id = sgdp.`drug-id`
SET
sgdp.`selling-rate` = 0
WHERE
sgdp.`store-group-id` != 2
and d.schedule != 'h1'
and sgdp.`selling-rate` != 0
and sgdp.`cluster-id` is NULL
and sgdp.`store-group-id` = {store_groups}
and sgdp2.id is NULL
""".format(schema2=schema2, table2=table2, store_groups=store_groups,
temp_table_name=temp_table_name)
mysql_write.engine.execute(nc_update2_query)
logger.info('store group - {} cluster Null - Update 2 Selling price=0'.format(store_groups))
# =============================================================================
# Inserting data where data is not present
# =============================================================================
insert_query = """
INSERT
Into
{schema2}.{table2}
(
`store-group-id` ,
`drug-id` ,
`cluster-id` ,
mrp ,
`selling-rate` ,
`is-express` ,
`is-active`
)
(
SELECT
sgdpdt.`store-group-id` ,
sgdpdt.`drug-id` ,
sgdpdt.`cluster-id` ,
sgdpdt.mrp ,
sgdpdt.`selling-rate` ,
sgdpdt.`is-express` ,
sgdpdt.`is-active`
FROM
{schema2}.{temp_table_name} sgdpdt
left join {schema2}.{table2} sgdp
on
sgdpdt.`store-group-id` = {store_groups}
and sgdp.`store-group-id` = {store_groups}
and sgdpdt.`cluster-id` is NULL
and sgdp.`cluster-id` is NULL
and sgdpdt.`drug-id` = sgdp.`drug-id`
WHERE
sgdpdt.`store-group-id`= {store_groups}
and sgdpdt.`cluster-id` is NULL
and sgdp.id is NULL)
""".format( schema2=schema2, table2=table2,
temp_table_name=temp_table_name, store_groups=store_groups)
mysql_write.engine.execute(insert_query)
logger.info('store group - {} cluster NULL - Inserted data'.format(store_groups))
# =============================================================================
# This is used as safety Net, Can be scrapped in few days
# =============================================================================
logger.info('Start - Updating Selling price = 0 cases which are billed in last month, Temporary Solution')
sell_temp_query = """
update
{schema2}.{table2} s1
inner join {schema2}.{temp_sales_table} s2
on
s1.`store-group-id` = s2.`store-group-id`
and s1.`cluster-id` <=> s2.`cluster-id`
and s1.`drug-id` = s2.`drug-id`
set
s1.mrp = s2.mrp,
s1.`selling-rate` = s2.`selling-rate`
where
s1.`selling-rate` = 0
and s2.`selling-rate` != 0
and s2.`mrp` != 0
and s2.`selling-rate`<= s2.`mrp`;
""".format(schema2=schema2, table2=table2, temp_sales_table=temp_sales_table)
mysql_write.engine.execute(sell_temp_query)
logger.info('End - Updating Selling price = 0 cases which are billed in last month, Temporary Solution')
# =============================================================================
# block3 - Hardcoded discount for ethical and high-value-ethical - 15%
# =============================================================================
logger.info(f'hardcoded discount - {harcoded_discount}')
logger.info(f'multiply_mrp_by - {multiply_mrp_by}')
hardcoded_discount_query = """
update
{schema2}.{table2} sgdp
inner join {schema2}.drugs d
on
sgdp.`drug-id` = d.id
set
sgdp.`selling-rate` = sgdp.mrp*{multiply_mrp_by}
where
sgdp.`selling-rate` > 0
and sgdp.mrp > 0
and d.`type` in ('ethical', 'high-value-ethical') ;
""".format(schema2=schema2, table2=table2,multiply_mrp_by=multiply_mrp_by)
mysql_write.engine.execute(hardcoded_discount_query)
logger.info('End - Updating Selling price hardcoded discount')
# =============================================================================
# In case of removing hardcoded discount remove block1,block2,block3
# =============================================================================
status2 = True
except Exception as error:
logger.exception(error)
status2 = False
# =============================================================================
# Sending mail
# =============================================================================
if status2 is True:
status = 'Success'
else:
status = 'Failed'
end_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
flagged_inv = s3.save_df_to_s3(df=flagged_inventory_combination, file_name='flagged_cases_{}.csv'.format(cur_date))
raw_inventory = s3.save_df_to_s3(df=inventory_combination, file_name='raw_inventory_{}.csv'.format(cur_date))
raw_sale = s3.save_df_to_s3(df=non_invenotory_combination, file_name='raw_sold_but_no_inventory_{}.csv'.format(cur_date))
email.send_email_file(subject=f"{env}-{status} : {table2} table updated",
mail_body=f"{table2} table update {status}, Time for job completion - {min_to_complete} mins\n"
f"total inventory_available case - Store-group + Drug Combinations - {inventory_available_total_cases}\n"
f"Flagged inventory_available case - Store-group + Drug Combinations - {inventory_available_flagged_cases}\n"
f"flagged percentage - {flag_percentage}%\n"
f"store-group sold but no current inventory cases - {len(store_group_drug)}\n"
f"price to update for non-inventory combinations for - {len(non_invenotory_combination['store-group-id'])} cases\n"
f"parameter used - \n"
f"tolerance_percentage - {tolerance_percentage}\n"
f"near expiry days parameter - {near_expiry_days}\n",
to_emails=email_to, file_uris=[flagged_inv,raw_inventory,raw_sale])
# Closing the DB Connection
rs_db.close_connection()
mysql_write.close() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/store-group-drug-price/store-group-drug-price.py | store-group-drug-price.py |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from dateutil.tz import gettz
import datetime
import argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
s3 = S3()
start_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
# date parameter
logger.info("code started at {}".format(datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
status2 = False
if env == 'dev':
logger.info('development env setting schema and table accordingly')
schema2 = '`test-generico`'
table2 = '`store-group-composition-master-data-`'
elif env == 'stage':
logger.info('staging env setting schema and table accordingly')
schema2 = '`test-generico`'
table2 = '`store-group-composition-master-data-`'
else:
logger.info('prod env setting schema and table accordingly')
schema2 = '`prod2-generico`'
table2 = '`store-group-composition-master`'
# TODO -- query fetches 12L record, reduce data size if possible
# =============================================================================
# Fetching Compositions for cluster
# =============================================================================
s2 = """
select
s."store-group-id",
sc."cluster-id" as "cluster-id",
i."drug-id" ,
max(d."release") as "release",
max(d."dosage-form") as "dosage-form" ,
max(d.composition) as "composition",
Case
when max(d."company-id") = 6984 then 'GOODAID'
ELSE 'other'
END AS "goodaid-flag",
CASE
when max(d."type") in ('ethical', 'high-value-ethical') THEN 'ethical'
WHEN max(d."type") in ('generic', 'high-value-generic') THEN 'generic'
ELSE 'others'
END AS "drug-type"
from
"prod2-generico"."inventory-1" i
inner join "prod2-generico"."store-clusters" sc on
i."store-id" = sc."store-id"
and sc."is-active" = 1
inner join "prod2-generico".stores s
on
i."store-id" = s.id
inner join "prod2-generico".drugs d on
i."drug-id" = d.id
where
i."franchisee-inventory" = 0
and i.quantity>0
group by
s."store-group-id",
sc."cluster-id" ,
i."drug-id"
"""
cluster_combination = rs_db.get_df(s2)
logger.info('Fetched data for clusters')
# =============================================================================
# Fetching Compositions for non cluster
# =============================================================================
# renamed from `s3` to avoid shadowing the S3 helper instance created above
s3_query = """
select
s."store-group-id",
NULL as "cluster-id",
i."drug-id" ,
max(d."release") as "release",
max(d."dosage-form") as "dosage-form",
max(d.composition) as "composition",
Case
when max(d."company-id") = 6984 then 'GOODAID'
ELSE 'other'
END AS "goodaid-flag",
CASE
when max(d."type") in ('ethical', 'high-value-ethical') THEN 'ethical'
WHEN max(d."type") in ('generic', 'high-value-generic') THEN 'generic'
ELSE 'others'
END AS "drug-type"
from
"prod2-generico"."inventory-1" i
inner join "prod2-generico".stores s
on
i."store-id" = s.id
inner join "prod2-generico".drugs d on
i."drug-id" = d.id
where
i."franchisee-inventory" = 0
and i.quantity>0
group by
s."store-group-id",
i."drug-id"
"""
non_cluster_combination = rs_db.get_df(s3_query)
logger.info('Fetched data for non clusters')
union = pd.concat([cluster_combination, non_cluster_combination])
union = union[union['composition'].notna()]
# =============================================================================
# Calculating current composition master
# =============================================================================
def conversion_to_str_for_join(x):
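    """ Cast numeric drug ids to plain integer strings (e.g. 123.0 -> '123') so they
    can be concatenated with ','.join; missing values are simply stringified. """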
if type(x)!= type(None) and x is not None and x != 'nan' and pd.notna(x):
return str(int(x))
else:
return str(x)
union['drug-id'] = union['drug-id'].apply(conversion_to_str_for_join)
union['cluster-id'] = union['cluster-id'].fillna('-123')
# ethical
mask = (((union['drug-type'] == 'ethical') | (union['drug-type'] == 'high-value-ethical') )& (union['composition'] != '') & (union['composition'].notna()))
ethical = union[mask]
ethical.reset_index(drop=True, inplace=True)
ethical = ethical.groupby(['store-group-id', 'cluster-id', 'composition', 'dosage-form', 'release']).agg(
{'drug-id': ','.join}).reset_index()
ethical.rename(columns={'drug-id': 'ethical-drug-id-list'}, inplace=True)
# generic
mask2 = ((union['drug-type'] == 'generic') & (union['composition'] != '') & (union['composition'].notna()))
generic = union[mask2]
generic.reset_index(drop=True, inplace=True)
generic = generic.groupby(['store-group-id', 'cluster-id', 'composition', 'dosage-form', 'release']).agg(
{'drug-id': ','.join}).reset_index()
generic.rename(columns={'drug-id': 'generic-drug-id-list-dummy'}, inplace=True)
# goodaid
mask3 = ((union['goodaid-flag'] == 'GOODAID') & (union['composition'] != '') & (union['composition'].notna()))
goodaid = union[mask3]
goodaid.reset_index(drop=True, inplace=True)
goodaid = goodaid.groupby(['store-group-id', 'cluster-id', 'composition', 'dosage-form', 'release']).agg(
{'drug-id': ','.join}).reset_index()
goodaid.rename(columns={'drug-id': 'goodaid-drug-id-list'}, inplace=True)
union = pd.merge(ethical, generic ,how = 'outer',on =['store-group-id', 'cluster-id', 'composition', 'dosage-form', 'release'])
union = pd.merge(union, goodaid, how='outer',
on=['store-group-id', 'cluster-id', 'composition', 'dosage-form', 'release'])
def drug_list_sorter_goodaid(x):
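    """Sort the comma-separated drug-id list in descending order and keep only the first entry,
    so GOODAID compositions surface a single drug-id."""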
    if x is None or x == 'nan' or pd.isna(x):
return x
else:
lst = x.split(',')
lst.sort(reverse=True)
lst = lst[:1]
sorted_csv = ",".join(lst)
return sorted_csv
# In Goodaid only show single entry
union['goodaid-drug-id-list'] = union['goodaid-drug-id-list'].apply(drug_list_sorter_goodaid)
# If goodaid is present then in generic only show goodaid
conditions = [
union['goodaid-drug-id-list'].notna(),
union['goodaid-drug-id-list'].isna()
]
choices = [ union['goodaid-drug-id-list'], union['generic-drug-id-list-dummy']]
union['generic-drug-id-list'] = np.select(conditions, choices)
# If generic is present then we won't show ethical
conditions = [
union['generic-drug-id-list'].notna(),
union['generic-drug-id-list'].isna()
]
choices = [ None, union['ethical-drug-id-list']]
union['ethical-drug-id-list'] = np.select(conditions, choices)
union = union[['store-group-id', 'cluster-id', 'release', 'dosage-form',
'composition', 'ethical-drug-id-list', 'generic-drug-id-list']]
union['cluster-id'] = union['cluster-id'].replace(['-123'],np.nan)
# Sorting drug-list for join with prod table and updating value where there is change
def drug_list_sorter(x):
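    """Sort a comma-separated drug-id list in descending order and keep at most 5 entries
    before syncing with the prod table."""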
    if x is None or x == 'nan' or pd.isna(x):
return x
else:
lst = x.split(',')
lst.sort(reverse=True)
lst = lst[:5]
sorted_csv = ",".join(lst)
return sorted_csv
union['ethical-drug-id-list'] = union['ethical-drug-id-list'].apply(drug_list_sorter)
union['generic-drug-id-list'] = union['generic-drug-id-list'].apply(drug_list_sorter)
logger.info('Calculated composition master, ready to make changes on prod table')
# =============================================================================
# writing to Prod
# =============================================================================
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
status2 = False
try:
truncate_query = '''
DELETE
FROM
{}.{} sgcm
WHERE
sgcm.`store-group-id` != 2
'''.format(schema2, table2)
mysql_write.engine.execute(truncate_query)
logger.info(str(table2) + ' Existing table truncated, except where store-group-id is 2')
if env == 'dev':
table2 = 'store-group-composition-master-data-'
elif env == 'stage':
table2 = 'store-group-composition-master-data-'
elif env=='prod':
table2 = 'store-group-composition-master'
else:
table2 = 'store-group-composition-master-data-'
union.to_sql(
name=table2, con=mysql_write.engine,
if_exists='append',
chunksize=500, method='multi', index=False)
logger.info(' ')
logger.info(str(table2) + ' table appended to MySQL')
status2 = True
except Exception as error:
    logger.info(str(table2) + ' table load failed, error: ' + str(error))
finally:
if status2 is True:
end_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject=f"{env}- Success : {table2} table updated",
mail_body=f"{table2} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
# Closing the DB Connection
rs_db.close_connection()
mysql_write.close() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/store-group-drug-price/store-group-composition-master.py | store-group-composition-master.py |
#!/usr/bin/env python
# coding: utf-8
import argparse
import os
import sys
from datetime import datetime
from datetime import timedelta
from dateutil.tz import gettz
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.utils.consumer.crm_campaigns import CrmCampaigns # Import custom functions
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-dw', '--db_write', default="yes", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
db_write = args.db_write
# env = 'stage'
# limit = 10
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
# Instantiate the CRM campaigns class
cc = CrmCampaigns()
'''
# Connections
rs_db = DB()
rs_db.open_connection()
'''
# Global variable
# Run date
# run_date = datetime.today().strftime('%Y-%m-%d')
# Timezone aware
run_date = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d")
# run_date = '2021-09-01'
logger.info("Running for {}".format(run_date))
# Custom campaigns start
# Read data
# Can come from SQL query or from .csv read from an s3 folder
def campaign_mandatory_steps(data_pass, run_date, last_n_days_bill_param=15,
last_n_days_call_param=30, exclude_store=[0]):
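    """
    Apply the common hygiene filters to a campaign base: drop patients billed in the last
    `last_n_days_bill_param` days, called in the last `last_n_days_call_param` days via the
    calling dashboard, belonging to any store in `exclude_store`, or present in the DND list.
    """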
# Mandatory steps
# Remove Last 15 days billed already
data_pass = cc.no_bill_in_last_n_days(data_pass, run_date, last_n_days_param=last_n_days_bill_param)
# Should not have been called in last 30-days thru calling dashboard
data_pass = cc.no_call_in_last_n_days(data_pass, run_date, last_n_days_param=last_n_days_call_param)
data_pass = data_pass[~data_pass['store_id'].isin(exclude_store)]
# Read DND list
data_pass = cc.remove_dnd(data_pass)
return data_pass
def data_prep_pune_custom_campaign():
"""
"""
# Pune custom campaign
store_group_id_param = 3 # Pune
trans_start_d_param = (pd.to_datetime(run_date) - timedelta(days=120)).strftime("%Y-%m-%d")
trans_end_d_param = (pd.to_datetime(run_date) - timedelta(days=30)).strftime("%Y-%m-%d")
abv_lower_cutoff_param = 200
abv_upper_cutoff_param = 2000
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
data_q = """
select
a."store-id",
a."patient-id",
date(b."last-bill-date") as "last-bill-date",
b."average-bill-value"
from
(
select
rm."store-id",
rm."patient-id"
from
"retention-master" rm
inner join "stores-master" sm
on rm."store-id" = sm."id"
where
rm."is-repeatable" = True
and sm."store-group-id" = {0}
and date(rm."bill-date") between '{1}' and '{2}'
group by
rm."store-id",
rm."patient-id") a
inner join "patients-metadata-2" b
on
a."patient-id" = b."id"
where
date(b."last-bill-date") between '{1}' and '{2}'
and b."average-bill-value" between {3} and {4}
order by
a."store-id" asc,
b."average-bill-value" desc
""".format(store_group_id_param, trans_start_d_param, trans_end_d_param,
abv_lower_cutoff_param, abv_upper_cutoff_param)
logger.info(data_q)
cc.rs_db.execute(data_q, params=None)
data: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data is None:
data = pd.DataFrame(columns=['store_id', 'patient_id', 'last_bill_date', 'average_bill_value'])
data.columns = [c.replace('-', '_') for c in data.columns]
logger.info(len(data))
data_base_c_grp = data.drop_duplicates(subset='patient_id')
logger.info("Unique data length {}".format(len(data_base_c_grp)))
return data_base_c_grp
def data_prep_fofo_custom_campaign():
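    """
    FOFO custom campaign base: repeatable customers of franchisee stores (franchisee-id != 1)
    whose last bill falls between 120 and 15 days ago and whose ABV is between 200 and 2000.
    """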
trans_start_d_param = (pd.to_datetime(run_date) - timedelta(days=120)).strftime("%Y-%m-%d")
trans_end_d_param = (pd.to_datetime(run_date) - timedelta(days=15)).strftime("%Y-%m-%d")
abv_lower_cutoff_param = 200
abv_upper_cutoff_param = 2000
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
data_q = """
select
a."store-id",
a."patient-id",
date(b."last-bill-date") as "last-bill-date",
b."average-bill-value"
from
(
select
rm."store-id",
rm."patient-id"
from
"retention-master" rm
inner join "stores-master" sm
on rm."store-id" = sm."id"
where
rm."is-repeatable" = True
and sm."franchisee-id" != 1
and date(rm."bill-date") between '{0}' and '{1}'
group by
rm."store-id",
rm."patient-id") a
inner join "patients-metadata-2" b
on
a."patient-id" = b."id"
where
date(b."last-bill-date") between '{0}' and '{1}'
and b."average-bill-value" between {2} and {3}
order by
a."store-id" asc,
b."average-bill-value" desc
""".format(trans_start_d_param, trans_end_d_param,
abv_lower_cutoff_param, abv_upper_cutoff_param)
logger.info(data_q)
cc.rs_db.execute(data_q, params=None)
data: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data is None:
data = pd.DataFrame(columns=['store_id', 'patient_id', 'last_bill_date', 'average_bill_value'])
data.columns = [c.replace('-', '_') for c in data.columns]
logger.info(len(data))
data_base_c_grp = data.drop_duplicates(subset='patient_id')
logger.info("Unique data length {}".format(len(data_base_c_grp)))
return data_base_c_grp
def data_prep_refill_campaign():
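    """
    Chronic refill campaign base: patients with at least 2 chronic drugs due for refill in the
    next 7 days, last billed between 60 and 25 days ago, with ABV between 250 and 1500.
    """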
# For refill date filtering
run_date_plus7 = (pd.to_datetime(run_date) + timedelta(days=7)).strftime("%Y-%m-%d")
# For active patient filtering
run_date_minus25 = (pd.to_datetime(run_date) - timedelta(days=25)).strftime("%Y-%m-%d")
run_date_minus60 = (pd.to_datetime(run_date) - timedelta(days=60)).strftime("%Y-%m-%d")
# For last calling date filtering
run_date_minus30 = (pd.to_datetime(run_date) - timedelta(days=30)).strftime("%Y-%m-%d")
logger.info(f"Run date is {run_date}")
##########################################
    # Refill data
##########################################
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
refill_q = """
SELECT
a."patient-id",
a."store-id",
a."drug-id",
b."category",
a."refill-date"
FROM
"retention-refill" a
LEFT JOIN "drugs" b
on a."drug-id" = b."id"
WHERE
a."refill-date" between '{0}' and '{1}'
""".format(run_date, run_date_plus7)
logger.info(refill_q)
cc.rs_db.execute(refill_q, params=None)
data_base: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data_base is None:
data_base = pd.DataFrame(columns=['patient_id', 'store_id', 'drug_id', 'refill_date'])
data_base.columns = [c.replace('-', '_') for c in data_base.columns]
logger.info(len(data_base))
logger.info("Length of refill data base is {}".format(len(data_base)))
# Chronic only - current logic
data_base_c = data_base[data_base['category'] == 'chronic'].copy()
logger.info("After Filtering chronic - Length of data base is "
"{}".format(len(data_base_c)))
##########################################
    # At least 2 chronic drugs
##########################################
data_base_c_grp = data_base_c.groupby(['store_id',
'patient_id'])['drug_id'].count().reset_index()
logger.info("After grouping - Length of data base is {}".format(len(data_base_c_grp)))
data_base_c_grp = data_base_c_grp[data_base_c_grp['drug_id'] >= 2]
logger.info("After atleast 2drugs criteria filter - "
"Length of data base is {}".format(len(data_base_c_grp)))
data_base_c_grp = data_base_c_grp[['store_id', 'patient_id']]
data_base_c_grp = data_base_c_grp.drop_duplicates(subset='patient_id')
logger.info("After dropping duplicates - Length of data base is "
"{}".format(len(data_base_c_grp)))
##########################################
# Active customers window (-60 to -25)
# Was -45 to -15 originally
##########################################
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
pm_q = """
SELECT
id as "patient-id",
"average-bill-value"
FROM
"patients-metadata-2"
WHERE
date("last-bill-date") between '{0}' and '{1}'
""".format(run_date_minus60, run_date_minus25)
logger.info(pm_q)
cc.rs_db.execute(pm_q, params=None)
data_pm: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data_pm is None:
data_pm = pd.DataFrame(columns=['patient_id', 'average_bill_value'])
data_pm.columns = [c.replace('-', '_') for c in data_pm.columns]
logger.info(len(data_pm))
logger.info("Length of Active patients (-60 to -25) metadata - "
"fetched is {}".format(len(data_pm)))
# Merge with active customers
data_base_c_grp = data_base_c_grp.merge(data_pm, how='inner', on='patient_id')
logger.info("After merging with patients metadata - "
"Length of data base is {}".format(len(data_base_c_grp)))
##########################################
# ABV Filter
##########################################
data_base_c_grp = data_base_c_grp[data_base_c_grp['average_bill_value'].between(250, 1500)]
logger.info("Length of data base after ABV filtering - "
"length is {}".format(len(data_base_c_grp)))
return data_base_c_grp
def data_prep_premium_cum_hd_campaign():
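    """
    Premium (platinum/gold/silver) repeatable customers inactive for 91-180 days with ABV >= 250;
    the returned frame carries hd_flag so the HD and non-HD campaigns can be split downstream.
    """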
# Single data for 2 campaigns, separated only by HD vs non-HD
# For active patient filtering
run_date_minus91 = (pd.to_datetime(run_date) - timedelta(days=91)).strftime("%Y-%m-%d")
run_date_minus180 = (pd.to_datetime(run_date) - timedelta(days=180)).strftime("%Y-%m-%d")
logger.info(f"Run date is {run_date}")
#######################################
# SQL Logic
#######################################
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
list_q = """
select
pm."id" as patient_id
from
"patients-metadata-2" pm
inner join (
select
"patient-id"
from
"customer-value-segment"
where
"value-segment" in ('platinum', 'gold', 'silver')
group by
"patient-id"
) seg
on
pm."id" = seg."patient-id"
where
pm."is-repeatable" is True
and (current_date - date(pm."last-bill-date")) between 91 and 180
and pm."average-bill-value" >= 250
group by
pm."id"
"""
logger.info(list_q)
cc.rs_db.execute(list_q, params=None)
data: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data is None:
data = pd.DataFrame(columns=['patient_id'])
data.columns = [c.replace('-', '_') for c in data.columns]
logger.info(f"Data fetched with length: {len(data)}")
# Final list
logger.info(f"Unique patients list length is: {len(data)}")
##########################################
# Active customers window (-180 to -91)
##########################################
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
pm_q = """
SELECT
id as "patient-id",
"average-bill-value"
FROM
"patients-metadata-2"
WHERE
date("last-bill-date") between '{0}' and '{1}'
""".format(run_date_minus180, run_date_minus91)
logger.info(pm_q)
cc.rs_db.execute(pm_q, params=None)
data_pm: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data_pm is None:
data_pm = pd.DataFrame(columns=['patient_id', 'average_bill_value'])
data_pm.columns = [c.replace('-', '_') for c in data_pm.columns]
logger.info(len(data_pm))
logger.info("Length of Active patients (-180 to -91) metadata - "
"fetched is {}".format(len(data_pm)))
# Merge with active customers
data_base_c_grp = data.merge(data_pm, how='inner', on='patient_id')
logger.info("After merging with patients metadata - "
"Length of data base is {}".format(len(data_base_c_grp)))
##########################################
# Latest store-id
##########################################
data_store = cc.patient_latest_store(data_base_c_grp)
########################################
# Merge
########################################
data_base_c_grp = data_base_c_grp.merge(data_store, how='inner', on='patient_id')
logger.info(f"After merging with store-id data - Length of data base is {len(data_base_c_grp)}")
##########################################
# Non-HD vs HD filtering
##########################################
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
hd_q = """
SELECT
"patient-id"
FROM
"retention-master" a
WHERE
date("bill-date") between '{0}' and '{1}'
and "hd-flag" is True
GROUP BY
"patient-id"
""".format(run_date_minus180, run_date_minus91)
cc.rs_db.execute(hd_q, params=None)
data_hd: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data_hd is None:
data_hd = pd.DataFrame(columns=['patient_id'])
data_hd.columns = [c.replace('-', '_') for c in data_hd.columns]
data_hd['hd_flag'] = 1
logger.info("Length of HD active 180-91 is - {}".format(len(data_hd)))
# Merge with premium set
data_base_c_grp = data_base_c_grp.merge(data_hd, how='left', on='patient_id')
data_base_c_grp['hd_flag'] = data_base_c_grp['hd_flag'].fillna(0)
logger.info("After merging with hd-data, length is {}".format(len(data_base_c_grp)))
logger.info("HD filtered data length is {}".format(data_base_c_grp['hd_flag'].sum()))
return data_base_c_grp
def data_prep_pilot_store_margin():
"""
Pilot Project extra discount
"""
store_ids_mulund = [2, 4, 230, 244, 264] # Mulund Cluster
store_ids_thane = [89, 126, 122, 139, 144, 145, 233, 304] # Thane Cluster
store_ids = store_ids_mulund + store_ids_thane
lost_customer_cutoff_1 = 30
lost_customer_cutoff_2 = 120
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
data_q = f"""
select
T1."store-id",
T1."patient-id",
T1."last-bill-date",
T1."average-bill-value"
from
(
select
"previous-store-id" as "store-id",
id as "patient-id",
date("last-bill-date") as "last-bill-date",
"average-bill-value",
rank() over (partition by "previous-store-id"
order by
"average-bill-value" desc) as "rank"
from
"patients-metadata-2" pm
where
"previous-store-id" in {tuple(store_ids)}
and datediff('days',
"last-bill-date",
current_date) >= {lost_customer_cutoff_1}
and datediff('days',
"last-bill-date",
current_date) <= {lost_customer_cutoff_2}) T1
where
T1."rank" <= 100;
"""
logger.info(data_q)
cc.rs_db.execute(data_q, params=None)
data: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data is None:
data = pd.DataFrame(columns=['store_id', 'patient_id', 'last_bill_date', 'average_bill_value'])
data.columns = [c.replace('-', '_') for c in data.columns]
logger.info(len(data))
data_base_c_grp = data.drop_duplicates(subset='patient_id')
logger.info("Unique data length {}".format(len(data_base_c_grp)))
return data_base_c_grp
"""""
def data_prep_diagnostic_calling():
# Calling for diagnostic
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
data_q =
select
pm.id as "patient-id",
149 as "store-id",
1 as "priority"
from
"patients-metadata-2" pm
left join
(
select
distinct "patient-id"
from
redemption r
where
status in ('REDEMPTION', 'COMPLETED')
and datediff('days',
"redemption-date",
current_date)<= 30) T1 on
pm.id = T1."patient-id"
where
pm."is-chronic" = true
and pm."primary-disease" in ('anti-diabetic', 'cardiac', 'blood-related', 'vitamins-minerals-nutrients')
and pm."value-segment-anytime" in ('gold', 'silver', 'platinum')
and pm."behaviour-segment-anytime" not in ('singletripper', 'newcomer_singletripper', 'newcomer-singletripper', 'other_type')
and DATEDIFF('day', pm."last-bill-date", current_date)<= 90
and T1."patient-id" is null
order by
pm."last-bill-date"
limit 360;
logger.info(data_q)
cc.rs_db.execute(data_q, params=None)
data: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data is None:
data = pd.DataFrame(columns=['store_id', 'patient_id'])
data.columns = [c.replace('-', '_') for c in data.columns]
logger.info(len(data))
data_base_c_grp = data.drop_duplicates(subset='patient_id')
logger.info("Unique data length {}".format(len(data_base_c_grp)))
return data_base_c_grp
"""""
def data_prep_churn_calling():
"""
Calling for inactive customer FOFO
"""
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
data_q = """select
t1."store-id",
t1."patient-id","average-bill-value"
from
(
select
pm."primary-store-id" as "store-id",
cc."patient-id" as "patient-id",pm."average-bill-value",
dense_rank() over (partition by pm."primary-store-id"
order by
cc."churn-prob" desc,
pm."last-bill-date" desc) as "rank"
from
"consumer-churn" cc
left join "patients-metadata-2" pm on
cc."patient-id" = pm.id
left join "stores-master" sm on
pm."primary-store-id" = sm.id
where
cc."created-at" = (select max("created-at") from "consumer-churn" cc)
and sm."franchisee-id" != 1) t1
where
"rank" <= 50;
"""
logger.info(data_q)
cc.rs_db.execute(data_q, params=None)
data: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data is None:
data = pd.DataFrame(columns=['store_id', 'patient_id'])
data.columns = [c.replace('-', '_') for c in data.columns]
logger.info(len(data))
data_base_c_grp = data.drop_duplicates(subset='patient_id')
logger.info(f"Unique data length: {len(data_base_c_grp)}")
return data_base_c_grp
##############################################################
# Devansh Medical
#############################################################
def data_prep_churn_calling_devansh():
"""
Calling for inactive customer Devansh
"""
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
data_q = """select
"primary-store-id" as "store-id",
id as "patient-id" ,
"average-bill-value"
from
"prod2-generico"."patients-metadata-2" pm
where
pm."primary-store-id" = '331'
and "last-bill-date" <= CURRENT_DATE -60
order by
"average-bill-value" desc
"""
logger.info(data_q)
cc.rs_db.execute(data_q, params=None)
data: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data is None:
data = pd.DataFrame(columns=['store_id', 'patient_id'])
data.columns = [c.replace('-', '_') for c in data.columns]
logger.info(len(data))
data_base_c_grp = data.drop_duplicates(subset='patient_id')
logger.info(f"Unique data length: {len(data_base_c_grp)}")
return data_base_c_grp
###################################################################################
# Loyal Chronic Pilot by Manish Ahire
###################################################################################
def data_prep_pilot_loyal_chronic():
"""
    Loyal chronic lost-customer pilot by Manish Ahire
"""
store_ids_mumbai = [144,223,233,242,257,4] # Mumbai
store_ids_pune = [246,266] # pune
store_ids = store_ids_mumbai + store_ids_pune
lost_customer_cutoff_1 = 90
lost_customer_cutoff_2 = 180
abv_lower_cutoff_param = 400
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
data_q = f"""
select
T1."store-id",
T1."patient-id",
T1."last-bill-date",
T1."average-bill-value"
from
(
select
pm."previous-store-id" as "store-id",
pm.id as "patient-id",
date(pm."last-bill-date") as "last-bill-date",
"average-bill-value",
rank() over (partition by pm."previous-store-id"
order by
pm."average-bill-value" desc) as "rank"
from
"prod2-generico"."patients-metadata-2" pm
inner join "prod2-generico"."retention-master" rm
on pm.id= rm."patient-id"
and rm.id = pm."last-bill-id" and rm."loyal-customer-flag" =1
where
pm."previous-store-id" in {tuple(store_ids)}
and datediff('days',
pm."last-bill-date",
current_date) >= {lost_customer_cutoff_1}
and pm."average-bill-value" >= {abv_lower_cutoff_param}
and pm."is-chronic"=1
and datediff('days',
"last-bill-date",
current_date) <= {lost_customer_cutoff_2}
) T1
where
T1."rank" <= 500;
"""
logger.info(data_q)
cc.rs_db.execute(data_q, params=None)
data: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data is None:
data = pd.DataFrame(columns=['store_id', 'patient_id', 'last_bill_date', 'average_bill_value'])
data.columns = [c.replace('-', '_') for c in data.columns]
logger.info(len(data))
data_base_c_grp = data.drop_duplicates(subset='patient_id')
logger.info("Unique data length {}".format(len(data_base_c_grp)))
return data_base_c_grp
###################################################################################
# Mix high value by Manish Ahire
###################################################################################
def data_prep_mix_high_value():
"""
    Mix high-value lost-customer pilot by Manish Ahire
"""
store_ids_mumbai = [144,223,233,242,257,4] # Mumbai
store_ids_pune = [246,266] # pune
store_ids = store_ids_mumbai + store_ids_pune
lost_customer_cutoff_1 = 90
lost_customer_cutoff_2 = 120
abv_lower_cutoff_param = 400
read_schema = 'prod2-generico'
cc.rs_db.execute(f"set search_path to '{read_schema}'", params=None)
data_q = f"""
select
T1."store-id",
T1."patient-id",
T1."last-bill-date",
T1."average-bill-value"
from
(
select
pm."previous-store-id" as "store-id",
pm.id as "patient-id",
date(pm."last-bill-date") as "last-bill-date",
"average-bill-value",
rank() over (partition by pm."previous-store-id"
order by
pm."average-bill-value" desc) as "rank"
from
"prod2-generico"."patients-metadata-2" pm
inner join "prod2-generico"."retention-master" rm
on pm.id= rm."patient-id"
and rm.id = pm."last-bill-id" and rm."total-spend" >=400
where
pm."previous-store-id" in {tuple(store_ids)}
and datediff('days',
pm."last-bill-date",
current_date) >= {lost_customer_cutoff_1}
and datediff('days',
"last-bill-date",
current_date) <= {lost_customer_cutoff_2}
) T1
where
T1."rank" <= 100;
"""
logger.info(data_q)
cc.rs_db.execute(data_q, params=None)
data: pd.DataFrame = cc.rs_db.cursor.fetch_dataframe()
if data is None:
data = pd.DataFrame(columns=['store_id', 'patient_id', 'last_bill_date', 'average_bill_value'])
data.columns = [c.replace('-', '_') for c in data.columns]
logger.info(len(data))
data_base_c_grp = data.drop_duplicates(subset='patient_id')
logger.info("Unique data length {}".format(len(data_base_c_grp)))
return data_base_c_grp
#########################################################################
# Main execution starts:
#########################################################################
################################################
# Pune custom campaign
################################################
logger.info("Pune custom campaign starts")
data = data_prep_pune_custom_campaign()
# Mandatory steps
data = campaign_mandatory_steps(data, run_date, last_n_days_bill_param=15,
last_n_days_call_param=30, exclude_store=[331])
# DB Write
if db_write == 'yes':
cc.db_write(data, run_date, 32, 'Inactive customer', store_daily_limit_param=5,
default_sort_needed=True)
################################################
# FOFO custom campaign
################################################
logger.info("fofo custom campaign starts")
data = data_prep_fofo_custom_campaign()
# Mandatory steps
data = campaign_mandatory_steps(data, run_date, last_n_days_bill_param=15,
last_n_days_call_param=15, exclude_store=[331])
# DB Write
if db_write == 'yes':
cc.db_write(data, run_date, 2, 'FOFO Churn calling', store_daily_limit_param=5,
default_sort_needed=True)
################################################
# Thane & Mulund cluster margin hit pilot
################################################
logger.info("Thane and Mulund cluster discount pilot")
data = data_prep_pilot_store_margin()
# Mandatory steps
data = campaign_mandatory_steps(data, run_date, last_n_days_bill_param=15,
last_n_days_call_param=30, exclude_store=[331])
# Split Thane and Mulund data
data_mulund = data[data['store_id'].isin([2, 4, 230, 244, 264])].copy()
# DB Write
if db_write == 'yes':
cc.db_write(data_mulund, run_date, 39, '20% discount pilot on Mulund Cluster', store_daily_limit_param=10,
default_sort_needed=True)
data_thane = data[data['store_id'].isin([89, 126, 122, 139, 144, 145, 233, 304])].copy()
# DB Write
if db_write == 'yes':
cc.db_write(data_thane, run_date, 39, '18% discount pilot on Thane cluster', store_daily_limit_param=10,
default_sort_needed=True)
################################################
# Refill campaign
################################################
logger.info("Refill campaign starts")
data = data_prep_refill_campaign()
# Mandatory steps
data = campaign_mandatory_steps(data, run_date, last_n_days_bill_param=24,
last_n_days_call_param=30, exclude_store=[331])
# DB Write
if db_write == 'yes':
cc.db_write(data, run_date, 21, 'Medicine Refill estimated', store_daily_limit_param=5,
default_sort_needed=True)
################################################
# Premium non-HD and HD campaign
################################################
logger.info("Premium non-hd and hd campaign starts")
data = data_prep_premium_cum_hd_campaign()
# Mandatory steps
data = campaign_mandatory_steps(data, run_date, last_n_days_bill_param=15,
last_n_days_call_param=30, exclude_store=[331])
# Split non-hd and hd data
data_nonhd = data[data['hd_flag'] != 1].copy()
# DB Write
if db_write == 'yes':
cc.db_write(data_nonhd, run_date, 26, 'Lost customer', store_daily_limit_param=10,
default_sort_needed=True)
data_hd = data[data['hd_flag'] == 1].copy()
# DB Write
if db_write == 'yes':
cc.db_write(data_hd, run_date, 28, 'Lost customer', store_daily_limit_param=5,
default_sort_needed=True)
"""""
################################################
# Diagnostic Calling
################################################
logger.info("Diagnostic Calling")
data = data_prep_diagnostic_calling()
# Mandatory steps
data = campaign_mandatory_steps(data, run_date, last_n_days_bill_param=0,
last_n_days_call_param=30, exclude_store=[331])
# DB Write
if db_write == 'yes':
cc.db_write(data, run_date, 40, 'Diagnostic calling', store_daily_limit_param=180,
default_sort_needed=False)
"""""
################################################
# FOFO Churn Calling
################################################
logger.info("FOFO Churn Calling")
data = data_prep_churn_calling()
# Mandatory steps
data = campaign_mandatory_steps(data, run_date, last_n_days_bill_param=15,
last_n_days_call_param=30, exclude_store=[0])
# DB Write
if db_write == 'yes':
cc.db_write(data, run_date, 2, 'FOFO Churn calling', store_daily_limit_param=15,
default_sort_needed=True)
################################################
# Devansh Churn Calling
################################################
logger.info("Devansh Lost Customer Calling")
data = data_prep_churn_calling_devansh()
# Mandatory steps
data = campaign_mandatory_steps(data, run_date, last_n_days_bill_param=15,
last_n_days_call_param=30, exclude_store=[0])
# DB Write
if db_write == 'yes':
cc.db_write(data, run_date, 41, 'Devansh Lost Customer', store_daily_limit_param=20,
default_sort_needed=True)
################################################
# Loyal Chronic Pilot by Manish Ahire
################################################
logger.info("Loyal Chronic Pilot by Manish Ahire")
data = data_prep_pilot_loyal_chronic()
# Mandatory steps
data = campaign_mandatory_steps(data, run_date, last_n_days_bill_param=15,
last_n_days_call_param=60, exclude_store=[0])
# DB Write
if db_write == 'yes':
cc.db_write(data, run_date, 45, 'Loyal Chronic Pilot', store_daily_limit_param=30,
default_sort_needed=True)
################################################
# Mix High Value Pilot by Manish Ahire
################################################
logger.info("Mix High value by Manish Ahire")
data = data_prep_mix_high_value()
# Mandatory steps
data = campaign_mandatory_steps(data, run_date, last_n_days_bill_param=15,
last_n_days_call_param=45, exclude_store=[0])
# DB Write
if db_write == 'yes':
cc.db_write(data, run_date, 46, 'Mix High Value Lost Pilot', store_daily_limit_param=30,
default_sort_needed=True)
#################################################
# Closing the DB Connections
cc.close_connections()
logger.info("File ends") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/crm-campaigns/crm-campaigns.py | crm-campaigns.py |
import argparse
import datetime
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import numpy as np
import pandas as pd
# from memory_profiler import profile
status = {
"updated": "updated",
"pending": "pending",
"updating": "updating",
}
patients_metadata_table = "patients-metadata-2"
bill_metadata_table = "bills-1-metadata"
schema = "prod2-generico"
def insert_new_patients(db):
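    """
    Insert patients who have at least one bill in bills-1-metadata but no row yet in
    patients-metadata-2, seeding them with etl-status 'pending' (respects the optional
    global `limit`).
    """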
limit_str = f" limit {limit}; " if limit else ""
query = f'''
insert
into
"{schema}"."{patients_metadata_table}" (id,
"created-at",
"updated-at",
"created-by",
"etl-status"
)
select
p.id,
p."created-at",
convert_timezone('Asia/Calcutta', GETDATE()),
'etl-job',
'{status['pending']}'
from
"{schema}"."patients" p
inner join (
select
"patient-id"
from
"{schema}"."{bill_metadata_table}" bm
group by
"patient-id"
) bm1 on
bm1."patient-id" = p.id
left join "{schema}"."{patients_metadata_table}" pm on
pm.id = p.id
where
pm.id is null
-- and date(p."updated-at") between '2021-06-01' and '2021-11-30'
and (pm."etl-status" != '{status['updated']}'
or pm."etl-status" is null)
{limit_str}
'''
db.execute(query, params=None)
def mark_old_patients_pending(db):
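    """
    Re-mark existing patients as 'pending' when any of their bills-1-metadata rows is newer
    than the metadata row, and reset rows stuck in 'updating' back to 'pending'.
    """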
# mark old patients etl-status pending if they have transacted
query = f"""
update
"{schema}"."{patients_metadata_table}" pm2
set
"etl-status" = '{status['pending']}',
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE())
from
(
select
pm.id
from
"{schema}"."{patients_metadata_table}" pm
inner join
"{schema}"."{bill_metadata_table}" bm on
pm.id = bm."patient-id"
where
pm."updated-at" < bm."updated-at" ) ab
where
pm2.id = ab.id;
"""
db.execute(query, params=None)
""" Sometimes jobs fails and updating count keeps increasing and we always get memory error, so to fix this mark
all updating status to pending """
query = f"""
update
"{schema}"."{patients_metadata_table}"
set
"etl-status" = 'pending'
where
"etl-status" = 'updating'
"""
db.execute(query, params=None)
def mark_pending_patients_updating(db):
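    """
    Move up to `batch_size` patients from 'pending' to 'updating' so they can be processed
    as the current batch.
    """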
limit_str = f"limit {batch_size} " if batch_size else ""
query = f"""
update
"{schema}"."{patients_metadata_table}" pm2
set
"etl-status" = '{status['updating']}'
from
(
select
pm.id
from
"{schema}"."{patients_metadata_table}" pm
where
"etl-status" = '{status['pending']}'
{limit_str}
) ab
where
pm2.id = ab.id;
"""
db.execute(query, params=None)
def get_to_be_updated_patients(db):
# ## Considering only changed patients
query = f'''
select
id,
"created-at",
"updated-at",
"created-by",
"etl-status"
from
"{schema}"."{patients_metadata_table}" pm
where
"etl-status" = '{status['updating']}'
'''
db.execute(query, params=None)
_changed_patients: pd.DataFrame = db.cursor.fetch_dataframe()
return _changed_patients
def update_bill_agg_fields(db):
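    """
    Recompute bill-level aggregates (last bill, spend, quantity splits, flags, min bill dates,
    recency and system age) from bills-1-metadata for patients in 'updating' status and write
    them into patients-metadata-2.
    """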
query = f"""
update
"{schema}"."{patients_metadata_table}" t
set
-- "first-bill-date" = s."first-bill-date",
"last-bill-date" = s."last-bill-date",
-- "first-bill-id" = s."first-bill-id",
"last-bill-id" = s."last-bill-id",
"average-bill-value" = s."average-bill-value",
"total-quantity" = s."total-quantity",
"quantity-generic" = s."quantity-generic",
"quantity-chronic" = s."quantity-chronic",
"quantity-ethical" = s."quantity-ethical",
"quantity-rx" = s."quantity-rx",
"quantity-repeatable" = s."quantity-repeatable",
"quantity-goodaid" = s."quantity-goodaid",
"quantity-others-type" = s."quantity-others-type",
"number-of-bills" = s."number-of-bills",
"hd-bills" = s."hd-bills",
"is-repeatable" = s."is-repeatable",
"is-generic" = s."is-generic",
"is-chronic" = s."is-chronic",
"is-goodaid" = s."is-goodaid",
"is-ethical" = s."is-ethical",
"is-rx" = s."is-rx",
"is-others-type" = s."is-others-type",
"hd-flag" = s."hd-flag",
"ecom-flag" = s."ecom-flag",
"crm-flag" = s."crm-flag",
"pr-flag" = s."pr-flag",
"total-spend" = s."total-spend",
"spend-generic" = s."spend-generic",
"promo-min-bill-date" = s."promo-min-bill-date",
"hd-min-bill-date" = s."hd-min-bill-date",
"ecom-min-bill-date" = s."ecom-min-bill-date",
"pr-min-bill-date" = s."pr-min-bill-date",
"generic-min-bill-date" = s."generic-min-bill-date",
"goodaid-min-bill-date" = s."goodaid-min-bill-date",
"ethical-min-bill-date" = s."ethical-min-bill-date",
"chronic-min-bill-date" = s."chronic-min-bill-date",
"repeatable-min-bill-date" = s."repeatable-min-bill-date",
"others-type-min-bill-date" = s."others-type-min-bill-date",
"digital-payment-min-bill-date" = s."digital-payment-min-bill-date",
"rx-min-bill-date" = s."rx-min-bill-date",
"digital-payment-flag" = s."digital-payment-flag",
"total-mrp-value" = s."total-mrp-value",
"recency-customer-days" = s."recency-customer-days",
"system-age-days" = s."system-age-days",
"quantity-generic-pc" = s."quantity-generic-pc",
"quantity-chronic-pc" = s."quantity-chronic-pc",
"quantity-ethical-pc" = s."quantity-ethical-pc",
"quantity-repeatable-pc" = s."quantity-repeatable-pc",
"quantity-goodaid-pc" = s."quantity-goodaid-pc",
"quantity-others-type-pc" = s."quantity-others-type-pc",
"spend-generic-pc" = s."spend-generic-pc"
from
(
select
pm.id ,
-- min(bm."created-at") as "first-bill-date",
max(bm."created-at") as "last-bill-date",
-- min(bm.id) as "first-bill-id",
max(bm.id) as "last-bill-id",
round(sum(bm."total-spend")/ count(distinct bm.id), 4) as "average-bill-value",
sum(bm."total-quantity") as "total-quantity",
sum(bm."quantity-generic") as "quantity-generic",
case when sum(bm."total-quantity") in (0, null) then -1 else round(100.0 * sum(bm."quantity-generic")/
sum(bm."total-quantity"), 4) end as "quantity-generic-pc",
sum(bm."quantity-chronic") as "quantity-chronic",
case when sum(bm."total-quantity") in (0, null) then -1 else round(100.0 * sum(bm."quantity-chronic")/
sum(bm."total-quantity"), 4) end as "quantity-chronic-pc",
sum(bm."quantity-ethical") as "quantity-ethical",
case when sum(bm."total-quantity") in (0, null) then -1 else round(100.0 * sum(bm."quantity-ethical")/
sum(bm."total-quantity"), 4) end as "quantity-ethical-pc",
sum(bm."quantity-repeatable") as "quantity-repeatable",
case when sum(bm."total-quantity") in (0, null) then -1 else round(100.0 * sum(bm."quantity-repeatable")
/ sum(bm."total-quantity"), 4) end as "quantity-repeatable-pc",
sum(bm."quantity-goodaid") as "quantity-goodaid",
case when sum(bm."total-quantity") in (0, null) then -1 else round(100.0 * sum(bm."quantity-goodaid")/
sum(bm."total-quantity"), 4) end as "quantity-goodaid-pc",
sum(bm."quantity-others-type") as "quantity-others-type",
case when sum(bm."total-quantity") in (0, null) then -1 else round(100.0 *
sum(bm."quantity-others-type")/ sum(bm."total-quantity"), 4) end as "quantity-others-type-pc",
sum(bm."quantity-generic" + bm."quantity-ethical") as "quantity-rx",
case when sum(bm."total-quantity") in (0, null) then -1 else round(100.0 * sum(bm."quantity-generic" +
bm."quantity-ethical")/ sum(bm."total-quantity"), 4) end as "quantity-rx-pc",
count(distinct bm.id) as "number-of-bills",
count(distinct (case when bm."hd-flag" is true then bm.id else null end)) as "hd-bills",
case when count(distinct bm.id) in (0, null) then -1 else round(100.0 * count(distinct (case when
bm."hd-flag" is true then bm.id else null end))/ count(distinct bm.id), 4) end as "hd-bills-pc",
bool_or(bm."is-repeatable") as "is-repeatable",
bool_or(bm."is-generic") as "is-generic",
bool_or(bm."is-chronic") as "is-chronic",
bool_or(bm."is-goodaid") as "is-goodaid",
bool_or(bm."is-ethical") as "is-ethical",
bool_or(bm."is-rx") as "is-rx",
bool_or(bm."is-others-type") as "is-others-type",
bool_or(bm."hd-flag") as "hd-flag",
bool_or(bm."ecom-flag") as "ecom-flag",
bool_or(bm."crm-flag") as "crm-flag",
bool_or(bm."pr-flag") as "pr-flag",
bool_or(bm."digital-payment-flag") as "digital-payment-flag",
sum(bm."total-spend") as "total-spend",
sum(bm."spend-generic") as "spend-generic",
case when sum(bm."total-spend") in (0, null) then -1 else round(100.0 * sum(bm."spend-generic")/
sum(bm."total-spend")) end as "spend-generic-pc",
min(case when bm."promo-code-id" is not null then bm."created-at" else null end) as
"promo-min-bill-date",
min(case when bm."hd-flag" is true then bm."created-at" else null end) as "hd-min-bill-date",
min(case when bm."ecom-flag" is true then bm."created-at" else null end) as "ecom-min-bill-date",
min(case when bm."crm-flag" is true then bm."created-at" else null end) as "crm-min-bill-date",
min(case when bm."pr-flag" is true then bm."created-at" else null end) as "pr-min-bill-date",
min(case when bm."is-generic" is true then bm."created-at" else null end) as "generic-min-bill-date",
min(case when bm."is-goodaid" is true then bm."created-at" else null end) as "goodaid-min-bill-date",
min(case when bm."is-ethical" is true then bm."created-at" else null end) as "ethical-min-bill-date",
min(case when bm."is-chronic" is true then bm."created-at" else null end) as "chronic-min-bill-date",
min(case when bm."is-repeatable" is true then bm."created-at" else null end) as
"repeatable-min-bill-date",
min(case when bm."is-others-type" is true then bm."created-at" else null end) as
"others-type-min-bill-date",
min(case when bm."digital-payment-flag" is true then bm."created-at" else null end) as
"digital-payment-min-bill-date",
min(case when bm."is-rx" is true then bm."created-at" else null end) as "rx-min-bill-date",
sum(bm."total-mrp-value") as "total-mrp-value",
case
when max(bm."created-at") = '0101-01-01' then null
else datediff(day,
max(bm."created-at"),
current_date)
end as "recency-customer-days",
case
when min(bm."created-at") = '0101-01-01' then null
else datediff(day,
min(bm."created-at"),
current_date)
end as "system-age-days"
from
"{schema}"."{patients_metadata_table}" pm
inner join "{schema}"."{bill_metadata_table}" bm on
pm.id = bm."patient-id"
where
pm."etl-status" = '{status['updating']}'
group by
pm.id
) s
where
t.id = s.id;
"""
db.execute(query, params=None)
def update_diagnostic_customer(db):
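    """
    Set is-diagnostic-customer = 1 for 'updating' patients that have at least one redemption
    with status REDEMPTION or COMPLETED.
    """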
query = f"""
update
"{schema}"."{patients_metadata_table}" t
set
"is-diagnostic-customer" = 1
from
(
select
r."patient-id"
from
"{schema}"."{patients_metadata_table}" pm
inner join "{schema}"."redemption" r on
pm.id = r."patient-id"
where
r.status in ('REDEMPTION', 'COMPLETED')
and
pm."etl-status" = '{status['updating']}'
and
(pm."is-diagnostic-customer" != 1 or pm."is-diagnostic-customer" is NULL)
group by
r."patient-id"
) s
where
t.id = s."patient-id";
"""
db.execute(query, params=None)
def get_customer_feedback(db):
# get customer feedback data (nps)
query = f"""
select
p.id,
f.rating,
f.suggestion,
f."store-id",
s."name" as "store-name",
f."created-at"
from
"{schema}"."{patients_metadata_table}" pm
inner join "{schema}".patients p on
p.id = pm.id
inner join "{schema}".feedback f on
f.phone = p.phone
inner join "{schema}".stores s on
f."store-id" = s."id"
where pm."etl-status" = '{status['updating']}'
"""
db.execute(query, params=None)
nps: pd.DataFrame = db.cursor.fetch_dataframe()
    if nps is not None and len(nps):
nps['created-at'] = pd.to_datetime(nps['created-at'])
nps['nps-rating-date'] = nps['created-at'].dt.strftime("%Y-%m-%d")
nps['is-nps'] = True
nps = nps.sort_values(by=['id', 'created-at'], ascending=[True, False])
# Keep only latest entry
nps['rank'] = nps.groupby(['id']).cumcount() + 1
nps = nps[nps['rank'] == 1]
nps.drop('rank', axis='columns', inplace=True)
nps = nps.rename(
columns={
'rating': 'latest-nps-rating',
'suggestion': 'latest-nps-rating-comment',
'nps-rating-date': 'latest-nps-rating-date',
'store-id': 'latest-nps-rating-store-id',
'store-name': 'latest-nps-rating-store-name'
}
)
else:
nps = pd.DataFrame(
columns=['id', 'created-at', 'nps-rating-date', 'is-nps', 'latest-nps-rating',
'latest-nps-rating-comment',
'latest-nps-rating-date', 'latest-nps-rating-store-id',
'latest-nps-rating-store-name'])
return nps
def get_referral_count(db):
# Referral count
query = f"""
select
a."patient-id" as id,
SUM(b."total-used") as "referred-count"
from
"{schema}"."{patients_metadata_table}" pm
left join
"{schema}"."patients-promo-codes" a on
pm.id = a."patient-id"
left join "{schema}"."promo-codes" b on
a."promo-code-id" = b."id"
where
b."code-type" = 'referral'
and pm."etl-status" = '{status['updating']}'
group by
a."patient-id"
"""
db.execute(query=query)
_referral: pd.DataFrame = db.cursor.fetch_dataframe()
return _referral
def get_patient_bills(db):
# ## Primary Store, System Age Days and Recency Customer Days
query = f"""
select
pm.id,
bm."store-id",
bm.id as "bill-id",
bm."bill-year",
bm."bill-month",
bm."bill-date",
bm."created-at",
bm."total-spend"
from
"{schema}"."{patients_metadata_table}" pm
inner join
"{schema}"."{bill_metadata_table}" bm on
pm.id = bm."patient-id"
where
pm."etl-status" = '{status['updating']}'
"""
db.execute(query=query)
_patient_bills: pd.DataFrame = db.cursor.fetch_dataframe()
return _patient_bills
def get_patient_drugs(db):
    # ## Number of drugs and primary disease
query = f"""
select
b."patient-id" ,
b.id as "bill-id",
bi."inventory-id",
i."drug-id"
from
"{schema}"."{patients_metadata_table}" pm
inner join "{schema}"."bills-1" b on
pm.id = b."patient-id"
inner join "{schema}"."bill-items-1" bi on
b.id = bi."bill-id"
inner join "{schema}"."inventory-1" i on
bi."inventory-id" = i.id
inner join "{schema}".drugs d on
i."drug-id" = d.id
where
pm."etl-status" = '{status['updating']}';
"""
db.execute(query=query)
_patient_drugs: pd.DataFrame = db.cursor.fetch_dataframe()
return _patient_drugs
def get_drug_subgroup(db):
# primary disease calculation
query = f"""
select
a."id" as "drug-id",
c."subgroup"
from
"{schema}".drugs a
inner join "{schema}"."composition-master-molecules-master-mapping" b on
a."composition-master-id" = b."composition-master-id"
inner join "{schema}"."molecule-master" c on
b."molecule-master-id" = c."id"
group by
a."id",
c."subgroup"
"""
db.execute(query=query)
_drug_subgroup: pd.DataFrame = db.cursor.fetch_dataframe()
return _drug_subgroup
def get_referral_code(db):
# Get referral code for patient
query = f"""
SELECT
"patient-id" as "id",
"promo-code" as "referral-code"
FROM
(
SELECT
ppc."patient-id",
row_number() over (partition by ppc."patient-id" order by ppc."id" ASC) as rank_entry,
pc."promo-code"
FROM
"{schema}"."patients-promo-codes" ppc
INNER JOIN
"{schema}"."promo-codes" pc
ON
ppc."promo-code-id" = pc."id"
WHERE
pc."code-type" = 'referral') sub
left join
"{schema}"."{patients_metadata_table}" pm
on
pm.id = sub."patient-id"
where
sub.rank_entry = 1
and
pm."etl-status" = '{status['updating']}'"""
db.execute(query=query)
referral_code: pd.DataFrame = db.cursor.fetch_dataframe()
return referral_code
def get_value_segment_anytime(db):
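    """
    Return the best value segment ever assigned to each 'updating' patient
    (platinum > gold > silver > others) as columns ['id', 'value-segment-anytime'].
    """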
q1 = f"""
select
value."patient-id" as "patient-id",
value."value-segment" as "value-segment"
from
"{schema}"."customer-value-segment" value
left join
"{schema}"."{patients_metadata_table}" pm
on
pm.id = value."patient-id"
where
pm."etl-status" = '{status['updating']}'
group by
value."patient-id",
value."value-segment"
"""
data_vs = db.get_df(q1)
data_vs.columns = [c.replace('-', '_') for c in data_vs.columns]
data_vs['value_segment_rank'] = data_vs['value_segment'].map(
{'platinum': 3, 'gold': 2, 'silver': 1, 'others': 0})
data_vs = data_vs.sort_values(by=['patient_id', 'value_segment_rank'], ascending=[True, False])
data_vs['rank'] = data_vs.groupby(['patient_id']).cumcount() + 1
data_vs_r1 = data_vs[data_vs['rank'] == 1]
data_vs_f = data_vs_r1[['patient_id', 'value_segment']]
data_vs_f.columns = ['id', 'value-segment-anytime']
return data_vs_f
def get_behaviour_segment_anytime(db):
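    """
    Return the highest-ranked behaviour segment ever assigned to each 'updating' patient
    (super > regular > ... > newcomer_singletripper) as columns ['id', 'behaviour-segment-anytime'].
    """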
q1 = f"""
select
behaviour."patient-id" as "patient-id",
behaviour."behaviour-segment" as "behaviour-segment"
from
"{schema}"."customer-behaviour-segment" behaviour
left join
"{schema}"."{patients_metadata_table}" pm
on
pm.id = behaviour."patient-id"
where
pm."etl-status" = '{status['updating']}'
group by
behaviour."patient-id",
behaviour."behaviour-segment"
"""
data_bs = db.get_df(q1)
data_bs.columns = [c.replace('-', '_') for c in data_bs.columns]
data_bs['behaviour_segment_rank'] = data_bs['behaviour_segment'].map({'super': 7,
'regular': 6,
'generic_heavy': 5,
'ethical_heavy': 4,
'other_type': 3,
'singletripper': 2,
'newcomer_repeat': 1,
'newcomer_singletripper': 0
})
data_bs = data_bs.sort_values(by=['patient_id', 'behaviour_segment_rank'],
ascending=[True, False])
data_bs['rank'] = data_bs.groupby(['patient_id']).cumcount() + 1
data_bs_r1 = data_bs[data_bs['rank'] == 1]
data_bs_f = data_bs_r1[['patient_id', 'behaviour_segment']]
data_bs_f.columns = ['id', 'behaviour-segment-anytime']
return data_bs_f
def update_data_in_patients_metadata_table(db, s3, patient_data):
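    """
    Load the batch dataframe into a temp table, then update the derived columns (NPS, referral,
    primary store/disease, purchase intervals, segments, first bill) in patients-metadata-2 and
    mark the rows 'updated'.
    """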
# ## Create temp table and update (nps and other) from that
patient_temp_table = patients_metadata_table.replace("-", "_") + "_temp"
db.execute(query=f"DROP table IF EXISTS {patient_temp_table};")
query = f"""
CREATE TEMP TABLE {patient_temp_table}
(
id INTEGER ENCODE az64
,"is-nps" bool
,"latest-nps-rating" INTEGER ENCODE az64
,"latest-nps-rating-comment" VARCHAR(1500) ENCODE lzo
,"latest-nps-rating-store-id" INTEGER ENCODE az64
,"latest-nps-rating-store-name" VARCHAR(765) ENCODE lzo
,"latest-nps-rating-date" date ENCODE az64
,"referred-count" int ENCODE az64
,"primary-store-id" INTEGER ENCODE az64
,"num-drugs" INTEGER ENCODE az64
,"primary-disease" VARCHAR(100) ENCODE lzo
,"avg-purchase-interval" numeric
,"std-purchase-interval" numeric
-- ,"value-segment-calculation-date" date
-- ,"value-segment" VARCHAR(50)
,"previous-bill-date" DATE ENCODE az64
,"previous-store-id" INTEGER ENCODE az64
-- ,"min-bill-date-in-month" DATE ENCODE az64
-- ,"store-id-month" INTEGER ENCODE az64
,"referral-code" VARCHAR(765) ENCODE lzo
,"value-segment-anytime" varchar(255) ENCODE lzo
,"behaviour-segment-anytime" varchar(255) ENCODE lzo
,"first-bill-date" timestamp ENCODE az64
,"first-bill-id" INTEGER ENCODE az64
,PRIMARY KEY (id)
);
"""
db.execute(query=query)
patient_temp_table_info = helper.get_table_info(db=db, table_name=patient_temp_table,
schema=None)
# ### Fixing the data types
patient_data['latest-nps-rating'] = patient_data['latest-nps-rating'].fillna(-1).astype('int64')
patient_data['latest-nps-rating-store-id'] = patient_data['latest-nps-rating-store-id'].fillna(
-1).astype('int64')
patient_data['referred-count'] = patient_data['referred-count'].fillna(-1).astype('int64')
patient_data['num-drugs'] = patient_data['num-drugs'].fillna(-1).astype('int64')
patient_data['previous-store-id'] = patient_data['previous-store-id'].fillna(-1).astype('int64')
s3.write_df_to_db(
df=patient_data[list(dict.fromkeys(patient_temp_table_info['column_name']))],
db=db, table_name=patient_temp_table, schema=None
)
# ## Updating the data in patient-metadata-2 table
query = f"""
update
"{schema}"."{patients_metadata_table}" t
set
"is-nps" = s."is-nps",
"latest-nps-rating" = s."latest-nps-rating",
"latest-nps-rating-comment" = s."latest-nps-rating-comment",
"latest-nps-rating-store-id" = s."latest-nps-rating-store-id",
"latest-nps-rating-store-name" = s."latest-nps-rating-store-name",
"latest-nps-rating-date" = s."latest-nps-rating-date",
"referred-count" = s."referred-count",
"primary-store-id" = s."primary-store-id",
"num-drugs" = s."num-drugs",
"primary-disease" = s."primary-disease",
"avg-purchase-interval" = s."avg-purchase-interval",
"std-purchase-interval" = s."std-purchase-interval",
-- "value-segment-calculation-date" = s."value-segment-calculation-date",
-- "value-segment" = s."value-segment",
"previous-bill-date" = s."previous-bill-date",
"previous-store-id" = s."previous-store-id",
-- "min-bill-date-in-month" = s."min-bill-date-in-month",
-- "store-id-month" = s."store-id-month",
"referral-code" = s."referral-code",
"value-segment-anytime" = s."value-segment-anytime",
"behaviour-segment-anytime" = s."behaviour-segment-anytime",
"first-bill-date" = s."first-bill-date",
"first-bill-id" = s."first-bill-id",
"etl-status" = 'updated'
from
{patient_temp_table} s
where
t.id = s.id;
"""
db.execute(query=query)
# @profile
def process_batch(changed_patients, db, s3, pg_db):
""" updating some fields directly from bills-1-metadata table """
update_bill_agg_fields(db=db)
update_diagnostic_customer(db=db)
nps = get_customer_feedback(db=db)
referral = get_referral_count(db=db)
patient_bills = get_patient_bills(db=db)
referral_code = get_referral_code(db=db)
value_segment_anytime = get_value_segment_anytime(db=db)
behaviour_segment_anytime = get_behaviour_segment_anytime(db=db)
# purchase interval calculation
patient_bills_2 = patient_bills.sort_values(
by=["id", "created-at"]) # soring on patient id and bill-created-at
patient_bills_2['bill-date'] = patient_bills_2['bill-date'].apply(
lambda x: x.strftime("%Y-%m-%d"))
# Fetch previous bill date, against every bill
patient_bills_2['previous-bill-date'] = patient_bills_2.groupby("id")['bill-date'].shift(1)
patient_bills_2['purchase-interval'] = (
pd.to_datetime(patient_bills_2['bill-date']) - pd.to_datetime(
patient_bills_2['previous-bill-date'])).dt.days
patient_bills_avg_std = patient_bills_2.groupby(['id']).agg(
{'purchase-interval': ['mean', 'std']})
patient_bills_avg_std = patient_bills_avg_std.reset_index(col_level=1)
patient_bills_avg_std.columns = patient_bills_avg_std.columns.droplevel(0)
patient_bills_avg_std.columns = ['id', 'avg-purchase-interval', 'std-purchase-interval']
# ### Primary Store
# Patient store wise, NOB and Total spend
patient_store_agg = patient_bills.groupby(
['id', 'store-id']).agg({'bill-id': 'nunique', 'total-spend': 'sum'}).reset_index()
patient_store_agg = patient_store_agg.rename(
columns={'bill-id': 'store-bills', 'total-spend': 'store-spend'})
patient_store_agg['rank'] = patient_store_agg.sort_values(
['store-bills', 'store-spend'], ascending=[False, False]).groupby(['id']).cumcount() + 1
# Shortlist 1 store per patient
patient_primary_store = patient_store_agg[patient_store_agg['rank'] == 1]
patient_primary_store = patient_primary_store.rename(columns={'store-id': 'primary-store-id'})
# ### Previous Bill Date and Store id
previous_store_bill = patient_bills_2.sort_values(
by=["id", "created-at"], ascending=[True, False]).drop_duplicates(subset='id')
previous_store_bill = previous_store_bill.rename(columns={'store-id': 'previous-store-id'})
# First bill date and first bill id calculation
patient_first_bill = patient_bills_2.groupby('id').head(1)[['id', "bill-id", "created-at"]]
patient_first_bill.rename(columns={"bill-id": "first-bill-id", "created-at": "first-bill-date"},
inplace=True)
# # last bill date and last bill id calculation
# patient_last_bill = patient_bills_2.groupby('id').tail(1)[['id', "bill-id", "created-at"]]
# patient_last_bill.rename(columns={"bill-id": "last-bill-id", "created-at": "last-bill-date"}, inplace=True)
#
# Number of drug calculation
patient_drugs = get_patient_drugs(db=db)
patient_drug_agg = patient_drugs.groupby(['patient-id']).agg(
{'drug-id': "nunique"}).reset_index().rename(
columns={'drug-id': 'num-drugs', 'patient-id': 'id'})
drug_subgroup = get_drug_subgroup(db=db)
# Merge subgroups, take only relevant columns
patient_drugs_count = patient_drugs.groupby(
['patient-id', 'drug-id'])['inventory-id'].count().reset_index().rename(
columns={'inventory-id': 'count'})
patient_drugs_subgroup_count = patient_drugs_count.merge(drug_subgroup, how='left',
on=['drug-id'])
    # Sum subgroup occurrences per patient
patient_subgroup = patient_drugs_subgroup_count.groupby(
['patient-id', 'subgroup'])['count'].sum().reset_index().rename(
columns={'count': 'drug-count'})
# Rank on use
patient_subgroup = patient_subgroup.sort_values(by=['patient-id', 'drug-count'],
ascending=[True, False])
patient_subgroup['rank'] = patient_subgroup.groupby(['patient-id']).cumcount() + 1
# keep top2 subgroups
patient_subgroup_top_2 = patient_subgroup[patient_subgroup['rank'] <= 2]
# Make 2 columns, first for rank1, other for rank2
patient_subgroup_top_2_pivot = patient_subgroup_top_2.pivot(index='patient-id', columns='rank',
values='subgroup').reset_index()
patient_subgroup_top_2_pivot = patient_subgroup_top_2_pivot.reset_index(drop=True)
patient_subgroup_top_2_pivot.columns = ['patient-id', 'disease-rank1', 'disease-rank2']
# Assignment of primary disease
# If rank1 is not others, then rank1 as it is
# If rank1 is others, and rank2 is null, then rank1 as it is
# If rank1 is others, and rank2 is something, then rank2
patient_subgroup_top_2_pivot['primary-disease'] = np.where(
(
(patient_subgroup_top_2_pivot['disease-rank1'] == 'others') &
            (patient_subgroup_top_2_pivot['disease-rank2'].notnull())
),
patient_subgroup_top_2_pivot['disease-rank2'],
patient_subgroup_top_2_pivot['disease-rank1']
)
patient_primary_disease = patient_subgroup_top_2_pivot[
['patient-id', 'primary-disease']].rename(
columns={'patient-id': 'id'})
# patient_value_segment = get_patient_value_segment(db=db)
# patient_behaviour_segment = get_patient_behaviour_segment(changed_patients=changed_patients, pg_db=pg_db)
# Merging all data points
patient_data = changed_patients[['id']]
# ### Feedback (nps)
patient_data = patient_data.merge(nps, how='left', on=['id'])
# ### Referral
patient_data = patient_data.merge(referral, how='left', on=['id'])
# ### Referral Primary Store
patient_data = patient_data.merge(patient_primary_store[['id', 'primary-store-id']], how='left',
on='id')
# ### Primary Disease
patient_data = patient_data.merge(patient_primary_disease, how='left', on='id')
# ### Drug count
patient_data = patient_data.merge(patient_drug_agg, how='left', on='id')
# ### Average and Standard Purchase Interval
patient_data = patient_data.merge(patient_bills_avg_std, how='left', on='id')
# ### Previous store id
patient_data = patient_data.merge(
previous_store_bill[['id', 'previous-bill-date', 'previous-store-id']],
how='left', on='id')
# first bill id and date
patient_data = patient_data.merge(patient_first_bill, how='left', on='id')
patient_data = patient_data.merge(referral_code, how='left', on='id')
patient_data = patient_data.merge(value_segment_anytime, how='left', on='id')
patient_data = patient_data.merge(behaviour_segment_anytime, how='left', on='id')
patient_data['is-nps'] = patient_data['is-nps'].fillna(False)
""" Finally updating the batch data in table """
update_data_in_patients_metadata_table(db, s3, patient_data)
def update_value_segment():
# Extracting max date from patients metadata for value_seg
patient_val = f'''
select
max("value-segment-calculation-date") as max_date
from
"{schema}"."patients-metadata-2" '''
rs_db.execute(query=patient_val, params=None)
c_data: pd.DataFrame = rs_db.cursor.fetch_dataframe()
# Extracting max date from customer value segment
cust_val = f'''
select
max("segment-calculation-date") as max_date
from
"{schema}"."customer-value-segment" '''
rs_db.execute(query=cust_val, params=None)
d_data: pd.DataFrame = rs_db.cursor.fetch_dataframe()
    value_max_date = d_data['max_date'][0]  # max segment-calculation-date in customer-value-segment
    patient_val_max_dt = c_data['max_date'][0]  # max value-segment date already present in patients-metadata
    cur_nmz_date = (datetime.datetime.today()).replace(day=1).strftime('%Y-%m-%d')  # first day of the current month
if str(value_max_date) == str(cur_nmz_date):
if str(cur_nmz_date) != str(patient_val_max_dt):
logger.info('Condition passed for update: value-segment')
rs_db.execute('Begin;')
            update_q1 = f'''
                update
                    "{schema}"."patients-metadata-2" pm2
                set
                    "value-segment" = cbs."value-segment",
                    "value-segment-calculation-date" = cbs."segment-calculation-date"
                from
                    "{schema}"."customer-value-segment" cbs
                where
                    pm2."id" = cbs."patient-id"
                    and cbs."segment-calculation-date" = DATE_TRUNC('month', CURRENT_DATE);
            '''
            update_q2 = f'''
                update
                    "{schema}"."patients-metadata-2"
                set
                    "value-segment" = null,
                    "value-segment-calculation-date" = null
                where
                    "value-segment-calculation-date" != DATE_TRUNC('month', CURRENT_DATE);
            '''
rs_db.execute(query=update_q1, params=None)
rs_db.execute(query=update_q2, params=None)
rs_db.execute('commit;')
logger.info('value-segment Update Successful')
def update_behaviour_segment():
# Extracting max date from customer behaviour segment
query = f'''
select
max("segment-calculation-date") as max_date
from
"{schema}"."customer-behaviour-segment" '''
rs_db.execute(query=query, params=None)
f_data: pd.DataFrame = rs_db.cursor.fetch_dataframe()
# Extracting max date from patients metadata for behaviour_seg
patient_beh = f'''
select
max("behaviour-segment-calculation-date") as max_date
from
"{schema}"."patients-metadata-2" '''
rs_db.execute(query=patient_beh, params=None)
b_data: pd.DataFrame = rs_db.cursor.fetch_dataframe()
    beh_max_date = f_data['max_date'][0]  # max segment-calculation-date in customer-behaviour-segment
    patient_beh_max_date = b_data['max_date'][0]  # max behaviour-segment date already present in patients-metadata
    cur_nmz_date = (datetime.datetime.today()).replace(day=1).strftime('%Y-%m-%d')  # first day of the current month
if str(beh_max_date) == str(cur_nmz_date):
if str(cur_nmz_date) != str(patient_beh_max_date):
logger.info('Condition passed for update: behaviour-segment')
rs_db.execute('Begin;')
            update_q1 = f'''
                update
                    "{schema}"."patients-metadata-2" pm2
                set
                    "behaviour-segment" = cbs."behaviour-segment",
                    "behaviour-segment-calculation-date" = cbs."segment-calculation-date"
                from
                    "{schema}"."customer-behaviour-segment" cbs
                where
                    pm2."id" = cbs."patient-id"
                    and cbs."segment-calculation-date" = DATE_TRUNC('month', CURRENT_DATE);
            '''
            update_q2 = f'''
                update
                    "{schema}"."patients-metadata-2"
                set
                    "behaviour-segment" = null,
                    "behaviour-segment-calculation-date" = null
                where
                    "behaviour-segment-calculation-date" != DATE_TRUNC('month', CURRENT_DATE);
            '''
rs_db.execute(query=update_q1, params=None)
rs_db.execute(query=update_q2, params=None)
rs_db.execute('commit;')
logger.info('behaviour-segment update successful')
# @profile
def main(db, s3, pg_db):
insert_new_patients(db)
mark_old_patients_pending(db)
is_still_pending = True
count = 1
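    # keep processing batches of pending patients until none are left to update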
while is_still_pending:
mark_pending_patients_updating(db=db)
logger.info(f"batch: {count}, mark_pending_patients_updating done.")
changed_patients = get_to_be_updated_patients(db=db)
if isinstance(changed_patients, type(None)) or changed_patients.empty:
is_still_pending = False
logger.info("Completed all batches.")
else:
process_batch(changed_patients=changed_patients, db=db, s3=s3, pg_db=pg_db)
logger.info(f"batch: {count}, process_batch done.")
count += 1
""" Updating the value and behaviour-segment """
update_value_segment()
update_behaviour_segment()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-b', '--batch_size', default=100, type=int, required=False,
help="batch size")
parser.add_argument('-l', '--limit', default=None, type=int, required=False,
help="Total patients to process")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info("I am in the right code.")
batch_size = args.batch_size
limit = args.limit
logger.info(f"env: {env}, limit: {limit}, batch_size: {batch_size}")
rs_db = DB(read_only=False)
rs_db.open_connection()
pg_db = None
_s3 = S3()
""" calling the main function """
main(db=rs_db, s3=_s3, pg_db=pg_db)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/patients-metadata/patients_metadata.py | patients_metadata.py |
# Warnings
from warnings import filterwarnings as fw
import pandas as pd
fw('ignore')
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.email.email import Email
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-scm', '--email_to_sc', default="[email protected]", type=str, required=False)
parser.add_argument('-odc', '--oos_day_count', default=5, type=int, required=False)
parser.add_argument('-iud', '--inv_update_day', default=2, type=int, required=False)
parser.add_argument('-debug', '--debug_mode', default='N', type=str, required=False)
parser.add_argument('-as', '--active_stores', default="all", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
# parameters
email_to = args.email_to
email_to = email_to.split(',')
email_to_sc = args.email_to_sc
odc = args.oos_day_count
iud = args.inv_update_day
debug = args.debug_mode
active_stores = args.active_stores
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
read_schema = "prod2-generico"
email = Email()
if active_stores == 'all':
stores_q = """select
id
from
"prod2-generico".stores s
where
"is-active" = 1
and category = 'retail';"""
all_stores = rs_db.get_df(stores_q)
active_stores = all_stores['id'].unique()
active_stores = tuple(map(int, active_stores))
else:
active_stores = active_stores.split(',') + ['0']
active_stores = tuple(map(int, active_stores))
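# appending '0' avoids an invalid single-element SQL tuple like "(5,)" when only one store id is passed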
################################################################
###################### Helper Functions ########################
################################################################
def store_info_func(store_id):
"""
This basic function return store name and emails
"""
rs_db_h = DB(read_only=True)
rs_db_h.open_connection()
store_q = f"""select
sm.id,
sm.store as "store-name",
sm."store-email",
sm."line-manager",
sm."line-manager-email",
sm.abo,
sm."abo-email",
sm."store-manager",
sm."store-manager-email",
s."franchisee-email"
from
"{read_schema}"."stores-master" sm
left join "{read_schema}".stores s on sm.id = s.id
where sm.id = {store_id};
"""
store_info = rs_db.get_df(store_q)
try:
store_info = store_info.iloc[0]
store_name = store_info["store-name"]
store_mails = []
for col in ["store-email", "line-manager-email", "abo-email", "store-manager-email", "franchisee-email"]:
if '@' in str(store_info[col]):
store_mails.append(store_info[col])
    except Exception:
store_name = "store not exist"
store_mails = ["[email protected]"]
return store_name, store_mails
logger.info(f"script running for stores_ids : {active_stores}")
for store_id in active_stores:
logger.info(f"store name : {store_info_func(store_id)[0]}")
logger.info(f"store emails : {store_info_func(store_id)[1]}")
#################################################################
###################### OOS and Inventory ########################
#################################################################
logger.info("OOS and Inventory Issue Started")
oos_q = f"""
select
t_final.*,
d."drug-name" as "drug-name"
from
(
select
t."store-id",
t."drug-id",
min(t."closing-date") "from-date",
max(t."closing-date") "to-date",
datediff('days', min(t."closing-date"), max(t."closing-date")) + 1 as "days-count"
from
(
select
* ,
row_number() over (partition by "store-id",
"drug-id"
order by
"closing-date" desc) as "rn",
dateadd('days',
"rn",
"closing-date") "plus-date"
from
"{read_schema}"."out-of-shelf-drug-level" oosdl
where
"oos-count" = 1
and "max-set" = 'Y'
and "mature-flag" = 'Y') t
where
date("plus-date") = current_date
and t."store-id" in {active_stores}
group by
t."store-id",
t."drug-id"
having
"days-count" > {odc}) t_final
left join
"{read_schema}".stores s on
t_final."store-id" = s.id
left join
"{read_schema}".drugs d on
t_final."drug-id" = d.id
left join "{read_schema}"."drug-substitution-mapping" dsm1 on
t_final."drug-id" = dsm1."drug-id"
left join (
select
i."store-id",
dsm2."group",
sum(quantity) "total-quantity"
from
"{read_schema}"."inventory-1" i
left join "{read_schema}"."drug-substitution-mapping" dsm2 on
i."drug-id" = dsm2."drug-id"
group by
i."store-id",
dsm2."group") t_dsm on
t_dsm."group" = dsm1."group" and t_final."store-id" = t_dsm."store-id"
where
t_dsm."total-quantity" = 0;
"""
sales_loss_q = f"""
select
tw."store-id",
tw."drug-id",
min(tw."closing-date") "from-date",
max(tw."closing-date") "to-date",
datediff('days', min(tw."closing-date"), max(tw."closing-date")) + 1 as "days-count",
sum(s."revenue-value") as "total-sales"
from
(
select
*,
rank() over (partition by "store-id",
"drug-id"
order by
"plus-date" desc) as "latest-rank"
from
(
select
* ,
row_number() over (partition by "store-id",
"drug-id"
order by
"closing-date" desc) as "rn",
dateadd('days',
"rn",
"closing-date") "plus-date"
from
"{read_schema}"."out-of-shelf-drug-level" oosdl
where
"oos-count" = 0) t ) tw
left join "{read_schema}".sales s on
date(s."created-at") = tw."closing-date"
and s."drug-id" = tw."drug-id"
and s."store-id" = tw."store-id"
where
"latest-rank" = 1
and "plus-date" != current_date
group by
tw."store-id",
tw."drug-id";
"""
oos = rs_db.get_df(oos_q)
sales_loss = rs_db.get_df(sales_loss_q)
sales_loss['avg-per-day-sales'] = sales_loss['total-sales'] / sales_loss['days-count']
sales_loss = sales_loss[['store-id', 'drug-id', 'avg-per-day-sales']]
oos = pd.merge(oos, sales_loss, how='left', on=['store-id', 'drug-id'])
oos = oos.dropna(subset=['avg-per-day-sales'])
oos['avg-per-day-sales'] = oos['avg-per-day-sales'].astype(float).round(2)
oos['sales-loss'] = oos['avg-per-day-sales'] * oos['days-count']
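# estimated sales loss = average per-day sale of the drug * number of continuous OOS days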
# Let's solve issues for top 20 only for a day
oos = oos.sort_values('sales-loss', ascending=False).head(20)
store_ids = oos['store-id'].unique()
oos_mail_body = """
Hey {store_name},
There are some drugs which are out of stock on your store for very long time
Plus these are the drugs which don't have any alternative available on store
Possible issues are as listed :
1. Auto Short not triggered
2. Short in market
3. Quantity in locked state (Store inventory)
Because of these specific drugs OOS of your store is high
Your daily task for today is resolve these issues :
Step 1. Copy-Paste drug name from the sheet into compare tab
Step 2. Check its best alternative - 1 Generic, 1 Ethical
Step 3a). Create a manual short of the best alternative in 1 quantity ( 1 Ethical, 1 Generic)
Step 3b) If an Alternate is not available just raise MS for the same drug which is mentioned.
"""
logger.info(f"OOS mail sending to following stores {store_ids}")
if debug == 'N':
for store_id in store_ids:
store_name, store_emails = store_info_func(store_id)
for other_email in email_to:
store_emails.append(other_email)
if store_name != "store not exist":
store_emails.append(email_to_sc)
file_name = 'OOS_Drugs.xlsx'
file_path = s3.write_df_to_excel(data={'Drugs': oos[oos['store-id'] == store_id]}, file_name=file_name)
email.send_email_file(subject="Store Daily Insight 1 : Unavailability Issue",
mail_body=oos_mail_body.format(store_name=store_name),
to_emails=store_emails,
file_uris=[],
file_paths=[file_path])
logger.info(f"OOS issue mail sent to {store_name} store on following emails : {store_emails}")
if debug == 'Y':
store_name, store_emails = 'Debugger', ['[email protected]']
file_name = 'OOS_Drugs.xlsx'
file_path = s3.write_df_to_excel(data={'Drugs': oos}, file_name=file_name)
email.send_email_file(subject="Store Daily Insight 1 : Unavailability Issue",
mail_body=oos_mail_body.format(store_name=store_name),
to_emails=store_emails,
file_uris=[],
file_paths=[file_path])
inv_q = f"""
select
i."store-id",
s."name" as "store-name",
i."drug-id",
d."drug-name",
sum(i.quantity) as "quantity",
sum(i."locked-quantity") as "locked-quantity",
sum(i."locked-for-check") as "locked-for-check",
sum(i."locked-for-audit") as "locked-for-audit",
sum(i."locked-for-return") as "locked-for-return",
sum(i."locked-for-transfer") as "locked-for-transfer",
sum(i."extra-quantity") as "extra-quantity",
max(i."updated-at") as "updated-at"
from
"{read_schema}"."inventory-1" i
left join "{read_schema}".stores s on
s.id = i."store-id"
left join "{read_schema}".drugs d on
d.id = i."drug-id"
where i."store-id" in {active_stores}
group by
i."store-id",
s."name",
i."drug-id",
d."drug-name"
having
sum(i."quantity") = 0
and sum(i."locked-quantity" + i."locked-for-check" + i."locked-for-audit" + i."locked-for-return" + i."locked-for-transfer" + i."extra-quantity") > 0
and max(i."updated-at") <= current_date - {iud}
order by
"updated-at"
limit 20;
"""
inv = rs_db.get_df(inv_q)
# Taking out all stores
store_ids = inv['store-id'].unique()
inv_mail_body = """
Hey {store_name},
There are some drugs in your store where system available quantity is shown as 0,
so not available for sale, but their quantity is stuck in locked state
since long time which cause trouble in triggering Auto Short
Your daily task for today is resolve these issues : Please reach out to ABO or tech-support for any doubts.
> Unlock mentioned drugs
"""
logger.info(f"Inventory mail sending to following stores {store_ids}")
if debug == 'N':
for store_id in store_ids:
store_name, store_emails = store_info_func(store_id)
for other_email in email_to:
store_emails.append(other_email)
file_name = 'Locked State Drugs.xlsx'
file_path = s3.write_df_to_excel(data={'Drugs': inv[inv['store-id'] == store_id]}, file_name=file_name)
email.send_email_file(subject="Store Daily Insight 2 : Inventory locked Issue",
mail_body=inv_mail_body.format(store_name=store_name),
to_emails=store_emails,
file_uris=[],
file_paths=[file_path])
logger.info(f"Inventory issue mail sent to {store_name} store on following emails : {store_emails}")
if debug == 'Y':
store_name, store_emails = 'Debugger', ['[email protected]']
file_name = 'Locked State Drugs.xlsx'
file_path = s3.write_df_to_excel(data={'Drugs': inv}, file_name=file_name)
email.send_email_file(subject="Store Daily Insight 2 : Inventory locked Issue",
mail_body=inv_mail_body.format(store_name=store_name),
to_emails=store_emails,
file_uris=[],
file_paths=[file_path])
logger.info("OOS and Inventory Issue Finished")
#################################################################
###################### Substitution ############################
#################################################################
logger.info("Generic Issue Started")
gen_shift_q = """
select
"primary-store-id" as "store-id",
(case
when "post-generic-quantity" > 0 then 'generic-shift'
when "post-generic-quantity" = 0
and "post-ethical-quantity" > 0 then 'still-ethical'
else 'bought others'
end
) "behaviour-shift",
count(distinct t1."patient-id") "patient-count"
from
(
select
"patient-id",
sum("quantity-ethical") "pre-ethical-quantity",
sum("quantity-generic") "pre-generic-quantity"
from
"prod2-generico"."retention-master" rm
where
"created-at" < date_trunc('month', current_date)
and "created-at" >= dateadd('month',
-3,
date_trunc('month', current_date))
group by
"patient-id"
having
"pre-ethical-quantity" > 0
and "pre-generic-quantity" = 0 ) t1
inner join
(
select
"patient-id",
"primary-store-id",
sum("quantity-ethical") "post-ethical-quantity",
sum("quantity-generic") "post-generic-quantity"
from
"prod2-generico"."retention-master" rm
where
"created-at" >= date_trunc('month', current_date)
group by
"primary-store-id",
"patient-id") t2
on
t1."patient-id" = t2."patient-id"
group by
"primary-store-id",
"behaviour-shift"
"""
gen_shift = rs_db.get_df(gen_shift_q)
gen_shift_dist = pd.crosstab(index=gen_shift['store-id'],
columns=gen_shift['behaviour-shift'],
values=gen_shift['patient-count'],
aggfunc='sum', normalize='index')
# i = issue
gen_shift_dist_i = gen_shift_dist[gen_shift_dist['generic-shift'] < gen_shift_dist['generic-shift'].quantile(0.05)]
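# flag stores whose active-customer generic shift is in the bottom 5% across all stores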
store_ids = gen_shift_dist_i.index
store_ids = list(set.intersection(set(store_ids), set(active_stores)))
logger.info(f"Active generic subs mail sending to following stores {store_ids}")
for store_id in store_ids:
store_name, store_emails = store_info_func(store_id)
if debug == 'Y':
store_emails = ['[email protected]']
else:
for other_email in email_to:
store_emails.append(other_email)
gen_mail_body = f"""
Hey {store_name} Store,
Your store is in Top stores where generic substitution is lower for active consumer
System average for active customer generic shift is : {gen_shift_dist['generic-shift'].quantile(0.5).round(4) * 100} %
Your store performance : {gen_shift_dist_i.loc[store_id]['generic-shift'].round(4) * 100} %
Your Daily task to resolve this issues :
> Focus on substitution for active customers
> Pitch Generic to customer whose generic affinity is lower (Visible on billing panel)
"""
file_name = 'Substitution active consumer.xlsx'
file_path = s3.write_df_to_excel(data={'Drugs': gen_shift_dist_i.loc[store_id]}, file_name=file_name)
email.send_email_file(subject="Store Daily Insight 3 : Active Consumer substitution Issue",
mail_body=gen_mail_body,
to_emails=store_emails,
file_uris=[],
file_paths=[file_path])
logger.info(f"Active generic subs issue mail sent to {store_name} store on following emails : {store_emails}")
logger.info("Generic Issue Finished")
#################################################################
############################ Sales #############################
#################################################################
logger.info("Sales De-growth Issue Started")
sale_tq = """select
s."store-id",
sum(case
when s."created-at" >= date_trunc('month', current_date)
and s."created-at" <= current_date then s."revenue-value" else 0 end) "MTD-Sales",
sum(case when s."created-at" >= dateadd('month', -1, date_trunc('month', current_date))
and s."created-at" <= dateadd('month', -1, current_date) then s."revenue-value" else 0 end) "LMTD-Sales",
"MTD-Sales" - "LMTD-Sales" as "sales-diff",
("MTD-Sales" - "LMTD-Sales") / "LMTD-Sales" as "perc diff"
from
"prod2-generico".sales s
left join "prod2-generico".stores s2 on
s."store-id" = s2.id
where
s2."is-active" = 1
and s2."franchisee-id" = 1
group by
s."store-id"
having
"sales-diff" < 0
and "LMTD-Sales" != 0
and min(s."created-at") < dateadd('month', -1, date_trunc('month', current_date))
order by
5
limit 10"""
sale_t = rs_db.get_df(sale_tq)
store_ids = sale_t["store-id"].unique()
store_ids = list(set.intersection(set(store_ids), set(active_stores)))
for store_id in store_ids:
store_name, store_emails = store_info_func(store_id)
if debug == 'Y':
store_emails = ['[email protected]']
else:
for other_email in email_to:
store_emails.append(other_email)
target = sale_t[sale_t["store-id"] == store_id]["sales-diff"].values[0]
sales_mail_body = f"""
Hey {store_name} Store,
Your store is in Top stores in terms of sales de-growth
To exit from this phase you need to complete this sales target for today : {abs(target)}
"""
email.send_email_file(subject="Store Daily Insight 4 : Sales target for de-growing stores",
mail_body=sales_mail_body,
to_emails=store_emails,
file_uris=[],
file_paths=[])
logger.info("Sales De-growth Issue Finished")
#################################################################
############################ Substitution ######################
#################################################################
logger.info("Substitution Issue Started")
sub_q = """
select
"store-id",
"store-name",
composition,
sum(case when "substitution-status" = 'substituted' then 1.0 * quantity end)/ sum(case when "substitution-status" in ('substituted', 'not-substituted') then quantity end) as "substitution"
from
"prod2-generico".sales s
where
date("created-at") between current_date - 90 and current_date
group by
"store-id",
"store-name",
composition
having
"substitution" is not null;
"""
sub = rs_db.get_df(sub_q)
sub['substitution'] = sub['substitution'].astype(float)
sub05 = sub.groupby('composition', as_index=False).agg({'substitution': lambda x: x.quantile(0.05)})
sub05.rename(columns={'substitution': 'sub_05'}, inplace=True)
sub_system = sub.groupby('composition', as_index=False).agg({'substitution': lambda x: x.quantile(0.5)})
sub_system.rename(columns={'substitution': 'System Average'}, inplace=True)
sub = pd.merge(sub, sub05, on=['composition'], how='inner')
sub = pd.merge(sub, sub_system, on=['composition'], how='inner')
sub = sub[sub['substitution'] < sub['sub_05']]
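# keep only store-composition pairs whose substitution rate is below the composition's 5th percentile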
sub.drop(columns=['sub_05'], inplace=True)
sub = sub.sort_values('substitution', ascending=True).head(50)
sub['substitution'] = sub['substitution'].apply(lambda x: str(x * 100) + '%')
sub['System Average'] = sub['System Average'].apply(lambda x: str(x * 100) + '%')
store_ids = sub['store-id'].unique()
store_ids = list(set.intersection(set(store_ids), set(active_stores)))
for store_id in store_ids:
store_name, store_emails = store_info_func(store_id)
if debug == 'Y':
store_emails = ['[email protected]']
else:
for other_email in email_to:
store_emails.append(other_email)
sub_mail_body = f"""
Hey {store_name} Store,
There are some composition where your store is not performing well in terms of substitution,
compare to other stores
Please download the list and try to do active substitution for mentioned composition
"""
file_name = 'Substitution Composition.xlsx'
file_path = s3.write_df_to_excel(data={'Drugs': sub[sub['store-id'] == store_id]}, file_name=file_name)
email.send_email_file(subject="Store Daily Insight 5 : Composition substitution",
mail_body=sub_mail_body,
to_emails=store_emails,
file_uris=[],
file_paths=[file_path])
logger.info("Substitution Issue Finished")
#################################################################
######################### SKUs short in Market ##################
#################################################################
## Adding query for now - Create module after feedback from Soumya and Saniya
# select
# sm.store,
# t1.*,
# d.id as "best-substitute-drug-id",
# d."drug-name" as "best-substitute-drug-name"
# from
# (
# select
# sbx."store-id",
# sbx."drug-id",
# sbx."drug-name",
# max(sbx."created-at") "last-failed"
# from
# (
# select
# *,
# (case
# when status in ('lost', 'failed', 'declined') then 1.0
# else 0.0
# end) "status-code",
# rank() over( partition by "store-id",
# "drug-id"
# order by
# "created-at" desc) as "ORank"
# from
# "short-book-1" sb
# where
# date("created-at") between current_date-30 and current_date
# ) sbx
# where
# sbx."orank" <= 3
# and sbx."drug-id" is not null
# group by
# sbx."store-id",
# sbx."drug-id",
# sbx."drug-name"
# having
# avg(sbx."status-code")= 1
# and max(sbx."orank") >= 3
# ) t1
# left join "drug-substitution-mapping" dsm1 on
# t1."drug-id" = dsm1."drug-id"
# left join (
# select
# i."store-id",
# dsm2."group",
# dsm2."drug-id",
# sum(i.quantity) as "total-quantity",
# dense_rank() over( partition by i."store-id",
# dsm2."group"
# order by
# "total-quantity" desc) as "gRank"
# from
# "drug-substitution-mapping" dsm2
# left join "inventory-1" i on
# dsm2."drug-id" = i."drug-id"
# group by
# i."store-id",
# dsm2."group",
# dsm2."drug-id"
# having
# sum(i.quantity) > 0) t2 on
# t2."group" = dsm1."group"
# and t2."store-id" = t1."store-id"
# left join drugs d on
# t2."drug-id" = d.id
# left join "drug-order-info" doi on
# t1."drug-id" = doi."drug-id"
# and t1."store-id" = doi."store-id"
# left join "stores-master" sm on
# sm.id = t1."store-id"
# where
# t2."gRank" = 1
# and t1."drug-id" != t2."drug-id"
# and doi."as-active" = 1
# and doi."max" > 0
# order by
# 5 desc
# limit 20; | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/store-intelligence/store-intelligence.py | store-intelligence.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "promo"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"promo-code",
"promo-code-type",
"promo-eligibility",
"promo-discount-type",
"promo-min-purchase",
"campaign-id",
"campaign-name"
)
select
pc.id ,
pc."created-by",
pc."created-at",
pc."updated-by",
pc."updated-at" ,
pc."promo-code" as "promo-code" ,
pc."code-type" as "promo-code-type",
pc."type" as "promo-eligibility",
pc."discount-type" as "promo-discount-type",
pc."min-purchase" as "promo-min-purchase",
pc."campaign-id" as "campaign-id",
c."campaign" as "campaign-name"
from
"prod2-generico"."promo-codes" pc
left join "prod2-generico".campaigns c on
pc."campaign-id" = c.id
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/promo/promo.py | promo.py |
# !/usr/bin/env python
# coding: utf-8
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from datetime import datetime
from datetime import timedelta
from dateutil.tz import gettz
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-rd', '--runtime_date_exp', default="0101-01-01", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
runtime_date_exp = args.runtime_date_exp
email_to = args.email_to
# env = 'stage'
# limit = 10
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
# Run date
if runtime_date_exp != '0101-01-01':
run_date = runtime_date_exp
else:
# run_date = datetime.today().strftime('%Y-%m-%d')
# Timezone aware
run_date = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d")
# runtime_date = '2021-09-01'
logger.info("Running for {}".format(run_date))
# Period end date
# Paramatrize it
period_end_d_ts = datetime.strptime(run_date, '%Y-%m-%d') - timedelta(days=1)
period_end_d = period_end_d_ts.strftime('%Y-%m-%d')
# data to be fetched
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
sales_q = """
SELECT
"patient-id",
"drug-id",
"created-date" as "bill-date"
FROM
"sales"
WHERE "created-date" <= '{0}'
""".format(period_end_d)
# AND "store-id" = 2
sales_q = sales_q.replace('`', '"')
logger.info(sales_q)
data_s = rs_db.get_df(query=sales_q)
data_s.columns = [c.replace('-', '_') for c in data_s.columns]
logger.info(len(data_s))
logger.info("Data length is : {}".format(len(data_s)))
data_s['bill_date'] = pd.to_datetime(data_s['bill_date'])
# Drop duplicates
data_s = data_s.drop_duplicates()
logger.info("Data length after dropping duplicates is : {}".format(len(data_s)))
# Sort data
data_s = data_s.sort_values(by=['patient_id', 'drug_id', 'bill_date'])
# Previous bill date
data_s['prev_bill_date'] = data_s.groupby(['patient_id', 'drug_id'])['bill_date'].shift(1)
data_s['purchase_interval'] = (data_s['bill_date'] - data_s['prev_bill_date']).dt.days
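# purchase interval = days between consecutive purchases of the same drug by the same patient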
# Group at patient_id, drug_id
data_s_grp = data_s.groupby(['patient_id', 'drug_id']).agg({'purchase_interval': ['count', 'mean', 'std']}
).reset_index()
data_s_grp.columns = ['patient_id', 'drug_id', 'count_interval', 'mean_interval', 'std_interval']
data_s_grp['cov'] = np.round(data_s_grp['std_interval'] / data_s_grp['mean_interval'], 2)
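# cov = coefficient of variation (std / mean) of the purchase interval; lower values mean more regular refills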
data_s_grp = data_s_grp.round(2)
logger.info("Length of data grp is {}".format(len(data_s_grp)))
# Remove cases where cov is NULL
data_s_grp = data_s_grp[~data_s_grp['cov'].isnull()]
logger.info("Length of data grp - after removing null cases, is {}".format(len(data_s_grp)))
# DB upload columns
final_cols = ['patient_id', 'drug_id',
'count_interval',
'mean_interval', 'std_interval',
'cov']
data_export = data_s_grp[final_cols]
# For redshift specific
# Convert int columns to int
for i in ['patient_id', 'drug_id']:
data_export[i] = data_export[i].fillna(0).astype(int)
logger.info(data_export.columns)
################################
# DB WRITE
###############################
write_schema = 'prod2-generico'
write_table_name = 'patient-drug-interval'
table_info = helper.get_table_info(db=rs_db_write, table_name=write_table_name, schema=write_schema)
# table_info_clean = table_info[~table_info['column_name'].isin(['id', 'created-at', 'updated-at'])]
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
# Mandatory lines
data_export['created-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['created-by'] = 'etl-automation'
data_export['updated-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['updated-by'] = 'etl-automation'
# Truncate and append
rs_db_write.execute(f"set search_path to '{write_schema}'", params=None)
truncate_q = f"""
DELETE FROM
"{write_table_name}"
"""
rs_db_write.execute(truncate_q)
# Write to DB
s3.write_df_to_db(df=data_export[table_info['column_name']], table_name=write_table_name,
db=rs_db_write, schema=write_schema)
logger.info("Uploading successful with length: {}".format(len(data_export)))
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection()
logger.info("File ends") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/patient-drug-interval/patient-drug-interval.py | patient-drug-interval.py |
import argparse
import datetime
import os
import sys
from functools import reduce
import pandas as pd
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB(read_only=False)
rs_db.open_connection()
s3 = S3()
# def main(rs_db, s3):
schema = 'prod2-generico'
table_name = 'lnd-ma-stl'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# Read from gsheet
gs = GoogleSheet()
ma_stl_data = gs.download(data={
"spreadsheet_id": "1Csw_FvpGxPNdUMmRXr1k26J0YEY-QKaElxu_jTTksQE",
"sheet_name": "Form Responses 1",
"listedFields": []
})
df = pd.DataFrame(ma_stl_data)
# Correct data types
df[['timestamp', 'start date', 'date of verification checklist']] = df[
['timestamp', 'start date', 'date of verification checklist']] \
.apply(pd.to_datetime, errors='coerce')
df['enter score '] = df['enter score '].apply(pd.to_numeric, errors='coerce')
df['select result'] = df['select result'].str.lower()
# separate out training calender
training_calender = df[(df['type of form'] == 'Training Calendar')]
# combine multiple course columns into one column
training_calender['course0_1_2_3_4'] = training_calender[['courses','courses.1','courses.2','courses.3']].apply(
lambda x: ''.join(x.dropna().astype(str)), axis=1)
training_calender = training_calender[
['timestamp', 'type of form', 'store name', 'employee code',
'designation', 'start date', 'course0_1_2_3_4']]
training_calender.rename(columns={'course0_1_2_3_4': 'course'}, inplace=True)
training_calender = training_calender.groupby(['type of form', 'store name',
'employee code', 'course']) \
.agg({'timestamp': 'max', 'start date': 'max'}).reset_index()
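# keep only the latest training-calendar entry per (type of form, store, employee, course)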
# announced verification attempt 1
verif_check_announced1 = df[(df['type of form'] == 'Verification Checklist') & (
df['verification checklist type'] == 'Announced - Attempt 1 (By STL)')]
verif_check_announced1 = verif_check_announced1[
['date of verification checklist', 'employee code.1', 'date of joining', 'select role',
'select verification checklist',
'enter score ', 'select result']]
verif_check_announced1 = verif_check_announced1.groupby(['employee code.1',
'select verification checklist']) \
.agg({'date of verification checklist': 'max',
'enter score ': 'max', 'select result': 'max'}) \
.reset_index()
verif_check_announced1 = verif_check_announced1.rename(
columns={'employee code.1': 'employee code', 'select verification checklist': 'course',
'enter score ': 'an1_score', 'select result': 'an1_result',
'date of verification checklist': 'an1_date_of_verification'})
# announced verification attempt 2
verif_check_announced2 = df[(df['type of form'] == 'Verification Checklist') & (
df['verification checklist type'] == 'Announced - Attempt 2 (By STL)')]
verif_check_announced2 = verif_check_announced2[
['date of verification checklist', 'employee code.1', 'date of joining', 'select role',
'select verification checklist',
'enter score ', 'select result']]
verif_check_announced2 = verif_check_announced2.groupby(['employee code.1',
'select verification checklist']).agg(
{'date of verification checklist': 'max', 'enter score ':
'max', 'select result': 'max'}).reset_index()
verif_check_announced2 = verif_check_announced2.rename(
columns={'employee code.1': 'employee code', 'select verification checklist': 'course',
'enter score ': 'an2_score', 'select result': 'an2_result',
'date of verification checklist': 'an2_date_of_verification'})
# announced verification attempt 3
verif_check_announced3 = df[(df['type of form'] == 'Verification Checklist') & (
df['verification checklist type'] == 'Announced - Attempt 3 (By STL)')]
verif_check_announced3 = verif_check_announced3[
['date of verification checklist', 'employee code.1',
'date of joining', 'select role',
'select verification checklist',
'enter score ', 'select result']]
verif_check_announced3 = verif_check_announced3.groupby(['employee code.1',
'select verification checklist']).agg(
{'date of verification checklist': 'max', 'enter score ': 'max', 'select result': 'max'}).reset_index()
verif_check_announced3 = verif_check_announced3.rename(
columns={'employee code.1': 'employee code', 'select verification checklist': 'course',
'enter score ': 'an3_score', 'select result': 'an3_result',
'date of verification checklist': 'an3_date_of_verification'})
# Unannounced verification attempt 1
verif_check_unannounced1 = df[(df['type of form'] == 'Verification Checklist') & (
df['verification checklist type'] == 'Unannounced - Attempt 1 (By SBO)')]
verif_check_unannounced1 = verif_check_unannounced1[
['date of verification checklist', 'employee code.1',
'date of joining', 'select role',
'select verification checklist',
'enter score ', 'select result']]
verif_check_unannounced1 = verif_check_unannounced1.groupby(
['employee code.1', 'select verification checklist']).agg(
{'date of verification checklist': 'max', 'enter score ': 'max', 'select result': 'max'}).reset_index()
verif_check_unannounced1 = verif_check_unannounced1.rename(
columns={'employee code.1': 'employee code', 'select verification checklist': 'course',
'enter score ': 'un1_score', 'select result': 'un1_result',
'date of verification checklist': 'un1_date_of_verification'})
# Unannounced verification attempt 2
verif_check_unannounced2 = df[(df['type of form'] == 'Verification Checklist') & (
df['verification checklist type']
== 'Unannounced - Attempt 2 (By SBO)')]
verif_check_unannounced2 = verif_check_unannounced2[
['date of verification checklist', 'employee code.1', 'date of joining', 'select role',
'select verification checklist', 'enter score ', 'select result']]
verif_check_unannounced2 = verif_check_unannounced2.groupby(
['employee code.1', 'select verification checklist']).agg(
{'date of verification checklist': 'max', 'enter score ': 'max', 'select result': 'max'}).reset_index()
verif_check_unannounced2 = verif_check_unannounced2.rename(
columns={'employee code.1': 'employee code', 'select verification checklist': 'course',
'enter score ': 'un2_score', 'select result': 'un2_result',
'date of verification checklist': 'un2_date_of_verification'})
# Unannounced verification attempt 3
verif_check_unannounced3 = df[(df['type of form'] == 'Verification Checklist') & (
df['verification checklist type'] == 'Unannounced - Attempt 3 (By SBO)')]
verif_check_unannounced3 = verif_check_unannounced3[
['date of verification checklist', 'employee code.1', 'date of joining', 'select role',
'select verification checklist', 'enter score ', 'select result']]
verif_check_unannounced3 = verif_check_unannounced3.groupby(
['employee code.1', 'select verification checklist']).agg(
{'date of verification checklist': 'max', 'enter score ':
'max', 'select result': 'max'}).reset_index()
verif_check_unannounced3 = verif_check_unannounced3.rename(
columns={'employee code.1': 'employee code', 'select verification checklist': 'course',
'enter score ': 'un3_score', 'select result': 'un3_result',
'date of verification checklist': 'un3_date_of_verification'})
# Joining all data frames to training calender
dfs = [training_calender, verif_check_announced1,
verif_check_announced2, verif_check_announced3,
verif_check_unannounced1, verif_check_unannounced2,
verif_check_unannounced3]
lnd_ma_stl = reduce(lambda left, right: pd.merge(left, right,
on=["employee code", "course"], how='left'), dfs)
lnd_ma_stl.columns = [c.replace(' ', '_') for c in lnd_ma_stl.columns]
lnd_ma_stl.columns = [c.lower() for c in lnd_ma_stl.columns]
# etl
lnd_ma_stl['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
lnd_ma_stl['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
lnd_ma_stl['created-by'] = 'etl-automation'
lnd_ma_stl['updated-by'] = 'etl-automation'
lnd_ma_stl.columns = [c.replace('_', '-') for c in lnd_ma_stl.columns]
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=lnd_ma_stl[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/lnd_ma_stl/lnd_ma_stl.py | lnd_ma_stl.py |
#!/usr/bin/env python
# coding: utf-8
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
start_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
status1 = False
# schema and table name defined up-front so the status email at the end works even if the try block fails early
schema = 'prod2-generico'
table_name = 'glue-customer-return'
try:
    current_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date() - datetime.timedelta(days=1)
customer_return_query = f"""
select
a."billed-at",
a."returned-at",
b."patient-id",
b."store-id",
f."name" as "store-name",
c."drug-id",
d."drug-name",
d.type,
d.category,
e."drug-grade",
a."return-id",
a."inventory-id",
a."bill-id",
a."returned-quantity",
a."return-reason",
(a."rate" * a."returned-quantity") as "return-value"
from
"prod2-generico"."customer-return-items-1" a
left join
"prod2-generico"."customer-returns-1" b on
a."return-id" = b."id"
left join
"prod2-generico"."inventory-1" c on
c."id" = a."inventory-id"
left join
"prod2-generico"."drugs" d on
d."id" = c."drug-id"
left join
"prod2-generico"."drug-order-info" e on
e."store-id" = b."store-id"
and e."drug-id" = c."drug-id"
left join
"prod2-generico"."stores" f on
f."id" = b."store-id"
    where date(a."returned-at") = '{current_date}'
    """.format(current_date=current_date)
customer_return = rs_db.get_df(customer_return_query)
logger.info('Customer return data loaded')
if len(customer_return)!= 0:
customer_return['billed-date'] = customer_return['billed-at'].dt.date
customer_return['returned-date'] = customer_return['returned-at'].dt.date
customer_return['return-time-hrs'] = (customer_return['returned-at'] - customer_return['billed-at'])/pd.Timedelta('1s')/60/60
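        # turnaround time from billing to return, in hours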
else:
customer_return['billed-date'] = current_date
customer_return['returned-date'] = current_date
customer_return['return-time-hrs'] = 0
customer_return = customer_return[[
'billed-date', 'returned-date', 'patient-id', 'store-id',
'store-name', 'drug-id', 'drug-name', 'type', 'category',
'drug-grade', 'return-id', 'inventory-id', 'bill-id',
'returned-quantity', 'return-reason',
'return-value', 'return-time-hrs'
]]
truncate_query = '''
delete from "prod2-generico"."glue-customer-return"
        where date("returned-date") = '{current_date}'
        '''.format(current_date=current_date)
rs_db_write.execute(truncate_query)
logger.info('glue-customer-return data deleted for yesterday to avoid duplication in case of multiple runs')
customer_return['uploaded-at']=datetime.datetime.now(tz=gettz('Asia/Kolkata'))
schema = 'prod2-generico'
table_name = 'glue-customer-return'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
s3.write_df_to_db(df=customer_return[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
logger.info('Customer return data written to redshift')
status1 = True
except Exception:
    logger.info(traceback.format_exc())
    status1 = False
if status1:
status = 'Success'
else:
status = 'Failed'
end_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject=f'{env} - {status} - {table_name} updated',
mail_body=f" {table_name} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/glue-customer-returns/glue-customer-returns.py | glue-customer-returns.py |
import argparse
import sys
import re
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import pandas as pd
import dateutil
from dateutil.tz import gettz
from zeno_etl_libs.helper.email.email import Email
import numpy as np
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.tokenize import word_tokenize, RegexpTokenizer, sent_tokenize
from nltk.corpus import stopwords
import time
import datetime
import os
import nltk
nltk.download('stopwords')
nltk.download('punkt') # divides a whole text data into sentences
nltk.download('vader_lexicon')
import tweepy
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-d', '--full_run', default=0, type=int, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
email_to = args.email_to
env = args.env
full_run = args.full_run
os.environ['env'] = env
logger = get_logger()
logger.info(f"full_run: {full_run}")
rs_db = DB(read_only=False)
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'zeno-tweets'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# max of data
table_q = """
select
max("tweet-created-at") max_exp
from
"prod2-generico"."zeno-tweets"
"""
max_exp_date = rs_db.get_df(table_q)
max_exp_date['max_exp'].fillna(np.nan, inplace=True)
print(max_exp_date.info())
max_exp_date = max_exp_date['max_exp'].to_string(index=False)
print(max_exp_date)
# params
if full_run or max_exp_date == 'NaN':
start = '2017-05-13'
else:
start = max_exp_date
start = dateutil.parser.parse(start)
print(start)
# defining keys and tokens
consumer_key = 'c57SU7sulViKSmjsOTi4kTO3W'
consumer_secret = 'cNT3yk5ibQ315AWNCJHgE9ipCGlM1XnenHZu9cBWaVL3q7fPew'
access_token = '796747210159517701-DhOBQgwzeb6q4eXlI4WjwPRJH1CuEIT'
access_token_secret = 'sMrnPZ4ExI8um43wquUvFEUCTyY61HYRf7z3jv00ltXlt'
# making api connection
# authentication
def auth(consumer_key, consumer_secret, access_token, access_token_secret):
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
return api
api = auth(consumer_key, consumer_secret, access_token, access_token_secret)
# remove url
def remove_url(txt):
"""Replace URLs found in a text string with nothing
(i.e. it will remove the URL from the string).
Parameters
----------
txt : string
A text string that you want to parse and remove urls.
Returns
-------
The same txt string with url's removed.
"""
return " ".join(re.sub("([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", txt).split())
# searching for the keyword in tweeter and tokenizing it
def tweet(search_term, count=100000):
# Create a custom search term and define the number of tweets
tweets = api.search_tweets(search_term, count=count)
# Remove URLs
tweets_no_urls = [remove_url(tweet.text) for tweet in tweets]
# lowercase
tweet_data = [sent_tokenize(x.lower()) for x in tweets_no_urls]
tweet_data = pd.DataFrame(data=tweet_data, columns=['tweetext'])
tweet_att = [[search_term, x.lang, x.user.location, x.created_at, x.id, x.user.name,
x.user.followers_count, x.user.friends_count, x.text, x.place, x.user.time_zone] for x in tweets]
tweet_att = pd.DataFrame(data=tweet_att, columns=['search_term', 'lang', 'loc', 'created-at', 'id', 'username',
'followers', 'friends', 'og tweet', 'place', 'Tz'])
final_data = pd.concat([tweet_data, tweet_att], axis=1)
return final_data
# removing stopwords
def remove_sw(sent, corpus):
stop_words = set(stopwords.words(corpus))
word_tokens = word_tokenize(sent)
filtered_sentence = [w for w in word_tokens if not w.lower() in stop_words]
filtered_sentence = []
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w)
filtered_sentence = ' '.join(filtered_sentence)
return [filtered_sentence]
# finding sentiment intensity analyzer
def sentiment_analyser(lst):
sid = SentimentIntensityAnalyzer()
sentiment = [sid.polarity_scores(x) for x in lst]
neg = [sid.polarity_scores(x)['neg'] for x in lst]
neu = [sid.polarity_scores(x)['neu'] for x in lst]
pos = [sid.polarity_scores(x)['pos'] for x in lst]
comp = [sid.polarity_scores(x)['compound'] for x in lst]
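    # VADER returns neg/neu/pos scores in [0, 1] and a compound score in [-1, 1]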
return neg[0], neu[0], pos[0], comp[0]
# running all above functions
def run_all(search_term, count=1000000):
print("API handshake successful")
print("Searching for term ", search_term)
tweet_data = tweet(search_term, count=count)
print(tweet_data)
# print(tweet_data)
print("Removing stopwords")
sw = 'english'
if tweet_data.empty:
return tweet_data
else:
tweet_data['tweetext_filter'] = tweet_data['tweetext'].apply(lambda x: remove_sw(x, sw), 1)
print(tweet_data)
print("Analysing sentiment for ", search_term)
print(tweet_data)
tweet_data['neg', 'neu', 'pos', 'comp'] = tweet_data['tweetext_filter'].apply(lambda x: sentiment_analyser(x), 1)
tweet_data[['neg', 'neu', 'pos', 'comp']] = tweet_data['neg', 'neu', 'pos', 'comp'].apply(pd.Series)
tweet_data.drop(columns=('neg', 'neu', 'pos', 'comp'), inplace=True)
# sentiment, neg, neu, pos, comp = sentiment_analyser(tweets)
# df = build_df(pos,neg,neu,comp, tweets)
print('Done \n')
return tweet_data
search_terms = ['#zeno_health','@zeno_health']
tws = pd.DataFrame()
try:
for search_term in search_terms:
tw = run_all(search_term, count=1000000)
tws = pd.concat([tws, tw], axis=0)
print('Done')
tws = tws[((tws['lang'].isin(['en', 'hi']) & (~tws['tweetext'].str.startswith('rt'))))]
except BaseException as e:
print('failed on_status,', str(e))
time.sleep(3)
tws
if tws.empty:
print('DataFrame is empty!')
exit()
tws = tws[
['og tweet', 'id', 'created-at', 'search_term', 'lang', 'loc', 'username', 'followers', 'friends', 'neg', 'neu',
'pos', 'comp']]
dict = {'id': 'tweet-id',
'og tweet': 'tweet',
'search_term': 'search-term',
'lang': 'language',
'loc': 'location',
'created-at': 'tweet-created-at',
'pos': 'positive-sentiment',
'neu': 'neutral-sentiment',
'neg': 'negative-sentiment',
'comp': 'compound-sentiment'}
tws.rename(columns=dict, inplace=True)
tws['tweet-created-at'] = pd.to_datetime(tws['tweet-created-at']). \
dt.tz_convert('Asia/Kolkata').dt.tz_localize(None)
# etl
tws['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
tws['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
tws['created-by'] = 'etl-automation'
tws['updated-by'] = 'etl-automation'
tws['tweet-type'] = np.where(tws['negative-sentiment'] >= tws['positive-sentiment'], 'Detractor', 'Promoter')
tws_mail = tws[['tweet-id', 'tweet', 'tweet-created-at', 'search-term', 'language', 'location', 'username', 'followers',
'friends', 'tweet-type']]
tws_mail = tws_mail.sort_values(by=['tweet-type'], ascending=True)
print(tws_mail)
tws_mail = tws_mail[(tws_mail['tweet-created-at'] > start)]
tws = tws[(tws['tweet-created-at'] > start)]
if tws.empty:
print('DataFrame is empty!')
exit()
tws.columns = [c.replace('_', '-') for c in tws.columns]
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
print(start)
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where "tweet-created-at" >'{start}' '''
print(truncate_query)
rs_db.execute(truncate_query)
s3.write_df_to_db(df=tws[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
file_name = 'Zeno_Tweets.xlsx'
file_path = s3.write_df_to_excel(data={'Zeno Tweets': tws_mail}, file_name=file_name)
email = Email()
# file_path ='/Users/Lenovo/Downloads/utter.csv'
email.send_email_file(subject="Zeno Tweets",
mail_body='Zeno Tweets',
to_emails=email_to, file_uris=[], file_paths=[file_path])
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/zeno-tweets/zeno-tweets.py | zeno-tweets.py |
import sys
import os
import argparse
from datetime import datetime
import numpy as np
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
logger.info('Script Manager Initialized')
rs_db = DB()
s3 = S3()
rs_db.open_connection()
snapshot_date = datetime.now().date()
inv_query='''
select
a."store-id" ,
a."franchisee-id",
a."store-name" as "store-name",
a."type" ,
a.category ,
a."drug-grade" ,
sum(case when a."inventory-oh" <> 0 then 1 else 0 end) as "drug-count",
SUM(a."inventory-oh") as "inventory-oh" ,
SUM(a."inventory-value-oh") as "inventory-value-oh",
SUM(a."min-inv-value") as "min-inv-value",
SUM(a."ss-inv-value") as "ss-inv-value",
SUM(a."max-inv-value") as "max-inv-value"
FROM
(select
inv."store-id" ,
str."franchisee-id" ,
str."name" as "store-name",
d."type" ,
d.category ,
di."drug-grade" ,
inv."drug-id",
(CASE
WHEN
str."franchisee-id"=1 then sum(inv.quantity + inv."locked-for-return" + inv."locked-for-audit" + inv."locked-for-check" + inv."locked-for-transfer" + inv."locked-quantity")
WHEN str."franchisee-id"!=1 then SUM(inv."locked-quantity")
END) as "inventory-oh",
(CASE
WHEN
str."franchisee-id"=1 then sum(inv.ptr * (inv.quantity + inv."locked-for-return" + inv."locked-for-audit" + inv."locked-for-check" + inv."locked-for-transfer" + inv."locked-quantity"))
WHEN str."franchisee-id"!=1 then sum(inv.ptr * ( inv."locked-quantity"))
END) as "inventory-value-oh",
coalesce(avg(di."min") * avg(inv.ptr), 0) as "min-inv-value",
coalesce(avg(di."safe-stock") * avg(inv.ptr), 0) as "ss-inv-value",
coalesce(avg(di."max") * avg(inv.ptr), 0) as "max-inv-value"
from
"prod2-generico"."prod2-generico"."inventory-1" inv
left join "prod2-generico"."prod2-generico"."invoice-items-1" ii on
inv."invoice-item-id" = ii.id
left join "prod2-generico".drugs d
on
inv."drug-id" = d.id
left join "prod2-generico"."prod2-generico"."drug-order-info" di
on
inv."drug-id" = di."drug-id"
and inv."store-id" = di."store-id"
left join "prod2-generico".stores str
on
inv."store-id" = str.id
group by
inv."store-id" ,
str."name" ,
str."franchisee-id" ,
d."type" ,
d.category ,
di."drug-grade",inv."drug-id") a
group by a."store-id" ,
a."store-name" ,
a."franchisee-id",
a."type" ,
a.category ,
a."drug-grade"
'''
stores=rs_db.get_df(query=inv_query)
value_cols = ['inventory-value-oh', 'min-inv-value', 'ss-inv-value', 'max-inv-value']
stores[value_cols] = stores[value_cols].astype(np.float64)
stores['snapshot-date'] = snapshot_date
# tz-naive IST timestamp (seconds precision) for the audit columns
now_ist = datetime.strptime(
    datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S"),
    "%Y-%m-%d %H:%M:%S")
stores['created-at'] = now_ist
stores['updated-at'] = now_ist
stores['created-by'] = 'etl-automation'
stores['updated-by'] = 'etl-automation'
# delete any rows already loaded for today's snapshot so reruns stay idempotent
logger.info('Deleting existing rows for the current snapshot date')
truncate_query = '''
delete from "prod2-generico"."store-inventory-sns"
where date("snapshot-date") = '{snapshot_date}'
'''.format(snapshot_date=snapshot_date)
rs_db.execute(truncate_query)
stores.columns = [c.replace('_', '-') for c in stores.columns]
schema = "prod2-generico"
table_name = "store-inventory-sns"
table_info = helper.get_table_info(db=rs_db
, table_name=table_name, schema=schema)
logger.info('Writing to table')
s3.write_df_to_db(df=stores[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
# any failure above raises and skips the status mail, so reaching this point means success
status = True
script_status = "Success" if status else "Failed"
email = Email()
email.send_email_file(subject=f"store_inventory {snapshot_date} {script_status}",
mail_body=f"store inventory job status: {script_status} ",
to_emails=email_to)
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/store_inventory/store-inventory-sns.py | store-inventory-sns.py |
import argparse
import os
import sys
from datetime import datetime as dt
from datetime import timedelta
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.email.email import Email
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-fr', '--full_run', default=0, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
logger = get_logger()
logger.info(f"env: {env}")
read_schema = "prod2-generico"
rs_db = DB()
rs_db.open_connection()
s3 = S3()
######################################################################
########################## Gross Sales ################################
######################################################################
gross_q = f"""
select
b."store-id",
s."name" as "store-name",
s."franchisee-id",
sum(bi.rate * bi.quantity) as "sales_e",
sum(case when (pc."created-by" != '[email protected]' and pc."code-type" != 'referral' ) then bi."promo-discount" else 0 end) as "cat-promo",
"sales_e" - "cat-promo" as "actual-sales"
from
"{read_schema}"."bills-1" b
left join "{read_schema}"."bill-items-1" bi on
b.id = bi."bill-id"
left join "{read_schema}"."promo-codes" pc on
b."promo-code-id" = pc.id
left join "{read_schema}".stores s on
b."store-id" = s.id
where
date(b."created-at") = current_date - 1
group by
b."store-id",
s."name",
s."franchisee-id";
"""
gross_sales = rs_db.get_df(query=gross_q)
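# yesterday's gross sales per store = rate * quantity, less discounts from category promo
# codes (referral codes and the excluded creator's codes are not deducted)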
return_q = f"""
select
b."store-id",
s."name" as "store-name",
s."franchisee-id",
sum(case
when (pc."created-by" != '[email protected]'
and pc."code-type" != 'referral' ) then cri."return-value"
else (rate * "returned-quantity")
end ) "actutal-return"
from
"{read_schema}"."customer-return-items-1" cri
left join "{read_schema}"."bills-1" b on
cri."bill-id" = b.id
left join "{read_schema}"."promo-codes" pc on
b."promo-code-id" = pc.id
left join "{read_schema}".stores s on
b."store-id" = s.id
where
date(cri."returned-at") = current_date - 1
group by
b."store-id",
s."name",
s."franchisee-id";
"""
return_sales = rs_db.get_df(query=return_q)
sales = pd.merge(gross_sales, return_sales, on=['store-id', 'store-name', 'franchisee-id'], how='left')
sales.drop(columns=['sales_e', 'cat-promo'], inplace=True)
# stores with no returns yesterday come through the left join as NaN; treat them as zero returns
sales['actual-return'] = sales['actual-return'].fillna(0)
sales['net-sales'] = sales['actual-sales'] - sales['actual-return']
file_name = 'Sales_Report.xlsx'
file_path = s3.write_df_to_excel(data={'Store_Level_Sales': sales
}, file_name=file_name)
sales_body = f"""
Hey,
Here is the Daily Sales Report
Overall Sales : {sales['net-sales'].sum()}
COCO Sales : {sales[sales["franchisee-id"]==1]['net-sales'].sum()}
FOFO Sales : {sales[sales["franchisee-id"]!=1]['net-sales'].sum()}
File can be downloaded for Store_Level Sales
Thanks
"""
email = Email()
email.send_email_file(subject=f"Sales Report Data : {dt.today().date() - timedelta(days=1)}",
mail_body=sales_body,
to_emails=email_to, file_uris=[], file_paths=[file_path]) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/daily-metric-automailer/daily-metric-automailer.py | daily-metric-automailer.py |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger, send_logs_via_email
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
from dateutil.relativedelta import relativedelta
from zeno_etl_libs.queries.dead_stock import dead_stock_queries
from zeno_etl_libs.queries.dead_stock.dead_stock_categorisation import dead_stock_categorization, dead_data_prep, dead_value_bucket
from zeno_etl_libs.utils.doid_write import doid_custom_write
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-d', '--days', default=90, type=int, required=False)
parser.add_argument('-ed', '--expiry_days', default=150, type=int, required=False)
parser.add_argument('-fed', '--fofo_expiry_days', default=270, type=int, required=False)
parser.add_argument('-ned', '--npi_expiry_days', default=180, type=int, required=False)
parser.add_argument('-emc', '--expiry_month_cutoff', default=1, type=int, required=False)
parser.add_argument('-jn', '--job_name', default=None, type=str, required=False)
parser.add_argument('-lem', '--log_email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-gif', '--goodaid_inclusion_flag', default=1, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
days = args.days
expiry_days = args.expiry_days
fofo_expiry_days = args.fofo_expiry_days
npi_expiry_days = args.npi_expiry_days
expiry_month_cutoff = args.expiry_month_cutoff
job_name = args.job_name
log_email_to = args.log_email_to.split(",")
goodaid_inclusion_flag = args.goodaid_inclusion_flag
os.environ['env'] = env
logger = get_logger(level = 'INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
start_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("days - " + str(days))
logger.info("expiry_days - " + str(expiry_days))
logger.info("fofo_expiry_days - " + str(fofo_expiry_days))
logger.info("npi_expiry_days - " + str(npi_expiry_days))
logger.info("expiry_month_cutoff - " + str(expiry_month_cutoff))
logger.info("job_name - " + str(job_name))
logger.info("log_email_to - " + str(log_email_to))
logger.info("goodaid_inclusion_flag - " + str(goodaid_inclusion_flag))
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
status = 'Failed'
doid_missed_entries = pd.DataFrame()
try :
# get stores open date
stores_date_query = '''
select
s.id as "store-id",
s."franchisee-id" ,
date(s."opened-at") as "opened-date",
case
when s."franchisee-id" = 1
and DATEDIFF(d,
s."opened-at",
current_date)>= 182 then 'old'
when s."franchisee-id" != 1
and DATEDIFF(d,
s."opened-at",
current_date)>= 90 then 'old'
else 'new'
end as "store-age-flag"
from
"prod2-generico".stores s
where
s."opened-at" != '0101-01-01 00:00:00.000'
'''
stores_date = rs_db.get_df(stores_date_query)
stores_list = stores_date.loc[
stores_date['store-age-flag'] == 'old', 'store-id']
    truncate_inv_query = '''
DELETE FROM "prod2-generico"."dead-stock-inventory"
'''
truncate_sns_query = '''
delete from "prod2-generico"."dead-stock-inventory-sns"
where "snapshot-date" = CURRENT_DATE + 1
'''
insert_sns_query = '''
insert
into
"prod2-generico"."dead-stock-inventory-sns"
select
CURRENT_DATE + 1 as "snapshot-date",
"inventory-type",
"store-id",
"store-name",
"drug-type",
"drug-grade",
sum(quantity) as quantity,
sum(value) as value,
sum("locked-quantity") as "locked-quantity",
sum("locked-value") as "locked-value"
from
"prod2-generico"."dead-stock-inventory"
group by
"inventory-type",
"store-id",
"store-name",
"drug-type",
"drug-grade"
'''
sales = pd.DataFrame()
inventory = pd.DataFrame()
store_inventory_sales = pd.DataFrame()
'''Getting sales and inventory data by store '''
for store_id in sorted(stores_date['store-id'].unique()):
logger.info('Loading data for store ' + str(store_id))
sales_store, inventory_store, store_inventory_sales_store = dead_data_prep(
store_id, days, logger, connection = rs_db)
sales = sales.append(sales_store)
inventory = inventory.append(inventory_store)
store_inventory_sales = store_inventory_sales.append(
store_inventory_sales_store)
# GA drugs inclusion flag
if int(goodaid_inclusion_flag)==0:
logger.info('removing GA drugs from categorisation')
goodaid_drug_query = '''
select
d.id as "drug-id"
from
"prod2-generico".drugs d
where
d."company-id" = 6984
'''
goodaid_drugs = rs_db.get_df(goodaid_drug_query)
goodaid_drug_id = tuple(map(int,goodaid_drugs['drug-id'].unique()))
sales = sales[~sales['drug-id'].isin(goodaid_drug_id)]
inventory = inventory[~inventory['drug-id'].isin(goodaid_drug_id)]
store_inventory_sales = store_inventory_sales[~store_inventory_sales['drug-id'].isin(goodaid_drug_id)]
logger.info('removed GA drugs from categorisation')
else:
logger.info('not removing GA drugs from categorisation')
'''Inventory categorisation into different buckets'''
zippin_inventory, store_drug_no_sale, store_drug_with_sale,expiry_barcodes, return_barcodes, rotation_barcodes, fifo_barcodes = dead_stock_categorization(
sales, inventory, store_inventory_sales,
stores_list, logger, days, expiry_days, fofo_expiry_days,connection = rs_db)
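    # categorisation buckets: expiry (near-expiry/expired), return (send back to distributor),
    # rotation (NPI stock to move across stores) and fifo barcodes, plus store-drug level
    # summaries with and without sales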
    rs_db_write.execute(truncate_inv_query)
schema = 'prod2-generico'
table_name = 'dead-stock-inventory'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
expiry_barcodes['uploaded-at']= datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
expiry_barcodes['created-date']= datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d')
expiry_barcodes[['invoice-item-id','invoice-id','distributor-id','short-book-id']] = expiry_barcodes[['invoice-item-id','invoice-id','distributor-id','short-book-id']].apply(pd.to_numeric, errors='ignore').astype('Int64')
s3.write_df_to_db(df=expiry_barcodes[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
return_barcodes['uploaded-at']= datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
return_barcodes['created-date']= datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d')
return_barcodes[['invoice-item-id','invoice-id','distributor-id','short-book-id']] = return_barcodes[['invoice-item-id','invoice-id','distributor-id','short-book-id']].apply(pd.to_numeric, errors='ignore').astype('Int64')
s3.write_df_to_db(df=return_barcodes[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
rotation_barcodes['uploaded-at']= datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
rotation_barcodes['created-date']= datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d')
rotation_barcodes[['invoice-item-id','invoice-id','distributor-id','short-book-id']] = rotation_barcodes[['invoice-item-id','invoice-id','distributor-id','short-book-id']].apply(pd.to_numeric, errors='ignore').astype('Int64')
s3.write_df_to_db(df=rotation_barcodes[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
fifo_barcodes['uploaded-at']= datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
fifo_barcodes['created-date']= datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d')
fifo_barcodes[['invoice-item-id','invoice-id','distributor-id','short-book-id']] = fifo_barcodes[['invoice-item-id','invoice-id','distributor-id','short-book-id']].apply(pd.to_numeric, errors='ignore').astype('Int64')
s3.write_df_to_db(df=fifo_barcodes[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
rs_db_write.execute(truncate_sns_query)
rs_db_write.execute(insert_sns_query)
# Rotation drugs to be appended in omit_ss_reset table
omit_drug_store = rotation_barcodes[["drug-id",
"store-id"]].drop_duplicates()
omit_drug_store["updated-at"] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
omit_drug_store["created-at"] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
omit_drug_store["created-by"] = '[email protected]'
omit_drug_store["updated-by"] = '[email protected]'
omit_drug_store["start-date"] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d')
omit_drug_store["end-date"] = (datetime.datetime.now(tz=gettz('Asia/Kolkata')) + datetime.timedelta(
days=npi_expiry_days)).strftime('%Y-%m-%d')
omit_drug_store["is-active"] = 1
omit_drug_store["reason"] = 'NPI'
schema = 'prod2-generico'
table_name = 'omit-ss-reset'
# Uncomment following part once omit-ss-reset table is transferred to DSS
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
s3.write_df_to_db(df=omit_drug_store[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
# set max=0 for npi drugs in DOID
npi_store_drugs = omit_drug_store[["store-id", "drug-id"]]
npi_store_drugs.columns = [c.replace('-', '_') for c in npi_store_drugs.columns]
doid_missed_entries = doid_custom_write(npi_store_drugs, logger)
# save email attachements to s3
curr_date = str(datetime.date.today())
doid_missed_entries_uri = s3.save_df_to_s3(doid_missed_entries,
file_name=f"doid_missed_entries_{curr_date}.csv")
# Commenting Value Bucketing because relevant tables were not getting updated in DSS also
# '''Value bucketing done once a week: Monday morning'''
# if datetime.datetime.now().date().weekday() == 0:
#
# store_line_query= """
# select
# sm.id as "store-id",
# sm.line
# from
# "prod2-generico"."stores-master" sm
# """
# store_line = rs_db.get_df(store_line_query)
#
# rotation_barcodes = rotation_barcodes.merge(
# store_line, on='store-id', how='inner')
#
# rotation_barcodes['type-flag'] = np.select(
# [rotation_barcodes['drug-type'] == 'ethical',
# rotation_barcodes['drug-type'] == 'generic'],
# ['ethical', 'generic'], default='others'
# )
# rotation_value_bucket = rotation_barcodes.groupby(
# ['line']).apply(dead_value_bucket).reset_index()
# if 'level_1' in rotation_value_bucket.columns:
# rotation_value_bucket.drop('level_1', axis=1, inplace=True)
# if 'level-1' in rotation_value_bucket.columns:
# rotation_value_bucket.drop('level-1', axis=1, inplace=True)
# week_date = str(datetime.datetime.now().date())
# rotation_value_bucket['week-date'] = week_date
# engine.execute(
# '''delete from rotation_value_bucket where week_date = '{}'
# '''.format(week_date))
# rotation_value_bucket.to_sql(
# name='rotation_value_bucket', con=engine, if_exists='append',
# chunksize=500, method='multi', index=False)
# Commenting expiry_monthly_ss because this part was commented in DSS also
# '''Expired/Near Expiry barcode shapshot for first of the month'''
# # commenting out expiry snapshot as it is manual now
# '''
# current_date = datetime.datetime.now().date()
# if current_date.day == 1:
# logger.info('Month beginning snapshot for expired/near expiry')
# expiry_date_limit = (
# current_date + relativedelta(
# months=expiry_month_cutoff, days=-current_date.day))
# expiry_monthly_ss = expiry_barcodes[
# expiry_barcodes.expiry <= expiry_date_limit]
# expiry_monthly_ss['snapshot_date'] = str(current_date)
# expiry_monthly_ss.to_sql(
# name='expiry_monthly_ss', con=engine, if_exists='append',
# chunksize=500, method='multi', index=False)
status = 'Success'
except Exception:
    status = 'Failed'
    logger.info('table load failed')
    logger.info(traceback.format_exc())
    doid_missed_entries_uri = None
    table_name = None
end_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
if status == 'Success':
email.send_email_file(subject=f"{env}-{status} : {table_name}",
mail_body=f"{table_name} update {status}, Time for job completion - {min_to_complete} mins,"
f"DOID missed entries: {doid_missed_entries.shape[0]} ",
to_emails=email_to, file_uris=[doid_missed_entries_uri])
elif status=='Failed':
email.send_email_file(subject=f"{env}-{status} : {table_name}",
mail_body=f"{table_name} update {status}, Time for job completion - {min_to_complete} mins,",
to_emails=email_to, file_uris=[])
# send_logs_via_email(job_name=job_name, email_to=log_email_to)
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/dead-stock/dead-stock-main.py | dead-stock-main.py |
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
import json
import os
import time
import datetime as dt
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
from zeno_etl_libs.helper.parameter.job_parameter import parameter
from zeno_etl_libs.db.db import PostGre
from zeno_etl_libs.logger import get_logger
## @params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'env', 'in_vpc', 'instance_type',
'timeout_in_sec', 'parameters', 'script_name',
'script_location', 'job_param_id', 'batch_size'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
env = args['env']
os.environ['env'] = env
logger = get_logger()
in_vpc = args['in_vpc']
instance_type = args['instance_type']
timeout_in_sec = args['timeout_in_sec']
parameters = args['parameters']
parameters = json.loads(parameters)
logger.info(type(parameters))
parameters['env'] = env
script_name = args['script_name']
script_location = args['script_location']
# check job params
job_params = parameter.get_params(job_id=int(args['job_param_id']))
reset_stores = job_params["reset_stores"]
batch_size = int(args['batch_size'])
# batch_size cannot be 0
if batch_size <= 0:
batch_size = 1
if reset_stores == [0]:
# QUERY TO GET SCHEDULED STORES AND TYPE FROM OPS ORACLE
reset_date = dt.date.today().strftime("%Y-%m-%d")
pg_internal = PostGre(is_internal=True)
pg_internal.open_connection()
reset_store_query = """
SELECT
"ssr"."id" as object_id,
"s"."bpos_store_id" as store_id,
"dc"."slug" as type,
"ssr"."drug_grade"
FROM
"safety_stock_reset_drug_category_mapping" ssr
INNER JOIN "ops_store_manifest" osm
ON ( "ssr"."ops_store_manifest_id" = "osm"."id" )
INNER JOIN "retail_store" s
ON ( "osm"."store_id" = "s"."id" )
INNER JOIN "drug_category" dc
ON ( "ssr"."drug_category_id" = "dc"."id")
WHERE
(
( "ssr"."should_run_daily" = TRUE OR
"ssr"."trigger_dates" && ARRAY[ date('{reset_date}')] )
AND "ssr"."is_auto_generate" = TRUE
AND "osm"."is_active" = TRUE
AND "osm"."is_generate_safety_stock_reset" = TRUE
AND "dc"."is_safety_stock_reset_enabled" = TRUE
AND "dc"."is_active" = TRUE
)
""".format(reset_date=reset_date)
reset_store_ops = pd.read_sql_query(reset_store_query,
pg_internal.connection)
pg_internal.close_connection()
    # get scheduled stores from the ops output
    reset_stores = reset_store_ops['store_id'].unique().tolist()

# split stores into batches of batch_size; each batch gets its own SageMaker run
store_batch_split = [reset_stores[i:i + batch_size]
                     for i in range(0, len(reset_stores), batch_size)]
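# e.g. (illustrative) with batch_size = 3, stores [2, 4, 7, 9, 11] -> [[2, 4, 7], [9, 11]]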
# spawn sagemaker instances for each batch
for run_batch, batch_stores in enumerate(store_batch_split, start=1):
    # enumerate gives a stable batch number even if two batches happen to be identical
    tot_batch = len(store_batch_split)
# add to parameters
parameters["run_batch"] = run_batch
parameters["tot_batch"] = tot_batch
parameters["batch_stores"] = batch_stores
run_notebook.execute_notebook(
image=env + "-notebook-runner",
input_path=f"s3://aws-{env}-glue-assets-921939243643-ap-south-1/artifact/sagemaker-jobs/scripts/" + script_location + "/" + script_name,
output_prefix=run_notebook.get_output_prefix(),
notebook=script_location + '/' + script_name,
parameters=parameters,
role="arn:aws:iam::921939243643:role/service-role/AmazonSageMaker-ExecutionRole-20220412T145187",
env=env,
instance_type=instance_type,
session=None,
in_vpc=in_vpc,
timeout_in_sec=int(timeout_in_sec)
)
    # sleep briefly so consecutive batches get distinct, timestamp-based SageMaker job names
    time.sleep(1)
job.init(args['JOB_NAME'], args)
job.commit() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/sagemaker-trigger/sagemaker-trigger-ipc.py | sagemaker-trigger-ipc.py |
import argparse
import datetime
import json
import os
import sys
from datetime import datetime as dt
from datetime import timedelta
import dateutil
import pandas as pd
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB, MongoDB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
# connections
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-d', '--data', default=None, type=str, required=False)
# data = {"end": "2021-12-31", "start": "2021-12-01", "full_run": 1, "alternate_range": 0}
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
data = args.data
logger.info(f"data: {data}")
data = json.loads(data) if data else {}
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
mg_db = MongoDB()
mg_client = mg_db.open_connection("generico-crm")
s3 = S3()
# table info
schema = 'prod2-generico'
table_name = 'ecomm-outbound-connected-log'
date_field = 'call-created-at'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# params
job_data_params = data
if job_data_params['full_run']:
start = '2017-05-13'
elif job_data_params['alternate_range']:
start = job_data_params['start']
else:
start = str(dt.today().date() - timedelta(days=1))
# convert date to pymongo format
start = dateutil.parser.parse(start)
# Read Generico crm table
db = mg_client['generico-crm']
collection = db["exotelOutgoingCallLogs"].find(
{"CallType": {"$in": ["zeno-order-list", "zeno-order-details"]}, "status": "connected",
"createdAt": {"$gte": start}})
callog_outbound = pd.DataFrame(list(collection))
callog_outbound['call_attempt'] = callog_outbound.sort_values(['createdAt'], ascending=[True]) \
.groupby(['order_id']) \
.cumcount() + 1
callog_outbound = callog_outbound[(callog_outbound['call_attempt'] <= 3)]
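# call_attempt ranks each order's connected outbound calls by creation time;
# only the first three attempts per order are kept downstream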
callog_outbound['order_id'] = callog_outbound['order_id'].astype(int)
callog_outbound = callog_outbound[['order_id', 'order_number', 'createdAt', 'updatedAt',
'CallFrom', 'call_attempt']]
# avoid shadowing the built-in `dict`
rename_map = {'createdAt': 'call-created-at',
              'updatedAt': 'call-updated-at',
              'CallFrom': 'call-from'}
callog_outbound.rename(columns=rename_map, inplace=True)
# order id to patient id
zo_q = """
select
id as "order_id",
"patient-id"
from
"prod2-generico"."zeno-order" zo
where
date("created-at")>= '2020-12-25'
"""
zo = rs_db.get_df(zo_q)
call_outbound_log = pd.merge(callog_outbound, zo, how='left', on=["order_id"])
call_outbound_log = call_outbound_log.drop_duplicates()
call_outbound_log.columns = call_outbound_log.columns.str.lower()
call_outbound_log['patient-id'] = call_outbound_log['patient-id'].astype('Int64')
call_outbound_log['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
call_outbound_log['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
call_outbound_log['created-by'] = 'etl-automation'
call_outbound_log['updated-by'] = 'etl-automation'
call_outbound_log.columns = [c.replace('_', '-') for c in call_outbound_log.columns]
if isinstance(table_info, type(None)):
logger.info(f"table: {table_name} do not exist")
else:
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where "{date_field}">'{start}' '''
logger.info(truncate_query)
rs_db.execute(truncate_query)
""" seek the data """
logger.info(call_outbound_log.head(1))
file_s3_uri_save = s3.save_df_to_s3(df=call_outbound_log[table_info['column_name']], file_name="call_outbound_log.csv")
# s3.write_to_db_from_s3_csv(table_name=table_name,
# file_s3_uri=file_s3_uri_save,
# db=rs_db, schema=schema)
s3.write_df_to_db(df=call_outbound_log[table_info['column_name']], table_name=table_name, db=rs_db, schema=schema)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ecomm_outbound_connected_log/ecomm_outbound_connected_log.py | ecomm_outbound_connected_log.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "substitutable-compositions"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"composition",
"generic-flag"
)
select
d."composition-master-id" as "id",
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "updated-at",
cm.composition as "composition",
count(distinct(case when (d."type" = 'generic') then c."drug-id" end)) as "generic-flag"
from
"prod2-generico"."drugs" d
inner join "prod2-generico"."prod2-generico"."composition-master" cm on
cm.id = d."composition-master-id"
inner join
"prod2-generico"."inventory-1" c on
c."drug-id" = d."id"
inner join
"prod2-generico"."bill-items-1" a
on
c."id" = a."inventory-id"
where
d."composition-master-id" is not null
and d."type" = 'generic'
group by
d."composition-master-id",
cm."composition";
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."substitutable-compositions";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/substitutable-compositions/substitutable-compositions.py | substitutable-compositions.py |
import argparse
# this is to include zeno_etl_libs in the python search path on the run time
import sys
sys.path.append('../../../..')
import pandas as pd
import numpy as np
import time
import os
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
# ## Take the new records from bills and insert them into bills-metadata table
bill_metadata_table = "bills-1-metadata"
patients_metadata_table = "patients-metadata-2"
status = {
"updated": "updated",
"pending": "pending",
"updating": "updating",
}
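# etl-status lifecycle: new or affected bills start as 'pending', a batch is claimed by
# flipping rows to 'updating', and they are marked 'updated' once the metadata is written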
def insert_new_bills(db, limit, start_date, end_date):
limit_str = f"limit {limit}" if limit else ""
if start_date and end_date:
date_filter = f""" and date(b."created-at") between '{start_date}' and '{end_date}' """
else:
date_filter = ""
query = f'''
insert into
"prod2-generico"."{bill_metadata_table}" (
id,
"patient-id",
"zippin-serial",
"store-id",
"doctor-id",
"promo-code-id",
"promo-flag",
"promo-discount",
"payment-method",
"redeemed-points",
"total-cashback",
"zenocare-amount",
"created-by",
"created-at",
"updated-at",
"bill-date",
"bill-year",
"bill-month",
"bill-day",
"etl-status" )
select
b.id,
b."patient-id",
b."zippin-serial",
b."store-id",
b."doctor-id",
b."promo-code-id",
case when b."promo-code-id" is null then false else true end ,
b."promo-discount",
b."payment-method",
b."redeemed-points",
b."total-cashback",
b."zenocare-amount",
b."created-by",
b."created-at",
convert_timezone('Asia/Calcutta', GETDATE()),
trunc(b."created-at"),
extract(year from b."created-at"),
extract(month from b."created-at"),
extract(day from b."created-at"),
'{status['pending']}'
from
"prod2-generico"."bills-1" b
left join "prod2-generico"."{bill_metadata_table}" bm on
bm.id = b.id
where
bm.id is null
{date_filter}
and (bm."etl-status" != '{status['updated']}'
or bm."etl-status" is null)
order by b.id asc
{limit_str}
'''
db.execute(query, params=None)
def mark_old_affected_bills_also_pending(db, start_date, end_date):
# # Take the effect of below tables
# - "bills-1"
# - "patients-store-orders"
# - "bills-items-1"
# - "inventory-1"
# - "drugs"
if start_date and end_date:
date_filter = f""" and date(bm."created-at") between '{start_date}' and '{end_date}' """
else:
date_filter = ""
query = f"""
update
"prod2-generico"."{bill_metadata_table}" bm2
set
"etl-status" = '{status['pending']}',
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE())
from
(
select
f.id
from
"prod2-generico"."{bill_metadata_table}" bm
inner join
"prod2-generico"."bills-1" f on
bm.id = f.id
inner join "prod2-generico"."bill-items-1" a on
bm."id" = a."bill-id"
inner join "prod2-generico"."inventory-1" b on
a."inventory-id" = b."id"
inner join "prod2-generico".drugs d on
b."drug-id" = d.id
left join "prod2-generico"."patients-store-orders" pso on
bm.id = NVL(pso."bill-id" , 0)
where
((bm."updated-at" < f."updated-at")
or
(bm."updated-at" < a."updated-at")
or
(bm."updated-at" < b."updated-at")
or
(bm."updated-at" < d."updated-at")
or
(bm."updated-at" < pso."updated-at"))) ab
where
bm2.id = ab.id;
"""
db.execute(query, params=None)
""" Sometimes jobs fails and updating count keeps increasing and we always get memory error,
so to fix this mark all updating status to pending """
query = f"""
update
"prod2-generico"."{bill_metadata_table}"
set
"etl-status" = 'pending'
where
"etl-status" = 'updating'
"""
db.execute(query, params=None)
def mark_pending_bills_updating(db, batch_size):
query = f"""
update
"prod2-generico"."{bill_metadata_table}" bm2
set
"etl-status" = '{status['updating']}'
from
(
select
bm.id
from
"prod2-generico"."{bill_metadata_table}" bm
where
"etl-status" = '{status['pending']}'
limit {batch_size} ) ab
where
bm2.id = ab.id;
"""
db.execute(query, params=None)
def get_changed_bills(db):
# ## Considering only updating bills
query = f'''
select
id,
"patient-id",
"zippin-serial",
"store-id",
"doctor-id",
"promo-code-id",
"promo-discount",
"total-cashback",
"zenocare-amount",
"payment-method",
"redeemed-points",
"created-by",
"created-at",
"updated-at",
"bill-date",
"bill-year",
"bill-month",
"bill-day",
"promo-flag",
"digital-payment-flag",
"etl-status"
from
"prod2-generico"."{bill_metadata_table}" bm3
where
"etl-status" = '{status['updating']}'
order by bm3.id asc
'''
db.execute(query, params=None)
_changed_bills: pd.DataFrame = db.cursor.fetch_dataframe()
return _changed_bills
def get_numbered_bills(db):
# ## Min bill date logic to get month difference and month rank
query = f'''
select
bm.id,
bm."patient-id",
bm."created-at" ,
row_number () over (partition by bm."patient-id" order by "created-at" asc) as row_num,
bm."bill-year",
bm."bill-month",
bm."bill-date",
bm."store-id"
from
"prod2-generico"."{bill_metadata_table}" bm
inner join
(
select
"patient-id"
from
"prod2-generico"."{bill_metadata_table}"
where
"etl-status" = '{status['updating']}'
group by
"patient-id") p on
bm."patient-id" = p."patient-id"
'''
db.execute(query, params=None)
_numbered_bills: pd.DataFrame = db.cursor.fetch_dataframe()
return _numbered_bills
def get_pr_hd_ecom_flags(db):
# PR, HD, Ecom flags
query = f"""
select
bm.id,
bool_or(case when pso."patient-request-id" is null then false else true end) as "pr-flag",
bool_or(case when pso."order-type" = 'delivery' then true else false end) as "hd-flag",
bool_or(case when pso."order-source" = 'zeno' then true else false end) as "ecom-flag",
bool_or(case when pso."order-source" = 'crm' then true else false end) as "crm-flag"
from
"prod2-generico"."{bill_metadata_table}" bm
left join "prod2-generico"."patients-store-orders" pso on
pso."bill-id" = bm.id
where
bm."etl-status" = '{status['updating']}'
group by
bm.id
"""
db.execute(query, params=None)
_pr_hd_ecom_bills: pd.DataFrame = db.cursor.fetch_dataframe()
return _pr_hd_ecom_bills
def get_doctor_data(db):
query = f"""
select
bm.id ,
d."name" as "doctor-name"
from
"prod2-generico"."{bill_metadata_table}" bm
left join "prod2-generico".doctors d on
bm."doctor-id" = d.id
where
bm."etl-status" = '{status['updating']}'
"""
db.execute(query, params=None)
_doctors: pd.DataFrame = db.cursor.fetch_dataframe()
return _doctors
def get_item_drug_inv(db):
# ## bill item, drug, inventory data
query = f"""
select
bm.id ,
bi."inventory-id",
bi."quantity",
bi."rate",
i."drug-id" ,
i."purchase-rate" ,
i.mrp ,
i.ptr ,
i.expiry ,
d."drug-name" ,
d."type" ,
d."drug-name",
d."type" as "drug-type",
d.category as "drug-category",
(case when
d."type" ='generic' and d.category ='chronic' then 1 else 0
end) as "is-generic-chronic",
d."repeatability-index" ,
d.composition ,
d.schedule ,
d."company-id" ,
d.company ,
d.pack
from
"prod2-generico"."{bill_metadata_table}" bm
inner join "prod2-generico"."bill-items-1" bi on
bm.id = bi."bill-id"
inner join "prod2-generico"."inventory-1" i on
bi."inventory-id" = i.id
inner join "prod2-generico".drugs d on
i."drug-id" = d.id
where
bm."etl-status" = '{status['updating']}';
"""
db.execute(query=query)
_item_drug_inv: pd.DataFrame = db.cursor.fetch_dataframe()
return _item_drug_inv
def update_target_table(db, bills1_temp):
# Updating the Destination table using temp table
target = bill_metadata_table
source = bills1_temp
query = f"""
update "prod2-generico"."{target}" t
set "month-diff" = s."month-diff",
"pr-flag" = s."pr-flag",
"hd-flag" = s."hd-flag",
"ecom-flag" = s."ecom-flag",
"crm-flag" = s."crm-flag",
"doctor-name" = s."doctor-name",
"total-spend" = s."total-spend",
"spend-generic" = s."spend-generic",
"spend-goodaid" = s."spend-goodaid",
"spend-ethical" = s."spend-ethical",
"spend-others-type" = s."spend-others-type",
"num-drugs" = s."num-drugs",
"quantity-generic" = s."quantity-generic",
"quantity-goodaid" = s."quantity-goodaid",
"quantity-ethical" = s."quantity-ethical",
"quantity-chronic" = s."quantity-chronic",
"quantity-repeatable" = s."quantity-repeatable",
"quantity-others-type" = s."quantity-others-type",
"is-generic" = s."is-generic",
"is-goodaid" = s."is-goodaid",
"is-ethical" = s."is-ethical",
"is-chronic" = s."is-chronic",
"is-generic-chronic" = s."is-generic-chronic",
"is-repeatable" = s."is-repeatable",
"is-others-type" = s."is-others-type",
"is-rx" = s."is-rx",
"total-quantity" = s."total-quantity",
"total-mrp-value" = s."total-mrp-value",
"total-purchase-rate-value" = s."total-purchase-rate-value",
"total-ptr-value" = s."total-ptr-value",
-- "promo-flag" = s."promo-flag",
"digital-payment-flag" = s."digital-payment-flag",
"zippin-serial" = s."zippin-serial",
"month-bill-rank" = s."month-bill-rank",
"min-bill-date-in-month" = s."min-bill-date-in-month",
"store-id-month" = s."store-id-month",
"normalized-date" = s."normalized-date",
"etl-status" = '{status['updated']}'
from "{source}" s
where t.id = s.id;
"""
db.execute(query=query)
def mark_affected_patients_pending(db, bills1_temp):
# ## Update the patients-metadata etl-status
query = f"""
update
"prod2-generico"."{patients_metadata_table}" t
set
"etl-status" = '{status['pending']}'
from
{bills1_temp} s
where
t.id = s."patient-id"
and s."etl-status" = '{status['updating']}';
"""
db.execute(query=query)
def process_batch(changed_bills, db, s3):
changed_bills['digital-payment-flag'] = np.where(
changed_bills['payment-method'].isin(['', ' ', 'cash', 'cheque']), False, True)
# print(changed_bills.head(1).transpose())
numbered_bills = get_numbered_bills(db)
first_bill = numbered_bills[numbered_bills['row_num'] == 1].rename(
columns={"created-at": "min-created-at"})[['patient-id', 'min-created-at']]
# first_bill.head(2)
# Month bill rank
bill_rank_month = numbered_bills.sort_values(by=['patient-id', 'bill-year', 'bill-month', 'bill-date']).copy()
bill_rank_month['month-bill-rank'] = bill_rank_month.groupby(
['patient-id', 'bill-year', 'bill-month']).cumcount() + 1
# bill_rank_month.head(2)
bill_rank_month_min = bill_rank_month[bill_rank_month['month-bill-rank'] == 1][
['patient-id', 'bill-year', 'bill-month', 'bill-date', 'store-id']].rename(
columns={'bill-date': 'min-bill-date-in-month', 'store-id': 'store-id-month'})
# bill_rank_month_min.head(2)
pr_hd_ecom_bills = get_pr_hd_ecom_flags(db)
# pr_hd_ecom_bills.head(1)
doctors = get_doctor_data(db)
item_drug_inv = get_item_drug_inv(db)
# Measured fields
item_drug_inv['total-spend'] = item_drug_inv['rate'].astype('float') * item_drug_inv['quantity'].astype('float')
item_drug_inv['total-mrp-value'] = item_drug_inv['mrp'].astype('float') * item_drug_inv['quantity'].astype(
'float')
item_drug_inv['total-purchase-rate-value'] = item_drug_inv['purchase-rate'].astype('float') * item_drug_inv[
'quantity'].astype('float')
item_drug_inv['total-ptr-value'] = item_drug_inv['ptr'].astype('float') * item_drug_inv[
'quantity'].astype(
'float')
# Quantity fields
item_drug_inv['quantity-generic'] = np.where(item_drug_inv['drug-type'] == 'generic',
item_drug_inv['quantity'], 0)
item_drug_inv['quantity-goodaid'] = np.where(item_drug_inv['company'] == 'GOODAID',
item_drug_inv['quantity'], 0)
item_drug_inv['quantity-ethical'] = np.where(item_drug_inv['drug-type'] == 'ethical',
item_drug_inv['quantity'], 0)
item_drug_inv['quantity-others-type'] = np.where(
~item_drug_inv['drug-type'].isin(['generic', 'ethical']),
item_drug_inv['quantity'], 0)
item_drug_inv['quantity-chronic'] = np.where(item_drug_inv['drug-category'] == 'chronic',
item_drug_inv['quantity'], 0)
item_drug_inv['quantity-generic-chronic'] = np.where(item_drug_inv['is-generic-chronic'] == 1,
item_drug_inv['quantity'], 0)
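    # repeatability rule (applied below): quantity counts as repeatable when
    # repeatability-index >= 80, or >= 40 for drugs in the chronic category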
item_drug_inv['quantity-repeatable'] = np.where(
((item_drug_inv['repeatability-index'] >= 80) | (
(item_drug_inv['drug-category'] == 'chronic') & (
item_drug_inv['repeatability-index'] >= 40))),
item_drug_inv['quantity'], 0)
# Spend columns
item_drug_inv['spend-generic'] = np.where(item_drug_inv['drug-type'] == 'generic',
item_drug_inv['total-spend'], 0)
item_drug_inv['spend-goodaid'] = np.where(item_drug_inv['company'] == 'GOODAID',
item_drug_inv['total-spend'], 0)
item_drug_inv['spend-ethical'] = np.where(item_drug_inv['drug-type'] == 'ethical', item_drug_inv['total-spend'], 0)
item_drug_inv['spend-others-type'] = np.where(~item_drug_inv['drug-type'].isin(['generic', 'ethical']),
item_drug_inv['total-spend'], 0)
# aggregation at bill level
bills_level_data = item_drug_inv.groupby(['id']).agg(
{'total-spend': 'sum',
'total-mrp-value': 'sum',
'total-purchase-rate-value': 'sum',
'total-ptr-value': 'sum',
'spend-generic': 'sum',
'spend-goodaid': 'sum',
'spend-ethical': 'sum',
'spend-others-type': 'sum',
'drug-id': 'nunique',
'quantity': 'sum',
'quantity-generic': 'sum',
'quantity-generic-chronic': 'sum',
'quantity-goodaid': 'sum',
'quantity-ethical': 'sum',
'quantity-others-type': 'sum',
'quantity-chronic': 'sum',
'quantity-repeatable': 'sum'}).reset_index()
bills_level_data = bills_level_data.rename(columns={'drug-id': 'num-drugs', 'quantity': 'total-quantity'})
# bill is generic or not
bills_level_data['is-generic'] = np.where(bills_level_data['quantity-generic'] > 0, 1, 0)
# Patient is GOODAID or not
bills_level_data['is-goodaid'] = np.where(bills_level_data['quantity-goodaid'] > 0, 1, 0)
# Patient is ethical or not
bills_level_data['is-ethical'] = np.where(bills_level_data['quantity-ethical'] > 0, 1, 0)
# Patient is Others type or not
bills_level_data['is-others-type'] = np.where(bills_level_data['quantity-others-type'] > 0, 1, 0)
# Patient is RX or not
bills_level_data['is-rx'] = np.where(
(bills_level_data['quantity-generic'] + bills_level_data['quantity-ethical']) > 0, 1, 0)
# Patient is chronic or not
bills_level_data['is-chronic'] = np.where(bills_level_data['quantity-chronic'] > 0, 1, 0)
# Patient is repeatable or not
bills_level_data['is-repeatable'] = np.where(bills_level_data['quantity-repeatable'] > 0, 1, 0)
bills_level_data['is-generic-chronic'] = np.where(
bills_level_data['quantity-generic-chronic'] > 0, 1, 0)
# ## Merging data
# ### month difference data
transformed_bills = changed_bills.merge(first_bill, how='inner', on=['patient-id'])
# print(transformed_bills.head(1).transpose())
transformed_bills['month-diff'] = helper.month_diff(
transformed_bills['created-at'], transformed_bills['min-created-at'])
transformed_bills = transformed_bills.drop(columns=['min-created-at'])
# ### PR, HD flags Data
transformed_bills = transformed_bills.merge(pr_hd_ecom_bills, how="left", left_on='id', right_on='id')
# ### Doctor Data
transformed_bills = transformed_bills.merge(doctors, how="left", left_on='id', right_on='id')
# ### Drug and inventory data
transformed_bills = transformed_bills.merge(bills_level_data, how="left", left_on='id', right_on='id')
# transformed_bills.columns
# ### Month bill rank
transformed_bills = transformed_bills.merge(bill_rank_month[['id', 'month-bill-rank']], how='left', on=['id'])
# ### Month bill rank min date
transformed_bills = transformed_bills.merge(
bill_rank_month_min[['patient-id', 'bill-year', 'bill-month', 'min-bill-date-in-month', 'store-id-month']],
how='left', on=['patient-id', 'bill-year', 'bill-month']
)
# ### Normalise date
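    # the datetime64[M] -> datetime64[D] cast below truncates each created-at to the first
    # day of its month, giving a month-level "normalized-date"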
transformed_bills['normalized-date'] = transformed_bills['created-at'].dt.date.values.astype(
'datetime64[M]').astype('datetime64[D]')
transformed_bills['normalized-date'] = transformed_bills['normalized-date'].dt.date
# ### Final column selection
table_info = helper.get_table_info(db=db, table_name=bill_metadata_table, schema='prod2-generico')
"""correcting the column order"""
transformed_bills = transformed_bills[table_info['column_name']]
# ## Updating the data in the target table using temp table
helper.drop_table(db=db, table_name=bill_metadata_table.replace('-', '_') + "_temp")
""" Creating temp table """
bills1_temp = helper.create_temp_table(db=db, table=bill_metadata_table)
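    # staging pattern: bulk-load the transformed rows into the temp table via S3, then run a
    # single UPDATE ... FROM join against the target table (see update_target_table)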
# fillna(-1)
for col in [
'num-drugs', 'promo-code-id', 'total-quantity', 'quantity-generic', 'quantity-goodaid',
"quantity-ethical", "quantity-chronic", "quantity-repeatable", "quantity-others-type"
]:
transformed_bills[col] = transformed_bills[col].fillna(-1).astype('int64')
# fillna(0)
for col in [
'is-generic', 'is-goodaid', 'is-ethical', 'is-chronic', 'is-repeatable', 'is-others-type',
'is-rx', 'is-generic-chronic'
]:
transformed_bills[col] = transformed_bills[col].fillna(0).astype('int64')
ts = time.time()
s3.write_df_to_db(df=transformed_bills, table_name=bills1_temp, db=db)
print(f"total time: {time.time() - ts}")
""" updating the bill metadata table """
update_target_table(db, bills1_temp)
# """ mark affected patients pending """
# mark_affected_patients_pending(db, bills1_temp)
def get_pending_count(db):
query = f"""
select
count(id)
from
"prod2-generico"."{bill_metadata_table}"
where
"etl-status" = '{status['pending']}'
"""
db.execute(query, params=None)
_pending: pd.DataFrame = db.cursor.fetch_dataframe()
return _pending
def main(db, s3, limit, batch_size, start_date, end_date):
still_pending = True
insert_new_bills(db, limit, start_date, end_date)
print("insert_new_bills, done.")
# get_pending_count(db)
mark_old_affected_bills_also_pending(db, start_date, end_date)
print("mark_old_affected_bills_also_pending done.")
# get_pending_count(db)
count = 1
while still_pending:
mark_pending_bills_updating(db, batch_size)
print("mark_pending_bills_updating done.")
changed_bills = get_changed_bills(db)
print("get_changed_bills done.")
if isinstance(changed_bills, type(None)) or changed_bills.empty:
still_pending = False
print("Completed all batches.")
else:
process_batch(changed_bills=changed_bills, db=db, s3=s3)
print(f"process_batch done: {count}.")
count += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-b', '--batch_size', default=500000, type=int, required=False, help="batch size")
parser.add_argument('-l', '--limit', default=None, type=int, required=False, help="Total bills to process")
parser.add_argument('-sd', '--start_date', default=None, type=str, required=False)
parser.add_argument('-ed', '--end_date', default=None, type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
batch_size = args.batch_size
limit = args.limit
# This is for new bills
start_date = args.start_date
end_date = args.end_date
logger.info(f"env: {env}, limit: {limit}, batch_size: {batch_size}")
rs_db = DB()
rs_db.open_connection()
_s3 = S3()
""" calling the main function """
main(db=rs_db, s3=_s3, limit=limit, batch_size=batch_size, start_date=start_date, end_date=end_date)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/bills-metadata/bill_metadata.py | bill_metadata.py |
import os
import sys
import argparse
import pandas as pd
import datetime as dt
import numpy as np
from dateutil.tz import gettz
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
from zeno_etl_libs.helper import helper
def main():
s3 = S3()
# Read from GSheet
gs = GoogleSheet()
spreadsheet_id = "1AymJanamWzBk8zZ7UrHGerVpXXaVBt-bpplyghJTD5A"
ast_data = gs.download(data={
"spreadsheet_id": spreadsheet_id,
"sheet_name": "Sheet1",
"listedFields": []})
df_sheet = pd.DataFrame(ast_data)
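    # align sheet headers with the Redshift table: underscores become hyphens, then the
    # standard audit columns (created/updated at/by) are added before the append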
df_sheet.columns = [c.replace('_', '-') for c in df_sheet.columns]
df_sheet['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df_sheet['created-by'] = '[email protected]'
df_sheet['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df_sheet['updated-by'] = '[email protected]'
table_info = helper.get_table_info(db=rs_db_write,
table_name='dc-distributor-mapping',
schema=write_schema)
columns = list(table_info['column_name'])
df_sheet = df_sheet[columns] # required column order
s3.write_df_to_db(df=df_sheet,
table_name='dc-distributor-mapping',
db=rs_db_write, schema=write_schema)
return None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
rs_db = DB()
rs_db_write = DB(read_only=False)
# open RS connection
rs_db.open_connection()
rs_db_write.open_connection()
""" calling the main function """
main()
# close RS connection
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/distributor_ranking2/upload_gsheet_to_rs_table.py | upload_gsheet_to_rs_table.py |
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.distributor_ranking2.distributor_ranking_calc import \
ranking_calc_dc, ranking_calc_franchisee
from zeno_etl_libs.utils.distributor_ranking2.tech_processing import \
process_tech_df
def main(debug_mode, reset_date, time_interval_dc, time_interval_franchisee,
volume_fraction, franchisee_ranking_active, franchisee_stores,
as_ms_weights_dc_drug_lvl, as_ms_weights_dc_type_lvl,
pr_weights_dc_drug_lvl, pr_weights_dc_type_lvl,
weights_franchisee_drug_lvl, weights_franchisee_type_lvl, s3,
rs_db_read, rs_db_write, read_schema, write_schema):
mysql_write = MySQL(read_only=False)
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
# define empty variables to return in case of fail
final_ranks_franchisee = pd.DataFrame()
ranked_features_franchisee = pd.DataFrame()
dc_evaluated = []
franchisee_stores_evaluated = []
    # ensure each of the six weight groups adds up to 1
sum_all_wts = sum(list(as_ms_weights_dc_drug_lvl.values())) + \
sum(list(as_ms_weights_dc_type_lvl.values())) + \
sum(list(pr_weights_dc_drug_lvl.values())) + \
sum(list(pr_weights_dc_type_lvl.values())) + \
sum(list(weights_franchisee_drug_lvl.values())) + \
sum(list(weights_franchisee_type_lvl.values()))
    # float-safe check: six weight groups, each summing to 1
    if abs(sum_all_wts - 6) < 1e-6:
        logger.info("All input weight groups add up to 1 | Continue Execution")
    else:
        logger.info("Input weight groups do not add up to 1 | Stop Execution")
        return status, reset_date, dc_evaluated, franchisee_stores_evaluated
try:
# calculate ranks
logger.info("Calculating Zippin DC-level Ranking")
ranked_features_dc, final_ranks_dc = ranking_calc_dc(
reset_date, time_interval_dc, as_ms_weights_dc_drug_lvl,
as_ms_weights_dc_type_lvl, pr_weights_dc_drug_lvl,
pr_weights_dc_type_lvl, logger, db=rs_db_read, schema=read_schema)
if franchisee_ranking_active == 'Y':
logger.info("Calculating Franchisee-level Ranking")
ranked_features_franchisee, \
final_ranks_franchisee = ranking_calc_franchisee(
reset_date, time_interval_franchisee, franchisee_stores,
weights_franchisee_drug_lvl, weights_franchisee_type_lvl,
logger, db=rs_db_read, schema=read_schema)
else:
logger.info("Skipping Franchisee-level Ranking")
# process ranked dfs to tech required format
distributor_ranking_rules, \
distributor_ranking_rule_values = process_tech_df(
final_ranks_dc, final_ranks_franchisee, volume_fraction)
# combine rank df and feature df (dc & franchisee)
final_ranks = pd.concat([final_ranks_dc, final_ranks_franchisee], axis=0)
ranked_features = pd.concat([ranked_features_dc, ranked_features_franchisee], axis=0)
ranked_features.rename(
{"partial_dc_id": "dc_id", "partial_distributor_id": "distributor_id",
"partial_distributor_credit_period": "distributor_credit_period",
"partial_distributor_name": "distributor_name"}, axis=1, inplace=True)
final_ranks.rename(
{"partial_dc_id": "dc_id"}, axis=1, inplace=True)
# for email info
dc_evaluated = distributor_ranking_rules["dc_id"].unique().tolist()
franchisee_stores_evaluated = distributor_ranking_rules[
"store_id"].unique().tolist()
# adding required fields in tech df
distributor_ranking_rules['rule_start_date'] = reset_date
distributor_ranking_rules['is_active'] = 1
distributor_ranking_rules['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
distributor_ranking_rules['created_by'] = 'etl-automation'
# adding required fields in ds-internal df
final_ranks.loc[:, 'reset_date'] = reset_date
final_ranks['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
final_ranks['created_by'] = 'etl-automation'
final_ranks['updated_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
final_ranks['updated_by'] = 'etl-automation'
ranked_features.loc[:, 'reset_date'] = reset_date
ranked_features['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
ranked_features['created_by'] = 'etl-automation'
ranked_features['updated_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
ranked_features['updated_by'] = 'etl-automation'
# formatting column names
distributor_ranking_rule_values.columns = [c.replace('_', '-') for c in
distributor_ranking_rule_values.columns]
distributor_ranking_rules.columns = [c.replace('_', '-') for c in
distributor_ranking_rules.columns]
final_ranks.columns = [c.replace('_', '-') for c in final_ranks.columns]
ranked_features.columns = [c.replace('_', '-') for c in ranked_features.columns]
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
logger.info("Writing to table: distributor-ranking2-features")
table_info = helper.get_table_info(db=rs_db_write,
table_name='distributor-ranking2-features',
schema=write_schema)
columns = list(table_info['column_name'])
ranked_features = ranked_features[columns] # required column order
s3.write_df_to_db(df=ranked_features,
table_name='distributor-ranking2-features',
db=rs_db_write, schema=write_schema)
logger.info("Writing to table: distributor-ranking2-final-ranks")
table_info = helper.get_table_info(db=rs_db_write,
table_name='distributor-ranking2-final-ranks',
schema=write_schema)
columns = list(table_info['column_name'])
final_ranks = final_ranks[columns] # required column order
s3.write_df_to_db(df=final_ranks,
table_name='distributor-ranking2-final-ranks',
db=rs_db_write, schema=write_schema)
logger.info("Writing table to RS-DB completed!")
mysql_write.open_connection()
logger.info("Updating table to MySQL")
try:
index_increment = int(
pd.read_sql(
'select max(id) from `distributor-ranking-rules`',
con=mysql_write.connection).values[0]) + 1
redundant_increment = int(
pd.read_sql(
'select max(id) from `distributor-ranking-rule-values`',
con=mysql_write.connection).values[0]) + 1
            except Exception:  # table empty or max(id) is NULL
index_increment = 1
redundant_increment = 1
            logger.info(f"New id offset for distributor-ranking-rules: {index_increment}")
            logger.info(f"New id offset for distributor-ranking-rule-values: {redundant_increment}")
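            # the new rows carry their own id columns, so offset them by the
            # current table max to keep primary keys unique and the rule-id
            # foreign keys in rule-values consistent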
distributor_ranking_rules['id'] = distributor_ranking_rules['id'] + index_increment
distributor_ranking_rule_values['distributor-ranking-rule-id'] = distributor_ranking_rule_values[
'distributor-ranking-rule-id'] + index_increment
distributor_ranking_rule_values['id'] = distributor_ranking_rule_values['id'] + redundant_increment
logger.info("Setting existing rules to inactive")
mysql_write.engine.execute("UPDATE `distributor-ranking-rules` SET `is-active` = 0")
# mysql_write.engine.execute("SET FOREIGN_KEY_CHECKS=0") # use only in staging
logger.info("Writing to table: distributor-ranking-rules")
distributor_ranking_rules.to_sql(
name='distributor-ranking-rules',
con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=10000)
logger.info("Writing to table: distributor-ranking-rule-values")
distributor_ranking_rule_values.to_sql(
name='distributor-ranking-rule-values',
con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=10000)
# mysql_write.engine.execute("SET FOREIGN_KEY_CHECKS=1") # use only in staging
logger.info("Updating table to MySQL completed!")
mysql_write.close()
else:
logger.info("Writing to RS-DB & MySQL skipped")
status = 'Success'
logger.info(f"Distributor Ranking code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"Distributor Ranking code execution status: {status}")
return status, reset_date, dc_evaluated, franchisee_stores_evaluated
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
parser.add_argument('-d', '--debug_mode', default="N", type=str,
required=False)
parser.add_argument('-rd', '--reset_date', default="YYYY-MM-DD", type=str,
required=False)
parser.add_argument('-ti', '--time_interval_dc', default=90, type=int,
required=False)
parser.add_argument('-tif', '--time_interval_franchisee', default=180, type=int,
required=False)
parser.add_argument('-vf', '--volume_fraction', default="0.5-0.3-0.2", type=str,
required=False)
parser.add_argument('-fra', '--franchisee_ranking_active', default="Y", type=str,
required=False)
parser.add_argument('-fs', '--franchisee_stores', default=[319, 320],
nargs='+', type=int, required=False)
parser.add_argument('-amwdcdl', '--as_ms_weights_dc_drug_lvl',
default="{'margin':0.5,'ff':0.5}",
type=str, required=False)
parser.add_argument('-amwdctl', '--as_ms_weights_dc_type_lvl',
default="{'margin':0.3,'ff':0.3, 'portfolio_size':0.4}",
type=str, required=False)
parser.add_argument('-prwdcdl', '--pr_weights_dc_drug_lvl',
default="{'margin':0.4,'ff':0.6}",
type=str, required=False)
parser.add_argument('-prwdctl', '--pr_weights_dc_type_lvl',
default="{'margin':0.2,'ff':0.4, 'portfolio_size':0.4}",
type=str, required=False)
parser.add_argument('-wfdl', '--weights_franchisee_drug_lvl',
default="{'margin':0.5,'ff':0.5}",
type=str, required=False)
parser.add_argument('-wftl', '--weights_franchisee_type_lvl',
default="{'margin':0.3,'ff':0.3, 'portfolio_size':0.4}",
type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
# JOB EXCLUSIVE PARAMS
debug_mode = args.debug_mode
reset_date = args.reset_date
time_interval_dc = args.time_interval_dc
time_interval_franchisee = args.time_interval_franchisee
volume_fraction = args.volume_fraction
franchisee_ranking_active = args.franchisee_ranking_active
franchisee_stores = args.franchisee_stores
as_ms_weights_dc_drug_lvl = args.as_ms_weights_dc_drug_lvl
as_ms_weights_dc_type_lvl = args.as_ms_weights_dc_type_lvl
pr_weights_dc_drug_lvl = args.pr_weights_dc_drug_lvl
pr_weights_dc_type_lvl = args.pr_weights_dc_type_lvl
weights_franchisee_drug_lvl = args.weights_franchisee_drug_lvl
weights_franchisee_type_lvl = args.weights_franchisee_type_lvl
# EVALUATE REQUIRED JSON PARAMS
as_ms_weights_dc_drug_lvl = literal_eval(as_ms_weights_dc_drug_lvl)
as_ms_weights_dc_type_lvl = literal_eval(as_ms_weights_dc_type_lvl)
pr_weights_dc_drug_lvl = literal_eval(pr_weights_dc_drug_lvl)
pr_weights_dc_type_lvl = literal_eval(pr_weights_dc_type_lvl)
weights_franchisee_drug_lvl = literal_eval(weights_franchisee_drug_lvl)
weights_franchisee_type_lvl = literal_eval(weights_franchisee_type_lvl)
if reset_date == 'YYYY-MM-DD':
reset_date = dt.date.today()
else:
reset_date = dt.datetime.strptime(reset_date, "%Y-%m-%d").date()
logger = get_logger()
s3 = S3()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
""" calling the main function """
status, reset_date, dc_evaluated, \
franchisee_stores_evaluated = main(
debug_mode, reset_date, time_interval_dc, time_interval_franchisee,
volume_fraction, franchisee_ranking_active, franchisee_stores,
as_ms_weights_dc_drug_lvl, as_ms_weights_dc_type_lvl,
pr_weights_dc_drug_lvl, pr_weights_dc_type_lvl,
weights_franchisee_drug_lvl, weights_franchisee_type_lvl, s3,
rs_db_read, rs_db_write, read_schema, write_schema)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"Distributor Ranking 2.0 Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
DC's Evaluated: {dc_evaluated}
Franchisee Stores Evaluated: {franchisee_stores_evaluated}
Job Params: {args}
""",
to_emails=email_to)
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/distributor_ranking2/distributor_ranking2_main.py | distributor_ranking2_main.py |
import argparse
import sys
sys.path.append('../../../..')
import os
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "group-activation"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
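    # full refresh: rebuild store x composition-group activation dates for
    # Goodaid drugs (company-id 6984) -- first inventory/bill dates at both
    # the store level and across the whole system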
query = f"""
insert into
"prod2-generico"."{table_name}" (
"created-by",
"created-at",
"updated-by",
"updated-at",
"store-id",
"group",
"system-first-inv-date",
"system-first-bill-date",
"store-first-inv-date",
"store-first-bill-date")
select
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
c."store-id" as "store-id",
d1."group" as "group",
max(y.system_first_inv_date) as "system-first-inv-date",
max(y.system_first_bill_date) as "system-first-bill-date",
MIN(c."created-at") as "store-first-inv-date",
MIN(b."created-at") as "store-first-bill-date"
from
"prod2-generico"."inventory-1" c
left join "prod2-generico"."bill-items-1" a on
c."id" = a."inventory-id"
left join "prod2-generico"."bills-1" b on
b."id" = a."bill-id"
left join "prod2-generico"."drugs" d on
d."id" = c."drug-id"
left join "prod2-generico"."drug-unique-composition-mapping" d1 on
c."drug-id" = d1."drug-id"
left join (
select
d1."group", MIN(b."created-at") as "system_first_bill_date", MIN(c."created-at") as "system_first_inv_date"
from
"prod2-generico"."inventory-1" c
left join "prod2-generico"."bill-items-1" a on
c."id" = a."inventory-id"
left join "prod2-generico"."bills-1" b on
b."id" = a."bill-id"
left join "prod2-generico"."drugs" d on
d."id" = c."drug-id"
left join "prod2-generico"."drug-unique-composition-mapping" d1 on
c."drug-id" = d1."drug-id"
where
d1."group" is not null
and d."company-id" = 6984
group by
d1."group" ) as y on
d1."group" = y."group"
where
d1."group" is not null
and d."company-id" = 6984
group by
c."store-id",
d1."group";
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."composition-activation";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
                        help="This is env(dev, stage, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/group-activation/group-activation.py | group-activation.py |
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime as dt
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
def main(debug_mode, db_read, db_write, read_schema, write_schema, s3, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
try:
# previous day taken to remove the day on which code is run.
# start_date is the date from which analysis begins. start_date is previous day - 28*4 days
# store cutoff date put as same as that of start_date
# period between (previous_day) & (previous_day - 28*4days)
previous_day = (dt.date.today() - pd.DateOffset(days=1)) # start analysis from the previous day.
start_date = pd.to_datetime(
(previous_day - pd.DateOffset(days=4 * 28)).strftime('%Y-%m-%d'))
store_date_cutoff = pd.to_datetime(
(previous_day - pd.DateOffset(days=4 * 28)).strftime('%Y-%m-%d'))
logger.info("Corrections ran for dates {0} to {1}".format(
start_date.strftime('%Y-%m-%d'),
previous_day.strftime('%Y-%m-%d')))
# remove stores opened before start_date (i.e. four months from previous_day).
Q_STORE = """
select id as "store-id", "opened-at" from "{read_schema}".stores
where "opened-at" <= '{store_date_cutoff}'
and "opened-at" != '0101-01-01 00:00:00'
""".format(read_schema=read_schema,
store_date_cutoff=store_date_cutoff)
df_stores = pull_data(Q_STORE, db_read)
store_filter = tuple(df_stores['store_id'].to_list())
logger.info("Total stores considered for corrections: {0} ".format(len(store_filter)))
# pull sales data with store filter between start_date and previous day.
Q_REF = """
select "store-id" , "drug-id"
from "{read_schema}".sales
where date("created-at") >= date('{start_date}')
and date("created-at") <= date('{end_date}')
and "store-id" in {store_filter}
group by "store-id" , "drug-id"
""".format(read_schema=read_schema,
start_date=start_date,
end_date=previous_day,
store_filter=store_filter)
df_ref = pull_data(Q_REF, db_read)
logger.info("Calculating probability matrix for store-drugs")
# matrix_111 contains list of 111 drugs in current bucket
matrix, matrix_111, probability_matrix_1, probability_matrix_2 = calculate_probabilities(df_ref, db_read, read_schema)
probability_matrix_1['historical_flag_ma_less_than_2'] = 1
probability_matrix_2['historical_flag_ma_less_than_2'] = 0
probability_matrix = pd.concat([probability_matrix_1, probability_matrix_2])
# ensure dtypes to prevent write errors
matrix = matrix.dropna(subset=['store_id', 'drug_id'])
matrix_111 = matrix_111.dropna(subset=['store_id', 'drug_id'])
matrix['store_id'] = matrix['store_id'].astype(int)
matrix['drug_id'] = matrix['drug_id'].astype(int)
matrix_111['store_id'] = matrix_111['store_id'].astype(int)
matrix_111['drug_id'] = matrix_111['drug_id'].astype(int)
# formatting and adding required fields for RS-DB write
matrix.columns = [c.replace('_', '-') for c in matrix.columns]
matrix_111.columns = [c.replace('_', '-') for c in matrix_111.columns]
probability_matrix.columns = [c.replace('_', '-') for c in probability_matrix.columns]
logger.info("Formatting table for RS-DB write")
matrix['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
matrix['created-by'] = 'etl-automation'
matrix['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
matrix['updated-by'] = 'etl-automation'
matrix_111['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
matrix_111['created-by'] = 'etl-automation'
matrix_111['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
matrix_111['updated-by'] = 'etl-automation'
probability_matrix['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
probability_matrix['created-by'] = 'etl-automation'
probability_matrix['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
probability_matrix['updated-by'] = 'etl-automation'
if debug_mode == 'N':
logger.info(f"Truncating ipc-corrections-rest-cases in {write_schema}")
truncate_query = f"""
truncate table "{write_schema}"."ipc-corrections-rest-cases"
"""
db_write.execute(truncate_query)
logger.info(f"Truncating ipc-corrections-111-cases in {write_schema}")
truncate_query = f"""
truncate table "{write_schema}"."ipc-corrections-111-cases"
"""
db_write.execute(truncate_query)
logger.info(f"Truncating ipc-corrections-probability-matrix in {write_schema}")
truncate_query = f"""
truncate table "{write_schema}"."ipc-corrections-probability-matrix"
"""
db_write.execute(truncate_query)
logger.info("Writing table to RS-DB")
logger.info("Writing to table: ipc-corrections-rest-cases")
s3.write_df_to_db(df=matrix,
table_name='ipc-corrections-rest-cases',
db=db_write, schema=write_schema)
logger.info("Writing to table: ipc-corrections-111-cases")
s3.write_df_to_db(df=matrix_111,
table_name='ipc-corrections-111-cases',
db=db_write, schema=write_schema)
logger.info("Writing to table: ipc-corrections-probability-matrix")
s3.write_df_to_db(df=probability_matrix,
table_name='ipc-corrections-probability-matrix',
db=db_write, schema=write_schema)
logger.info("Writing table to RS-DB completed!")
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
except Exception as error:
logger.exception(error)
return status
# ======================= SQL QUERIES =======================
Q_REPEATABLE = """
SELECT id AS "drug-id",
CASE
WHEN (category ='chronic' AND "repeatability-index">=40)
OR ("repeatability-index">=80) THEN 1 ELSE 0 END AS "is-repeatable"
FROM "{schema}".drugs
"""
Q_CURRENT_INV_AND_PTR = """
SELECT "drug-id", "store-id", avg(ptr) AS "avg-ptr",
SUM("locked-quantity"+quantity+"locked-for-audit"+"locked-for-transfer"
+"locked-for-check"+"locked-for-return") AS "current-inventory"
FROM "{schema}"."inventory-1"
GROUP BY "store-id", "drug-id"
"""
Q_SOLD_QUANTITY = """
select "store-id", "drug-id", date("created-at") as "created-at",
sum(quantity) as "total-sales-quantity"
from "{schema}".sales
where quantity > 0
and date("created-at") >= date('{start_date}') and date("created-at") <= date('{end_date}')
group by "store-id", "drug-id", "created-at"
"""
Q_MAX = """
SELECT "store-id", "drug-id", "max" FROM "{schema}"."drug-order-info"
"""
Q_COMPOSITION = """
SELECT id AS "drug-id", composition FROM "{schema}".drugs
"""
# ======================= HELPER FUNCTIONS =======================
def pull_data(query, db):
df = db.get_df(query)
df.columns = [c.replace('-', '_') for c in df.columns]
return df
def divide_time(df, start_date, end_date):
''' Adds 28 days bucket label to df for later pivot table division.
quantity0 for last month, quantity3 for the oldest month.'''
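    # illustrative example (assuming end_date = 2023-05-31): quantity0 covers
    # 2023-05-04..2023-05-31, quantity1 covers 2023-04-06..2023-05-03, and so
    # on in consecutive 28-day windows going back from end_date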
df['created_at'] = pd.to_datetime(df['created_at'])
df = df[df['created_at'] >= start_date]
    df['divide_time'] = 999  # sentinel for rows outside all 28-day windows; dropped below
for i in range(4):
date1 = end_date - pd.DateOffset(days=i * 28)
date2 = end_date - pd.DateOffset(days=(i + 1) * 28)
df['divide_time'] = np.where(((df['created_at'] <= date1)
& (df['created_at'] > date2)), 'quantity' + str(i),
df['divide_time'])
    df = df[df['divide_time'] != '999']  # drop rows that fell outside every 28-day window
return df
def make_flags(df, db, schema, metric):
''' Add quantity0, quantity1, quantity2, quantity3 flags. '''
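    # pulls 4 x 28 days of sales ending yesterday and pivots them into
    # quantity0..quantity3 columns per store-drug (missing combinations -> 0)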
end_date = dt.date.today() - pd.DateOffset(days=1)
start_date = (end_date - pd.DateOffset(days=(4 * 28))).strftime('%Y-%m-%d')
end_date = pd.to_datetime(end_date)
start_date = pd.to_datetime(start_date)
q = Q_SOLD_QUANTITY.format(schema=schema,
start_date=start_date,
end_date=end_date)
df_flags = pull_data(q, db)
flags = divide_time(df_flags, start_date=start_date, end_date=end_date)
flags = flags.pivot_table(metric, index=['store_id', 'drug_id'],
columns='divide_time', aggfunc=np.sum).fillna(0)
flags = flags.reset_index()
df = pd.merge(df, flags, how='left', on=['store_id', 'drug_id'],
validate='one_to_one').fillna(0)
return df
def append_binary_tags(df, buckets):
''' convert quantity sold/comp sold into binary tags'''
for x in buckets:
df[x + '_b'] = np.where(df[x] > 0, 1, 0)
return df
def output_comps(df, comp_buckets, i):
    '''Looks at the number of times a substitute composition was sold in the four buckets.
    -1 is assigned for drugs whose composition doesn't exist.'''
df_comp = df.copy()
df_comp['total_months_comp_sold_'+str(i)] = 0
for x in comp_buckets:
df_comp['total_months_comp_sold_'+str(i)] = df_comp['total_months_comp_sold_'+str(i)] + df_comp[x]
print('columns used to create total_comp_sold are:', comp_buckets)
df_comp = df_comp[['store_id', 'composition', 'total_months_comp_sold_'+str(i)]]
df_comp.loc[((df_comp['composition'] == 'Doesnt exist') | (df_comp['composition'] == '') | (
df_comp['composition'] == '(mg)')), 'total_months_comp_sold_'+str(i)] = -1
df_comp['total_months_comp_sold_'+str(i)] = df_comp['total_months_comp_sold_'+str(i)].astype(int)
return df_comp
def add_features(df, db, schema):
''' add features like repeatable(1 or 0), composition sold (-1,0,1,2,3), current_inv and ptr, max values '''
# merge all features here
# add max values here
df_max = pull_data(Q_MAX.format(schema=schema), db)[['store_id', 'drug_id', 'max']]
df = pd.merge(df, df_max, on=['store_id', 'drug_id'], how='left', validate='one_to_one')
df = df.dropna() # removing drugs with no entry for max values.
# add average ptr and current inventory here
df_current_inv_ptr = pull_data(Q_CURRENT_INV_AND_PTR.format(schema=schema),
db)[['store_id', 'drug_id', 'avg_ptr', 'current_inventory']]
df_current_inv_ptr['avg_ptr'] = df_current_inv_ptr['avg_ptr'].astype(float)
df = pd.merge(df, df_current_inv_ptr, on=['store_id', 'drug_id'],
how='left', validate='one_to_one')
df = df.fillna(0) #fixing na values for ptr and current inventory
# merge repeatability here
df_repeatable = pull_data(Q_REPEATABLE.format(schema=schema), db)
df = pd.merge(df, df_repeatable, on=['drug_id'], how='left', validate='many_to_one')
# merge composition here. two entries for compostion are made.
# 1 is for total historical composition sold. 2 is for total current composition sold.
df_composition = pull_data(Q_COMPOSITION.format(schema=schema), db)
df_composition['composition'] = df_composition['composition'].fillna('Doesnt exist')
df_composition.loc[((df_composition['composition'] == '') | (
df_composition['composition'] == '(mg)')), 'composition'] = 'Doesnt exist'
df = pd.merge(df, df_composition, on=['drug_id'], how='left', validate='many_to_one')
df_comp = df.groupby(['store_id',
'composition']).sum().reset_index()[['store_id',
'composition',
'quantity0',
'quantity1',
'quantity2',
'quantity3']].rename(columns={'quantity0':'comp_quantity0',
'quantity1':'comp_quantity1',
'quantity2':'comp_quantity2',
'quantity3':'comp_quantity3'})
df_comp = append_binary_tags(df_comp,
buckets=['comp_quantity0',
'comp_quantity1',
'comp_quantity2',
'comp_quantity3'])
df_comp_1 = output_comps(df_comp.copy(), comp_buckets=['comp_quantity1_b',
'comp_quantity2_b',
'comp_quantity3_b'], i=1)
df = pd.merge(df, df_comp_1, on=['store_id', 'composition'], how='left', validate='many_to_one')
df_comp_2 = output_comps(df_comp.copy(), comp_buckets=['comp_quantity0_b', 'comp_quantity1_b', 'comp_quantity2_b'], i=2)
df = pd.merge(df, df_comp_2, on=['store_id', 'composition'], how='left', validate='many_to_one')
return df
def add_filters(df):
''' remove 000 and 111 before passing to probability matrix for drugs bucketwise'''
df = df[~(df['historical_bucket'].str.endswith('000'))]
df = df[~(df['historical_bucket'].str.endswith('111'))]
return df
def add_bucket(df, buckets, name):
    ''' build bucket string by concatenating the supplied flags, e.g. is_repeatable + total_comp_sold + monthly quantity flags '''
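    # illustrative example: is_repeatable=1, total_months_comp_sold=2,
    # quantity3_b=0, quantity2_b=1, quantity1_b=1 -> historical_bucket '12011'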
df_bucket = df.copy()
print('columns to create final buckets are:', buckets)
df_bucket[name] = ''
    # every supplied feature is cast to int and then str before concatenation
for x in buckets:
df_bucket[x] = df_bucket[x].map(int)
df_bucket[name] = df_bucket[name] + df_bucket[x].map(str)
df[name] = df_bucket[name]
return df_bucket
def build_probability_matrix(dfi):
''' build cumulative probability matrix. quantity0 means last month quantity sold'''
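    # selling_probability: share of drugs in a historical bucket that sold in
    # the analysis month; cumm_prob: cumulative probability after sorting
    # buckets by selling_probability in descending order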
df = dfi.copy()
df['sold_analysis_month_flag'] = np.where(df['quantity0'] > 0, 1, 0)
df = df.groupby(['historical_bucket'], as_index=False).agg({'sold_analysis_month_flag': 'mean',
'drug_id': 'count'})
df.rename({'sold_analysis_month_flag': 'selling_probability',
'drug_id': 'total_drugs_in_bucket'}, axis=1, inplace=True)
df = df.sort_values(by=['selling_probability'], ascending=False)
df['sold_cumm'] = (df['selling_probability'] * df['total_drugs_in_bucket']).cumsum()
df['total_cumm'] = (df['total_drugs_in_bucket']).cumsum() #cumulative number of total drugs
    df['cumm_prob'] = df['sold_cumm'] / df['total_cumm']  # cumulative probability = cumulative sold / cumulative total drugs
df = df.drop(columns=['total_drugs_in_bucket', 'sold_cumm', 'total_cumm'], axis=1)
return df
def calculate_probabilities(df, db, schema):
'''
calculate probabilties for buckets in order as follows
1. is_repeatable.
    2. total months composition sold (i.e. -1, 1, 2, 3); -1 means the composition doesn't exist
    3. quantity0 is the previous month; quantity3 is the oldest month:
3.1 previous month - 0
3.2 two months ago - 1
3.3 three months ago - 2
3.4 four months ago - 3
-- 3,2,1 are historical months
-- 2,1,0 are current months
--make flags: adds quantity sold for each month.
--append_binary_tags converts quantity sold into binary tags (1 or 0)
-- add features: appends repeatability, total number of times
composition is sold, current inv and ptr, original max values
-- add bucket: appends bucket in the order of features it is supplied with.
-- add filters removes drugs which aren't used while building the probability matrix (removed 000 and 111 cases from historical)
-- build probability matrix calculates the final probabilities
'''
df = make_flags(df.copy(), db, schema, metric='total_sales_quantity')
df = append_binary_tags(df, buckets=['quantity0', 'quantity1', 'quantity2', 'quantity3'])
df = add_features(df.copy(), db, schema)
df = add_bucket(df.copy(),
buckets=['is_repeatable', 'total_months_comp_sold_1', 'quantity3_b', 'quantity2_b', 'quantity1_b'],
name='historical_bucket')
df = add_bucket(df.copy(),
buckets=['is_repeatable', 'total_months_comp_sold_2', 'quantity2_b', 'quantity1_b', 'quantity0_b'],
name='current_bucket')
    # add moving averages: current 3 months (quantity0-2) and historical 3 months (quantity1-3)
df['current_ma_3_months'] = (df['quantity0'] + df['quantity1'] + df['quantity2']) / 3
df['historical_ma_3_months'] = (df['quantity1'] + df['quantity2'] + df['quantity3']) / 3
#make separation between moving average less than 2 and greater than 2 based on moving average of historical months. (month3, month2,month1)
df['historical_flag_ma_less_than_2'] = np.where(df['historical_ma_3_months'] < 2, 1, 0)
df['historical_flag_ma_greater_than_5'] = np.where(df['historical_ma_3_months'] > 5, 1, 0)
df['current_flag_ma_less_than_2'] = np.where(df['current_ma_3_months'] < 2, 1, 0)
    # 1 is for historical MA < 2
    # 2 is for 2 <= historical MA <= 5 (MA > 5 is excluded below)
hist_df1 = df[df['historical_flag_ma_less_than_2'] == 1]
hist_df2 = df[((df['historical_flag_ma_less_than_2'] == 0) & (df['historical_flag_ma_greater_than_5'] == 0))]
# this would remove 000 and 111 drugs before removing historical buckets.
# Done separately because some buckets in historical might be 111 which are not 111 in current bucket
hist_df1 = add_filters(hist_df1)
hist_df2 = add_filters(hist_df2)
probability_matrix_1 = build_probability_matrix(hist_df1)
probability_matrix_2 = build_probability_matrix(hist_df2)
    # drugs which are 111 and 000 in current buckets are removed
df_eligible = df[~((df['current_bucket'].str.endswith('000')) | (df['current_bucket'].str.endswith('111')))]
df_eligible_1 = df_eligible[(df_eligible['current_flag_ma_less_than_2'] == 1)]
df_eligible_2 = df_eligible[(df_eligible['current_flag_ma_less_than_2'] == 0)]
df_eligible_1 = df_eligible_1.drop(['historical_bucket'], axis=1)
df_eligible_2 = df_eligible_2.drop(['historical_bucket'], axis=1)
    # mapping historical bucket probabilities onto current buckets
df_eligible_1 = pd.merge(df_eligible_1, probability_matrix_1, how='left', left_on=['current_bucket'], right_on=['historical_bucket'], validate='many_to_one')
df_eligible_2 = pd.merge(df_eligible_2, probability_matrix_2, how='left', left_on=['current_bucket'], right_on=['historical_bucket'], validate='many_to_one')
matrix = pd.concat([df_eligible_1, df_eligible_2])
# add relevant variables
matrix['corrected_max'] = np.where(matrix['max'] != 0, matrix['max'], np.round(matrix['current_ma_3_months']))
matrix['corrected_max'] = np.where(matrix['current_flag_ma_less_than_2'] == 1, 1, # put default value of MA<2 as 1.
matrix['corrected_max'])
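    # inv_impact: value (at avg ptr) needed to top inventory up to the
    # corrected max; max_impact: total value of the corrected max itself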
matrix['inv_impact'] = np.where(matrix['current_inventory'] >= matrix['corrected_max'], 0,
(matrix['corrected_max'] - matrix['current_inventory']) * matrix['avg_ptr'])
matrix['max_impact'] = matrix['corrected_max'] * matrix['avg_ptr']
matrix['cumm_prob'] = np.round(matrix['cumm_prob'], 2)
matrix['selling_probability'] = np.round(matrix['selling_probability'], 2)
matrix = matrix[['store_id', 'drug_id', 'composition', 'max', 'avg_ptr', 'current_inventory',
'current_ma_3_months', 'is_repeatable', 'total_months_comp_sold_2', 'quantity2',
'quantity1', 'quantity0', 'current_bucket', 'current_flag_ma_less_than_2',
'selling_probability', 'cumm_prob', 'corrected_max', 'inv_impact', 'max_impact']]
#separating 111 for further analysis.
matrix_111 = df[df['current_bucket'].str.endswith('111')][['store_id', 'drug_id','composition','avg_ptr',
'current_inventory', 'quantity0', 'quantity1',
'quantity2', 'max', 'current_ma_3_months']]
matrix_111['corrected_max'] = np.where(matrix_111['max'] == 0, np.round(matrix_111['current_ma_3_months']), matrix_111['max'])
matrix_111['inv_impact'] = np.where(matrix_111['current_inventory'] >= matrix_111['corrected_max'], 0,
(matrix_111['corrected_max'] - matrix_111['current_inventory']) * matrix_111['avg_ptr'])
matrix_111['max_impact'] = matrix_111['corrected_max'] * matrix_111['avg_ptr']
# quantity0 is prev month. quantity1 is two months ago and so on.
matrix = matrix.rename(columns={"quantity0": "quantity_sold_0",
"quantity1": "quantity_sold_1",
"quantity2": "quantity_sold_2",
"quantity3": "quantity_sold_3",
'max': 'original_max',
'is_repeatable': 'bucket_flag_is_repeatable',
'total_months_comp_sold_2': 'bucket_flag_total_months_comp_sold',
'quantity0_b': 'bucket_flag_quantity0_b',
'quantity1_b': 'bucket_flag_quantity1_b',
'quantity2_b': 'bucket_flag_quantity2_b',
'quantity3_b': 'bucket_flag_quantity3_b'
})
matrix_111 = matrix_111.rename(columns={"quantity0": "quantity_sold_0",
"quantity1": "quantity_sold_1",
"quantity2": "quantity_sold_2",
'current_ma_3_months':"ma_3_months",
"max":"original_max"})
return matrix, matrix_111, probability_matrix_1, probability_matrix_2
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
parser.add_argument('-d', '--debug_mode', default="Y", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
debug_mode = args.debug_mode
logger = get_logger()
s3 = S3()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
""" calling the main function """
status = main(debug_mode, rs_db_read, rs_db_write, read_schema,
write_schema, s3, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
reset_date = dt.date.today().strftime("%Y-%m-%d")
email.send_email_file(
subject=f"IPC V3 Corrections Matrix (GLUE-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Job Params: {args}
""",
to_emails=email_to)
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ipc_correction_matrix/probability_matrix_main.py | probability_matrix_main.py |
import argparse
import sys
import os
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB, MySQL
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-mtp', '--main_table_prefix', default="NA", type=str, required=False)
parser.add_argument('-ttp', '--temp_table_prefix', default="pre", type=str, required=False)
parser.add_argument('-bs', '--batch_size', default=25000, type=int, required=False)
parser.add_argument('-td', '--total_drugs', default=125000, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
batch_size = args.batch_size
total_drugs = args.total_drugs
main_table_prefix = args.main_table_prefix
temp_table_prefix = f"-{args.temp_table_prefix}"
main_table_prefix = "" if main_table_prefix == "NA" else main_table_prefix
os.environ['env'] = env
logger = get_logger()
table_name = "drug-unique-composition-mapping"
""" Setting the schema name as per the env """
if env == "dev":
ms_source_schema = "test-generico"
elif env == "stage":
ms_source_schema = "test-generico"
elif env == "prod":
ms_source_schema = "prod2-generico"
else:
raise Exception("Set the env first!")
# rs_source_schema = "test-generico" if env == "stage" else "prod2-generico"
# ms_source_schema = "test-generico" if env == "stage" else "prod2-generico"
ms_target_schema = ms_source_schema
temp_table_name = f"`{ms_target_schema}`.`{table_name}{temp_table_prefix}`"
main_table_name = f"`{ms_target_schema}`.`{table_name}{main_table_prefix}`"
logger.info(f"temp_table_name: {temp_table_name}")
logger.info(f"main_table_name: {main_table_name}")
def get_drug_groups_mysql(mysql_read_db, discard_na_mol_drugs=False, batch=1, batch_size=10000):
"""
if discard_na_mol_drugs = False --> Considers the strengths as 0(zero) for unit-type 'NA' molecules
if discard_na_mol_drugs = True --> Discards the drugs from substitution logic all together
where unit-type of one or many molecule is 'NA'
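    Results are fetched in pages via LIMIT/OFFSET (batch, batch_size).
    Drugs are grouped by the md5 of their ordered, de-duplicated molecule
    combinations (molecule group/name + strength).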
"""
# this filter to discard drugs, which have one or many molecule unit-type as 'NA'
filter_na_mol_drugs = ""
if discard_na_mol_drugs:
filter_na_mol_drugs = f"""
where
d.`composition-master-id` not in (
select
DISTINCT `composition-master-id`
from
`{ms_source_schema}`.`composition-master-molecules-master-mapping`
where
(`unit-type-value` = '' or `unit-type` = 'NA'))
"""
query = f"""
SELECT
`drug-id`,
GROUP_CONCAT(DISTINCT `molecule-combination` ORDER BY `molecule-combination`) as combination,
md5(GROUP_CONCAT(DISTINCT `molecule-combination` ORDER BY `molecule-combination`)) as `group`
from
(
SELECT
`drug-id`,
CONCAT(' name_or_group:' , dm.`molecule-group-or-name` ,
' strength:' , dm.`unit-type-value` , dm.`unit-type`) as "molecule-combination"
from
(
select
d.id as `drug-id`,
case
when (mm.`molecule-group` = ''
or mm.`molecule-group` is null) then mm.id
else mm.`molecule-group`
end as `molecule-group-or-name`,
case when cmmmm.`unit-type-value` = '' then 0 else
cmmmm.`unit-type-value` end as `unit-type-value`,
cmmmm.`unit-type`
from
`{ms_source_schema}`.drugs d
inner join `{ms_source_schema}`.`composition-master-molecules-master-mapping` cmmmm on
d.`composition-master-id` = cmmmm.`composition-master-id`
inner join `{ms_source_schema}`.`molecule-master` mm on
mm.id = cmmmm.`molecule-master-id`
{filter_na_mol_drugs}
) dm
) a
group by
a.`drug-id`
order by
a.`drug-id`
LIMIT {batch_size} OFFSET {(batch - 1) * batch_size};
"""
logger.info(f"query to get the data: {query}")
df = pd.read_sql_query(con=mysql_read_db.connection, sql=query)
return df
mysql_write_db = MySQL(read_only=False)
mysql_write_db.open_connection()
# Truncate the temp table before starting
query = f""" delete from {temp_table_name};"""
mysql_write_db.engine.execute(query)
logger.info(f"deleted from temp table, query: {query}")
# drug_group_df = get_drug_groups_redshift()
# drug_group_df = get_drug_groups_mysql(discard_na_mol_drugs=False) # to discard NA moles drugs
drug_group_df = pd.DataFrame()
mysql_read_db = MySQL()
mysql_read_db.open_connection()
# query = "SELECT count(id) as `drug-count` FROM `{ms_source_schema}`.drugs "
# c_df = pd.read_sql_query(con=mysql_read_db.connection, sql=query)
# total_drugs = 125000
for i in range(1, round(total_drugs / batch_size) + 1):
temp_df = get_drug_groups_mysql(mysql_read_db=mysql_read_db, discard_na_mol_drugs=False,
batch=i, batch_size=batch_size) # to consider NA moles drugs
drug_group_df = pd.concat([drug_group_df, temp_df])
logger.info(f"fetched batch: {i}")
mysql_read_db.close()
total_count = len(drug_group_df)
logger.info(f"Total drug count: {total_count}")
# store the data in the temp table
drug_group_df.to_sql(
con=mysql_write_db.engine, name=f"{table_name}{temp_table_prefix}", schema=ms_target_schema,
if_exists="append", chunksize=500, index=False)
# # Delete the data from temp table which is already present in main table
# query = f""" DELETE FROM t1 USING {temp_table_name} t1 INNER JOIN {main_table_name} t2 ON
# ( t1.`drug-id` = t2.`drug-id` and t1.group = t2.group); """
#
# response = mysql_write_db.engine.execute(query)
# present_correct_count = response.rowcount
# logger.info(f"Correct drug-ids count: {present_correct_count}")
#
# # Delete the incorrect substitutes from main table
# query = f""" DELETE FROM t1 USING {main_table_name} t1 INNER JOIN {temp_table_name} t2 ON
# ( t1.`drug-id` = t2.`drug-id` );"""
#
# response = mysql_write_db.engine.execute(query)
# present_incorrect_count = response.rowcount
# logger.info(f"Incorrect drug-ids count: {present_incorrect_count}")
logger.info("Delete main table: start")
# Method 2: delete all the records from main and refill #
query = f""" delete from {main_table_name};"""
mysql_write_db.engine.execute(query)
# Now Insert the records in main table
query = f""" INSERT INTO {main_table_name} (`drug-id`, `combination`, `group`)
SELECT `drug-id`, `combination`, `group` FROM {temp_table_name} """
response = mysql_write_db.engine.execute(query)
new_insert_count = response.rowcount
logger.info(f"Insert/Update drug-ids count: {new_insert_count}")
mysql_write_db.close()
# if total_count == present_correct_count + new_insert_count:
# logger.info("Drug substitute data updated successfully")
# else:
# raise Exception("Data count mismatch") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/drug-substitution-mapping/drug-unique-composition-mapping.py | drug-unique-composition-mapping.py |
import argparse
import datetime
import os
import sys
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import MySQL
from zeno_etl_libs.helper.aws.s3 import S3
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-mtp', '--main_table_prefix', default="NA", type=str, required=False)
parser.add_argument('-ttp', '--temp_table_prefix', default="pre", type=str, required=False)
parser.add_argument('-bs', '--batch_size', default=10000, type=int, required=False)
parser.add_argument('-td', '--total_drugs', default=100000, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
batch_size = args.batch_size
total_drugs = args.total_drugs
main_table_prefix = args.main_table_prefix
temp_table_prefix = f"-{args.temp_table_prefix}"
main_table_prefix = "" if main_table_prefix == "NA" else main_table_prefix
os.environ['env'] = env
logger = get_logger()
table_name = "drug-substitution-mapping"
""" Setting the schema name as per the env """
if env == "dev":
ms_source_schema = "test-generico"
elif env == "stage":
ms_source_schema = "test-generico"
elif env == "prod":
ms_source_schema = "prod2-generico"
else:
raise Exception("Set the env first!")
# rs_source_schema = "test-generico" if env == "stage" else "prod2-generico"
# ms_source_schema = "test-generico" if env == "stage" else "prod2-generico"
ms_target_schema = ms_source_schema
temp_table_name = f"`{ms_target_schema}`.`{table_name}{temp_table_prefix}`"
main_table_name = f"`{ms_target_schema}`.`{table_name}{main_table_prefix}`"
logger.info(f"temp_table_name: {temp_table_name}")
logger.info(f"main_table_name: {main_table_name}")
def get_drug_groups_mysql(mysql_read_db, discard_na_mol_drugs=False, batch=1, batch_size=10000):
"""
if discard_na_mol_drugs = False --> Considers the strengths as 0(zero) for unit-type 'NA' molecules
if discard_na_mol_drugs = True --> Discards the drugs from substitution logic all together
where unit-type of one or many molecule is 'NA'
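    Results are fetched in pages via LIMIT/OFFSET (batch, batch_size).
    Substitution groups hash the ordered molecule combination together with
    strength in the smallest unit, release-pattern group and available-in group.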
"""
# this filter to discard drugs, which have one or many molecule unit-type as 'NA'
filter_na_mol_drugs = ""
if discard_na_mol_drugs:
filter_na_mol_drugs = f"""
where
d.`composition-master-id` not in (
select
DISTINCT `composition-master-id`
from
`{ms_source_schema}`.`composition-master-molecules-master-mapping`
where
(`unit-type-value` = '' or `unit-type` = 'NA') )
"""
query = f"""
SELECT
`drug-id`,
GROUP_CONCAT(DISTINCT `molecule-combination` ORDER BY `molecule-combination`) as combination,
        md5(GROUP_CONCAT(DISTINCT `molecule-combination` ORDER BY `molecule-combination`)) as `group`
from
(
SELECT
`drug-id`,
CONCAT(' name_or_group:' , dm.`molecule-group-or-name` ,
' strength:' , dm.`strength-in-smallest-unit` , dm.`smallest-unit` ,
' release-pattern:' , `release-pattern-group` ,
' available-in:' , `available-in-group`) as "molecule-combination"
from
(
select
d.id as `drug-id`,
case
when (mm.`molecule-group` = ''
or mm.`molecule-group` is null) then mm.id
else mm.`molecule-group`
end as `molecule-group-or-name`,
case when cmmmm.`unit-type-value` = '' then 0 else
cmmmm.`unit-type-value` * uomm.`smallest-unit-value` end as `strength-in-smallest-unit`,
uomm.`smallest-unit` as `smallest-unit`,
rpm.`group` as `release-pattern-group`,
aidfm.`available-group` as `available-in-group`
from
`{ms_source_schema}`.drugs d
inner join `{ms_source_schema}`.`composition-master-molecules-master-mapping` cmmmm on
d.`composition-master-id` = cmmmm.`composition-master-id`
inner join `{ms_source_schema}`.`molecule-master` mm on
mm.id = cmmmm.`molecule-master-id`
inner join `{ms_source_schema}`.`drug-molecule-release` dmr on
d.id = dmr.`drug-id`
and cmmmm.`molecule-master-id` = dmr.`molecule-master-id`
inner join `{ms_source_schema}`.`available-in-group-mapping` aidfm on
d.`available-in` = aidfm.`available-in`
inner join `{ms_source_schema}`.`release-pattern-master` rpm on
dmr.`release` = rpm.name
inner join `{ms_source_schema}`.`unit-of-measurement-master` uomm on
cmmmm.`unit-type` = uomm.unit
{filter_na_mol_drugs}
) dm ) a
group by
a.`drug-id`
order by
a.`drug-id`
LIMIT {batch_size} OFFSET {(batch - 1) * batch_size};
"""
logger.info(f"query to get the data: {query}")
df = pd.read_sql_query(con=mysql_read_db.connection, sql=query)
return df
mysql_write_db = MySQL(read_only=False)
mysql_write_db.open_connection()
# Truncate the temp table before starting
query = f""" delete from {temp_table_name};"""
mysql_write_db.engine.execute(query)
logger.info(f"deleted from temp table, query: {query}")
# drug_group_df = get_drug_groups_redshift()
# drug_group_df = get_drug_groups_mysql(discard_na_mol_drugs=False) # to discard NA moles drugs
drug_group_df = pd.DataFrame()
mysql_read_db = MySQL()
mysql_read_db.open_connection()
# query = "SELECT count(id) as `drug-count` FROM `{ms_source_schema}`.drugs "
# c_df = pd.read_sql_query(con=mysql_read_db.connection, sql=query)
s3 = S3(bucket_name=f"{env}-zeno-s3-db")
# total_drugs = 125000
for i in range(1, round(total_drugs / batch_size) + 1):
temp_df = get_drug_groups_mysql(mysql_read_db=mysql_read_db, discard_na_mol_drugs=True, batch=i,
batch_size=batch_size) # to consider NA moles drugs
drug_group_df = pd.concat([drug_group_df, temp_df])
logger.info(f"fetched batch: {i}")
mysql_read_db.close()
total_count = len(drug_group_df)
logger.info(f"Total drug count: {total_count}")
# store the data in the temp table
drug_group_df.to_sql(
con=mysql_write_db.engine, name=f"{table_name}{temp_table_prefix}", schema=ms_target_schema,
if_exists="append", chunksize=500, index=False)
# # This is correct query but lock the transaction.
# query = f""" DELETE FROM t1 USING {main_table_name} t1 LEFT JOIN {temp_table_name} t2 ON
# t1.`drug-id` = t2.`drug-id` where t2.`drug-id` is null ;"""
# # Delete the drugs which are NOT in the temp table.
# query = f""" DELETE FROM t1 USING {main_table_name} t1 JOIN {temp_table_name} t2 ON
# ( t1.`drug-id` = t2.`drug-id` and t2.`drug-id` is null );"""
# response = mysql_write_db.engine.execute(query)
# logger.info(
# f"Delete the records from main table, which are absent in temp table: {response.rowcount}")
#
# # Delete the data from temp table which is already present in main table
# query = f""" DELETE FROM t1 USING {temp_table_name} t1 INNER JOIN {main_table_name} t2 ON
# ( t1.`drug-id` = t2.`drug-id` and t1.group = t2.group); """
#
# response = mysql_write_db.engine.execute(query)
# present_correct_count = response.rowcount
# logger.info(f"Correct drug-ids count: {present_correct_count}")
#
# # Delete the incorrect substitutes from main table
# query = f""" DELETE FROM t1 USING {main_table_name} t1 INNER JOIN {temp_table_name} t2 ON
# ( t1.`drug-id` = t2.`drug-id` );"""
#
# response = mysql_write_db.engine.execute(query)
# present_incorrect_count = response.rowcount
# logger.info(f"Incorrect drug-ids count: {present_incorrect_count}")
logger.info("Delete main table: start")
# Method 2: delete all the records from main and refill #
query = f""" delete from {main_table_name};"""
mysql_write_db.engine.execute(query)
# Now Insert the records in main table
query = f""" INSERT INTO {main_table_name} (`drug-id`, `combination`, `group`)
SELECT `drug-id`, `combination`, `group` FROM {temp_table_name} """
response = mysql_write_db.engine.execute(query)
new_insert_count = response.rowcount
logger.info(f"Insert/Update drug-ids count: {new_insert_count}")
# log the changes
query = f""" SELECT `drug-id`, `combination`, `group` FROM {temp_table_name}; """
df = pd.read_sql_query(con=mysql_write_db.connection, sql=query)
if not df.empty:
start_ts = datetime.datetime.now() + datetime.timedelta(days=-1)
start_ts = start_ts.strftime("%Y-%m-%d-%H-%M-%S")
f_name = 'drug-substitution-mapping-change-log/{}/data.csv'.format(
start_ts[:16].replace("-", "/"))
uri = s3.save_df_to_s3(df=df, file_name=f_name)
print(f"changes have been logged at uri: {uri}")
else:
print("No change detected!")
mysql_write_db.close()
# if total_count == present_correct_count + new_insert_count:
# logger.info("Drug substitute data updated successfully")
# else:
# raise Exception("Data count mismatch") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/drug-substitution-mapping/drug-substitution-mapping.py | drug-substitution-mapping.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
import pandas as pd
import dateutil
import datetime
from dateutil.tz import gettz
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
                    help="This is env(dev, stage, prod)")
parser.add_argument('-d', '--full_run', default=0, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
full_run = args.full_run
os.environ['env'] = env
logger = get_logger()
logger.info(f"full_run: {full_run}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'happay-expenses'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# max of data
happay_q = """
select
date(max("expense-created-at")) max_exp
from
"prod2-generico"."happay-expenses"
"""
max_exp_date = rs_db.get_df(happay_q)
max_exp_date['max_exp'].fillna(np.nan, inplace=True)
logger.info(max_exp_date.info())
max_exp_date = max_exp_date['max_exp'].to_string(index=False)
logger.info(max_exp_date)
# Read from gsheet
gs = GoogleSheet()
happay_data = gs.download(data={
"spreadsheet_id": "1XTTsiGEJgX7lpgnVLgkEs0SQojz-F06dICAuqWZKLtg",
"sheet_name": "Happay Data",
"listedFields": []
})
happay_expenses = pd.DataFrame(happay_data)
happay_expenses[['expense_created_at', 'report_created_at']] = happay_expenses[
['expense_created_at', 'report_created_at']] \
.apply(pd.to_datetime, errors='coerce')
# params
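# incremental load: start from the latest expense-created-at already present in
# Redshift; a full_run (or an empty table) reloads everything since 2017-05-13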
if full_run or max_exp_date == 'NaN':
start = '2017-05-13'
else:
start = max_exp_date
start = dateutil.parser.parse(start)
happay_expenses = happay_expenses[(happay_expenses['expense_created_at'] >= start)]
# etl
happay_expenses['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
happay_expenses['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
happay_expenses['created-by'] = 'etl-automation'
happay_expenses['updated-by'] = 'etl-automation'
happay_expenses.columns = [c.replace('_', '-') for c in happay_expenses.columns]
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=happay_expenses[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/happay_expenses/happay_expenses.py | happay_expenses.py |
import os
import sys
sys.path.append('../../../..')
import numpy as np
import pandas as pd
import datetime as dt
import argparse
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from dateutil.tz import gettz
from zeno_etl_libs.helper import helper
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# def main(rs_db, s3):
schema = 'prod2-generico'
# composition wise data for the last 30 days
query = '''
select
x1.composition,
x1."overall_qty",
round((x1."gen_quantity"/(x1."gen_quantity"+x1."eth_quantity"))*100,2) as "gen_share",
round((x1."stores_comp_sold"/x1."live_stores")*100,2) as "stores_share_comp_sold",
round((x1."gen_stores"/x1."stores_comp_sold")*100,2) as "%_stores_gen_sold" ,
round((x1."sub_qty"/x1."sub_base")*100,2) as "substitution_percentage",
-- round((x1."not_inv_base"/x1."overall_qty")*100,2) as "not_in_inv_%",
round(((x1."gen_mrp"-x1."gen_sp")/x1."gen_mrp")*100,2) as "%_gen_discount",
round(((x1."eth_mrp"-x1."eth_sp")/x1."eth_mrp")*100,2) as "%_eth_discount",
x1."gen_mrp",
x1."eth_mrp",
x1."gen_sp",
x1."eth_sp",
x1."no_gen_drug",
x1."no_eth_drug",
x1."GA_flag"
from
(select
s.composition ,
sum(s."net-quantity") as "overall_qty",
sum(case when s."type" = 'generic' then convert(float,s."net-quantity") end) as "gen_quantity",
sum(case when s."type" = 'ethical' then convert(float,s."net-quantity") end) as "eth_quantity",
convert(float, count(distinct "store-id")) as "stores_comp_sold",
convert(float, count(distinct(case when s."type" = 'generic' then s."store-id" end))) as "gen_stores",
convert(float, count(distinct(case when s."type" = 'ethical' then s."store-id" end))) as "eth_stores",
convert(float, count(distinct(case when s."substitution-status"= 'not-in-inventory' then s."store-id" end))) as "not_inv_stores",
convert(float, (select count(distinct "store-id") from "prod2-generico"."prod2-generico".sales s where "created-date" >= dateadd(day, -30, current_date))) as "live_stores",
sum(case when s."substitution-status" in ('substituted', 'not-substituted') then convert(float,"net-quantity") end) as "sub_base",
sum(case when s."substitution-status" in ('substituted') then convert(float,"net-quantity") end) as "sub_qty",
sum(case when s."substitution-status" in ('not-in-inventory', 'generic-unavailable') then convert(float,"net-quantity") end) as "not_inv_base",
avg(case when s."type" = 'generic' then mrp end) as "gen_mrp",
avg(case when s."type" = 'ethical' then mrp end) as "eth_mrp",
avg(case when s."type" = 'ethical' then "net-rate" end) as "eth_sp",
avg(case when s."type" = 'generic' then "net-rate" end) as "gen_sp",
count(distinct (case when s."type" = 'generic' then s."drug-id" end)) as "no_gen_drug",
count(distinct (case when s."type" = 'ethical' then s."drug-id" end)) as "no_eth_drug",
max(case when s."company-id"=6984 then 1 else 0 end) as "GA_flag"
from
"prod2-generico"."prod2-generico".sales s
where
s."created-date" >= dateadd(day, -30, current_date)
and s.composition in (select composition from "prod2-generico"."prod2-generico".drugs d where d."type"= 'generic')
and s."net-quantity">0
group by 1)x1 '''
comp_wise_data = rs_db.get_df(query)
comp_wise_data.fillna(0, inplace=True)
# not-in-inventory and substitution metrics, aggregated from store-wise data
query = '''
select
x1.composition,
(sum(case when x1."gen_flag" =0 then convert(float,x1."not_inv_tot_qty") end)/sum(case when x1."gen_flag" =0 then convert(float, x1."tot_qty") end))*100 as "not_inv_per_gen_not_sold",
(sum(case when x1."gen_flag" =1 then convert(float,x1."not_inv_tot_qty") end)/sum(case when x1."gen_flag" =1 then convert(float, x1."tot_qty") end))*100 as "not_inv_per_gen_sold",
(sum(case when x1."gen_flag" =1 then convert(float,x1."sub_qty") end)/sum(case when x1."gen_flag" =1 then convert(float, x1."sub_qty_base") end))*100 as "sub_per_str_whr_gen_sold",
avg(case when x1."gen_flag" = 1 then "opportunity" end)*100 as "opp_gen_sold"
from
(select
s."store-id" ,
s.composition ,
nvl(sum(case when s."substitution-status" in ('substituted') then convert(float,s."net-quantity") end ),0) as "sub_qty",
nvl(sum(case when s."substitution-status" in ('not-substituted') then s."net-quantity" end ),0) as "not_sub_qty",
nvl(sum(case when s."substitution-status" in ('not-in-inventory') then s."net-quantity" end ),0) as "not_inv_qty",
nvl(sum(case when s."substitution-status" in ('not-in-inventory','generic-unavailable') then s."net-quantity" end ),0) as "not_inv_tot_qty",
nvl(sum(case when s."substitution-status" in ('substituted','not-substituted') then s."net-quantity" end ),0) as "sub_qty_base",
nvl(sum(case when s."substitution-status" in ('substituted','not-substituted','not-in-inventory','generic-unavailable') then s."net-quantity" end),0) as "tot_qty",
(case when "sub_qty">0 then 1 else 0 end) as "gen_flag",
nvl(round(convert(float,"sub_qty")/nullif(convert(float,"sub_qty_base"),0),2),0) as "sub_%",
ceil ((convert(float,"not_inv_qty")*"sub_%")+convert(float,"sub_qty")) as "sub_sim",
ceil ((convert(float,"not_inv_qty")*"sub_%")+convert(float,"not_sub_qty")) as "not_sub_sim",
nvl(round(convert(float, "sub_sim")/nullif(convert(float, "tot_qty"),0),2),0) as "opportunity"
from
"prod2-generico"."prod2-generico".sales s left join
"prod2-generico"."prod2-generico".drugs d2 on s."drug-id"= d2.id
where
s.composition in (select composition from "prod2-generico"."prod2-generico".drugs d where d."type" = 'generic')
and s."created-date" >= dateadd(day, -30, current_date)
group by s."store-id",s.composition )x1
where x1."tot_qty">0
group by x1.composition'''
not_inv_data = rs_db.get_df(query)
not_inv_data.fillna(0, inplace=True)
comp_wise_data = pd.merge(left=comp_wise_data, right=not_inv_data, how='left', on='composition')
# generic composition: whether a max is set in drug-order-info
query = '''
select
d.composition ,
case when sum(doi.max) >0 then 'Yes' else 'No' end as "max_set"
from
"prod2-generico"."prod2-generico"."drug-order-info" doi
left join "prod2-generico"."prod2-generico".drugs d on
doi."drug-id" = d.id
where
d."type" = 'generic'
group by d.composition '''
gen_max_set = rs_db.get_df(query)
comp_wise_data = pd.merge(left = comp_wise_data, right = gen_max_set, how = 'left', on = 'composition')
# getting the compositions from the above table
comp = comp_wise_data.composition.unique()
comp = tuple(comp)
# getting rank 1 drug ids of both ethical and generic of the above compositions
query = f'''
select
"drug-id" as "drug_id", egr.composition, "type", egr.company
from
"prod2-generico"."prod2-generico"."ethical-generic-rank" egr
where
composition in {comp}
and "rank" = 1
and "type" in ('generic', 'ethical') '''
drug_rank = rs_db.get_df(query)
# top 20 companies check
query = f'''
select * from
(
select
s.company ,
rank() over(order by sum(s."net-quantity") desc) as "row"
from
"prod2-generico"."prod2-generico".sales s
where s."year-created-at" > extract(year from current_date)-4
and s."type"= 'generic'
group by 1)x1
where "row"<= 20'''
top_20 = rs_db.get_df(query)
condition = [drug_rank['company'].isin(top_20['company'].unique()),
~drug_rank['company'].isin(top_20['company'].unique())]
choice = [1, 0]
drug_rank['in-top-20'] = np.select(condition, choice, default=0)
top_20_company = drug_rank[drug_rank['type'] == 'generic']
comp_wise_data = comp_wise_data.merge(top_20_company[['composition', 'in-top-20']])
id_list = drug_rank.drug_id.unique()
id_list = tuple(id_list)
# margin, MRP and selling price for the rank-1 generic and ethical drugs of the above compositions
query = f'''
select
x1.composition,
round((x1."eth1_mrp"-x1."eth1_sp")/(x1."eth1_mrp")*100,2) as "%_eth1_discount",
round((x1."gen1_mrp"-x1."gen1_sp")/(x1."gen1_mrp")*100,2) as "%_gen1_discount",
round((x1."gen1_sp"-x1."gen1_pur")/(x1."gen1_sp")*100,2) as "%_gen1_margin",
round((x1."eth1_sp"-x1."eth1_pur")/(x1."eth1_sp")*100,2) as "%_eth1_margin",
x1.gen1_sp,
x1.eth1_sp
from
(select
composition ,
round(avg(case when "type" = 'generic' then mrp end),2) as "gen1_mrp",
round(avg(case when "type" = 'generic' then "net-rate" end),2) as "gen1_sp",
round(avg(case when "type" = 'generic' then "purchase-rate" end),2) as "gen1_pur",
round(avg(case when "type" = 'ethical' then mrp end), 2) as "eth1_mrp",
round(avg(case when "type" = 'ethical' then "net-rate" end),2) as "eth1_sp",
round(avg(case when "type" = 'ethical' then "purchase-rate" end),2) as "eth1_pur"
from "prod2-generico"."prod2-generico".sales s
where "drug-id" in {id_list}
group by 1
having "gen1_sp"> 0 and "eth1_sp">0)x1 '''
comp1_price = rs_db.get_df(query)
merged_df = pd.merge(left=comp_wise_data, right=comp1_price, on='composition')
# Drug exists in WH
query = '''
select
d.composition , w."add-wh"
from
"prod2-generico"."prod2-generico"."wh-sku-subs-master" w
inner join "prod2-generico"."prod2-generico".drugs d on w."drug-id" = d.id
where
w."add-wh" = 'Yes'
and d."type"= 'generic'
group by d.composition, w."add-wh" '''
avail_at_wh = rs_db.get_df(query)
condition = [merged_df['composition'].isin(avail_at_wh['composition'].unique()),
~merged_df['composition'].isin(avail_at_wh['composition'].unique())]
choice = ['Yes', 'No']
merged_df['add-wh'] = np.select(condition, choice, default=0)
# Schedule H1 drug or not
query = '''
select
b."composition"
from
"prod2-generico"."composition-master-molecules-master-mapping" a
inner join "prod2-generico"."composition-master" b
on
a."composition-master-id" = b."id"
inner join "prod2-generico"."molecule-master" c
on
a."molecule-master-id" = c."id"
where
c."schedule" = 'h1'
group by
b."composition" '''
schedule = rs_db.get_df(query)
condition = [merged_df['composition'].isin(schedule['composition'].unique()),
~merged_df['composition'].isin(schedule['composition'].unique())]
choice = ['Yes', 'No']
merged_df['schedule-h1'] = np.select(condition, choice, default=0)
merged_df.columns = [c.replace('_', '-') for c in merged_df.columns]
merged_df['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
merged_df['created-by'] = 'etl-automation'
merged_df['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
merged_df['updated-by'] = 'etl-automation'
# writing the table
table_name = 'actionable-compositions'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=merged_df[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# finding how many days the gen-1 compositions were OOS, and at how many stores
gen1_id = drug_rank[drug_rank['type'] == 'generic']
gen1_id_list = gen1_id.drug_id.unique()
gen1_id_list = tuple(gen1_id_list)
# finding OOS for the above drug-ids
query = f'''
select
o."store-id" as "store_id",
o."store-name",
o."drug-id" as "drug_id",
d."drug-name" as "drug_name",
d.composition ,
sum(o."oos-count") as "oos_days",
o."as-active" as "as_active",
o."max-set"
from
"prod2-generico"."prod2-generico"."out-of-shelf-drug-level" o
left join "prod2-generico"."prod2-generico".drugs d on o."drug-id" = d.id
where
o."drug-id" in {gen1_id_list}
and "max-set" = 'Y'
and date(o."closing-date") >= dateadd(day, -30, current_date)
group by 1,2,3,4,5,7,8; '''
oos_data = rs_db.get_df(query)
oos_data.columns = [c.replace('_', '-') for c in oos_data.columns]
df = merged_df[['composition', 'gen-share']]
oos_data = oos_data.merge(df[['composition', 'gen-share']])
oos_data['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
oos_data['created-by'] = 'etl-automation'
oos_data['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
oos_data['updated-by'] = 'etl-automation'
# writing the table
table_name = 'actionable-gen1-oos'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=oos_data[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# store wise substitution
query = f'''
select
x1."store-id",
x1."store-name",
x1.composition,
x1."created-by",
x1."overall_qty",
round((x1."gen_quantity"/(x1."gen_quantity"+x1."eth_quantity"))*100,2) as "gen_share",
round((x1."sub_qty"/x1."sub_base")*100,2) as "substitution_percentage",
round((x1."not_inv_base"/x1."tot_qty")*100,2) as "not_inv_percentage"
from
(select
"store-id" ,
"store-name" ,
composition ,
s."created-by" ,
sum(s."net-quantity") as "overall_qty",
sum(case when s."type" = 'generic' then convert(float,s."net-quantity") end) as "gen_quantity",
sum(case when s."type" = 'ethical' then convert(float,s."net-quantity") end) as "eth_quantity",
sum(case when s."substitution-status" in ('substituted') then convert(float,"net-quantity") end) as "sub_qty",
sum(case when s."substitution-status" in ('substituted', 'not-substituted') then convert(float,"net-quantity") end) as "sub_base",
sum(case when s."substitution-status" in ('substituted','not-substituted','not-in-inventory', 'generic-not-available') then convert(float,"net-quantity") end) as "tot_qty",
sum(case when s."substitution-status" in ('not-in-inventory', 'generic-not-available') then convert(float,"net-quantity") end) as "not_inv_base"
from
"prod2-generico"."prod2-generico".sales s
where
s.composition in (select composition from "prod2-generico"."prod2-generico".drugs d where d."type"= 'generic')
and s."created-date" >= dateadd(day, -30, current_date)
group by 1,2,3,4
having "gen_quantity">0 and "eth_quantity" >0 )x1 '''
store_wise_sub = rs_db.get_df(query)
store_wise_sub.columns = [c.replace('_', '-') for c in store_wise_sub.columns]
store_wise_sub['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
store_wise_sub['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
store_wise_sub['updated-by'] = 'etl-automation'
# writing table
table_name = 'actionable-store-sub'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=store_wise_sub[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# Closing the DB Connection
rs_db.close_connection()

# =============================================================================
# end of file: glue-jobs/src/scripts/substitution/actionable-substitution.py (package: zeno-etl-libs)
# =============================================================================
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
import calendar
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger(level='INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
start_time = datetime.datetime.now()
logger.info('Script Manager Initialized')
logger.info("")
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
sla_input = pd.read_csv(s3.download_file_from_s3(file_name="SLA/SLA_Format.csv"))
sla_input["order_time"] = pd.to_datetime(sla_input["order_time"], format='%I:%M:%S %p').dt.time
sla_input['invoice_time'] = pd.to_datetime(sla_input['invoice_time'], format='%I:%M:%S %p').dt.time
sla_input['dispatch_time'] = pd.to_datetime(sla_input['dispatch_time'], format='%I:%M:%S %p').dt.time
sla_input['delivery_time'] = pd.to_datetime(sla_input['delivery_time'], format='%I:%M:%S %p').dt.time
sla_input['reorder_time'] = pd.to_datetime(sla_input['reorder_time'], format='%I:%M:%S %p').dt.time
sla_input['store_id'] = sla_input['store_id'].astype(str)
sla_input['city_id'] = sla_input['city_id'].astype(str)
sla_input['city_id'] = sla_input['city_id'].str.split('-')
sla_input = sla_input.explode('city_id')
sla_input['store_id'] = sla_input['store_id'].str.split('-')
sla_input = sla_input.explode('store_id')
sla_input['store_id'] = sla_input['store_id'].astype(int)
sla_input['city_id'] = sla_input['city_id'].astype(int)
as_ms_query = '''
select
a.id as "short-book-id",
a."patient-id",
getdate() as "refreshed_at",
a."store-name" as "store-name",
sm."franchisee-name" as "franchisee-name",
a."drug-name" as "drug-name",
a."as-ms" as "AS/MS",
a."status" as "sb-status",
a."requested-quantity" as "requested-quantity",
a."quantity" as "quantity",
a."required-quantity" as "required-quantity",
a."created-at",
Date(a."ordered-time") as "ordered-at",
Date(a."invoiced-at") as "invoiced-at",
Date(a."dispatched-at") as "Dispatched-at",
Date(a."store-delivered-at") as "received-at",
Date(a."completed-at") as "completed-at",
Date(a."delivered-at") as "delivered-at",
a."created-at" as "created-time",
a."ordered-time" as "ordered-time",
a."invoiced-at" as "invoiced-time",
a."dispatched-at" as "dispatch-time",
a."delivered-at" as "delivered-time",
a."completed-at" as "completed-time",
a."decline-reason" as "decline reason",
a."re-ordered-at" as "re-ordered-at",
a."type",
a."store-id",
a."drug-id",
a."company",
a."preferred-distributor-name" as "preferred dist",
a."drug-grade",
'' as "distributor type",
a."preferred-distributor-id" as "preferred-distributor-id",
a."received-distributor-name" as "received distributor",
a."recieved-distributor-id",
sm."old-new-static" as "store-type",
a."forward-dc-id",
a."dc-name" as "dc_name",
a."store-delivered-at" as "store_received_at",
a."purchase-rate" as "purchase-rate",
sm."line-manager" as "line_manager",
sm.abo,
sm.city as "city-store-master",
zc."name" as "city",
s."city-id",
sm."store-b2b" as "store_b2b",
s."franchisee-id" as "franchise_id",
case when s."franchisee-id"=1 then 'COCO'
when s."franchisee-id"!= 1 then 'FOFO'
else 'default' end as "franchisee_flag",
sm."franchisee-name" as "franchise_name",
sm."opened-at",
a."franchisee-short-book" ,
bl."buyer-name" as "Buyer",
sbol."status-log",
case
when sbol."status-log" in ('presaved,lost') then 'FOFO-partner-rejected'
else a.status
end as "status",
fofo_approved_at."presaved_approved_at"
from
"prod2-generico"."as-ms" a
left join "prod2-generico"."stores-master" sm
on
sm.id = a."store-id"
left join "prod2-generico"."buyer-store-mapping" bsm
on
bsm."store-id" = a."store-id"
left join "prod2-generico"."buyers-list" bl
on
bl.id = bsm."buyer-id"
left join "prod2-generico".stores s
on
s.id = a."store-id"
left join "prod2-generico"."zeno-city" zc
on
zc.id = s."city-id"
left join(
select
sbol."short-book-id" ,
listagg(distinct sbol.status,
',') within group (
order by sbol.id) as "status-log"
from
"prod2-generico"."short-book-order-logs" sbol
left join "prod2-generico"."prod2-generico"."short-book-1" sb
on
sbol."short-book-id" = sb.id
where
Date(sb."created-at") >= date(date_trunc('month', current_date) - interval '2 month')
group by
sbol."short-book-id") sbol
on
a.id = sbol."short-book-id"
left join (
select
sbol."short-book-id" ,
min(sbol."created-at") as "presaved_approved_at"
from
"prod2-generico"."prod2-generico"."short-book-order-logs" sbol
left join "prod2-generico"."prod2-generico"."short-book-1" sb2
on
sb2.id = sbol."short-book-id"
left join "prod2-generico"."prod2-generico".stores s2
on
s2.id = sb2."store-id"
where
s2."franchisee-id" != 1
and sbol.status not in ('presaved', 'lost', 'failed', 'declined', 'deleted')
group by
sbol."short-book-id"
)fofo_approved_at
on
fofo_approved_at."short-book-id" = a.id
where
Date(a."created-at") >= date(date_trunc('month', current_date) - interval '1 day')
'''
data = rs_db.get_df(as_ms_query)
logger.info('fetched data')
class As_ms_tat:
def __init__(self,sla_input_main):
self.sla_input_main = sla_input_main
    def weekday_calender(self, day):
        # day comes from datetime.weekday(): 0 = Monday ... 6 = Sunday
        return ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'][day]
def timecheck(self, start_time_sla, end_time_sla, check_time):
if str(start_time_sla) == '0' or str(end_time_sla) == '0':
return 'default_time'
start_time_sla = datetime.datetime.strptime(start_time_sla, '%I:%M:%S %p').time()
end_time_sla = datetime.datetime.strptime(end_time_sla, '%I:%M:%S %p').time()
if check_time >= start_time_sla and check_time <= end_time_sla:
return 'time_found'
else:
return 'time_not_found'
def tat_sla_calculator(self,created_time, pre_saved_approved_at_time,store_id, franchisee, city_id, distributor_id, drug_type):
if franchisee == 'COCO':
start_time = created_time
else:
start_time = pre_saved_approved_at_time
if pd.isnull(start_time) or start_time is pd.NaT:
return pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, None
else:
start_time = pd.to_datetime(start_time)
sla_input = self.sla_input_main.copy(deep = True)
store_parameter = False
# start_time = datetime.datetime.strptime(start_time , '%Y-%m-%d %H:%M:%S')
day_in_number = start_time.weekday()
day = self.weekday_calender(day_in_number)
if franchisee in sla_input['franchisee_flag'].unique():
franchisee = franchisee
else:
franchisee = 'default'
sla_input = sla_input[((sla_input['franchisee_flag'] == franchisee) & (sla_input['day'] == day))]
if city_id in sla_input['city_id'].unique():
city = city_id
else:
city = 0
# city = 0
# for city_ids in sla_input['city_id'].unique():
# if str(city_id) in (str(city_ids).split('-')):
# city = city_ids
# city_parameter = True
# break
            if store_id in sla_input['store_id'].unique():
                # use the store-specific SLA row when one exists, mirroring the city fallback above
                store = store_id
            else:
                store = 0
# store = 0
# for store_ids in sla_input['store_id'].unique():
# if str(store_id) in (str(store_ids).split('-')):
# store = store_ids
# store_parameter = True
# break
if store_parameter:
city = 0
sla_input = sla_input[((sla_input['store_id'] == (store)) & (sla_input['city_id'] == (city)))]
if drug_type in sla_input['drug_type'].unique():
drug_type = drug_type
else:
drug_type = 'default'
sla_input = sla_input[((sla_input['drug_type'] == (drug_type)))]
if int(distributor_id) == 8105:
distributor = 'WH'
else:
distributor = 'DC'
if distributor in sla_input['distributor_name'].unique():
distributor = distributor
else:
distributor = 'default'
sla_input = sla_input[((sla_input['distributor_name'] == (distributor)))]
# print(2)
if len(sla_input)>1:
sla_input['timecheck'] = np.vectorize(self.timecheck)(sla_input['start_time'], sla_input['end_time'], start_time.time())
if 'time_found' in sla_input['timecheck'].unique():
sla_input = sla_input[sla_input['timecheck']=='time_found']
else:
sla_input = sla_input[sla_input['timecheck']=='default_time']
sla_input = sla_input.reset_index(drop=True)
order_date = sla_input.loc[0, 'order_date']
order_time = sla_input.loc[0, 'order_time']
order_sla = start_time + datetime.timedelta(days=int(order_date))
order_sla = order_sla.replace(hour=order_time.hour, minute=order_time.minute, second=order_time.second)
invoice_date = sla_input.loc[0, 'invoice_date']
invoice_time = sla_input.loc[0, 'invoice_time']
invoice_sla = start_time + datetime.timedelta(days=int(invoice_date))
invoice_sla = invoice_sla.replace(hour=invoice_time.hour, minute=invoice_time.minute,
second=invoice_time.second)
dispatch_date = sla_input.loc[0, 'dispatch_date']
dispatch_time = sla_input.loc[0, 'dispatch_time']
dispatch_sla = start_time + datetime.timedelta(days=int(dispatch_date))
dispatch_sla = dispatch_sla.replace(hour=dispatch_time.hour, minute=dispatch_time.minute,
second=dispatch_time.second)
delivery_date = sla_input.loc[0, 'delivery_date']
delivery_time = sla_input.loc[0, 'delivery_time']
delivery_sla = start_time + datetime.timedelta(days=int(delivery_date))
delivery_sla = delivery_sla.replace(hour=delivery_time.hour, minute=delivery_time.minute,
second=delivery_time.second)
reorder_date = sla_input.loc[0, 'reorder_date']
reorder_time = sla_input.loc[0, 'reorder_time']
reorder_sla = start_time + datetime.timedelta(days=int(reorder_date))
reorder_sla = reorder_sla.replace(hour=reorder_time.hour, minute=reorder_time.minute,
second=reorder_time.second)
# print(4)
return order_sla, invoice_sla, dispatch_sla, delivery_sla, reorder_sla,sla_input.loc[0, 'id']
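    # Worked example of the SLA arithmetic above (hypothetical values, illustration only):
    # if start_time = 2023-01-02 14:30:00, order_date = 1 and order_time = 11:00:00, then
    #   order_sla = start_time + 1 day, with the clock pinned to 11:00 -> 2023-01-03 11:00:00
    # i.e. the *_date columns shift the day while the *_time columns fix the cutoff time of day.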
    def tat_checker(self, ordered_at, order_sla):
        # 'Pending' when the event has not happened yet, 'ontime' when it happened on or before its SLA
        if pd.isnull(ordered_at) or ordered_at is pd.NaT or ordered_at is None:
            return 'Pending'
        elif ordered_at <= order_sla:
            return 'ontime'
        else:
            return 'delayed'
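# Commented-out usage sketch of the class above (toy inputs; the real call happens through
# np.vectorize below). tat_sla_calculator narrows the SLA sheet step by step:
# franchisee + weekday -> store / city -> drug_type -> distributor (8105 = WH, else DC)
# -> time window, falling back to the default / 0 rows whenever a specific match is missing.
#   tat = As_ms_tat(sla_input)
#   order_sla, invoice_sla, dispatch_sla, delivery_sla, reorder_sla, sla_id = tat.tat_sla_calculator(
#       created_time=pd.Timestamp('2023-01-02 14:30:00'), pre_saved_approved_at_time=None,
#       store_id=4, franchisee='COCO', city_id=1, distributor_id=8105, drug_type='generic')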
as_ms_tat= As_ms_tat(sla_input)
data['recieved-distributor-id'] = data['recieved-distributor-id'].fillna(0)
#
# logger.info('apply')
#
# data['order_sla'],data['invoice_sla'],data['dispatch_sla'],data['delivery_sla'],data['reorder_sla'],data['sla_id'] = data.apply(lambda x: as_ms_tat.tat_sla_calculator(x['created-at'], x['presaved_approved_at'], x['store-id'], x['franchisee_flag'], x['city-id'],x['recieved-distributor-id'],x['type']), axis=1)
logger.info('vectorise')
data['order_sla'],data['invoice_sla'],data['dispatch_sla'],data['delivery_sla'],data['reorder_sla'],data['sla_id'] = np.vectorize(as_ms_tat.tat_sla_calculator)(data['created-at'], data['presaved_approved_at'], data['store-id'], data['franchisee_flag'], data['city-id'],data['recieved-distributor-id'],data['type'])
logger.info('fetched SLA Timelines')
data['ordered timing'] = np.vectorize(as_ms_tat.tat_checker)(data['ordered-time'], data['order_sla'])
data['fullfilment on invoice'] = np.vectorize(as_ms_tat.tat_checker)(data['invoiced-time'], data['invoice_sla'])
data['fullfilment on dispatch'] = np.vectorize(as_ms_tat.tat_checker)(data['dispatch-time'], data['dispatch_sla'])
data['fullfilment on delivery'] = np.vectorize(as_ms_tat.tat_checker)(data['store_received_at'], data['delivery_sla'])
data['re-order timing'] = np.vectorize(as_ms_tat.tat_checker)(data['re-ordered-at'], data['reorder_sla'])
logger.info('fetched SLA ')
# =============================================================================
# writing to Redshift
# =============================================================================
schema = 'prod2-generico'
table_name = 'as-ms-tat'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
status2 = False
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' delete
from "{schema}"."{table_name}" '''
rs_db_write.execute(truncate_query)
logger.info(str(table_name) + ' table deleted')
s3.write_df_to_db(df=data[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
status2 = True
if status2 is True:
status = 'Success'
else:
status = 'Failed'
#logger.close()
end_time = datetime.datetime.now()
difference = end_time - start_time
min_to_complete = round(difference.total_seconds()/60 , 2)
email = Email()
email.send_email_file(subject=f"{env}-{status} : {table_name} table updated",
mail_body=f"{table_name} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
rs_db.close_connection()
rs_db_write.close_connection()

# =============================================================================
# end of file: glue-jobs/src/scripts/as-ms/as_ms_tat.py (package: zeno-etl-libs)
# =============================================================================
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
def main(db):
table_name = "as-ms"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}" where date("created-at")>=date(date_trunc('month', current_date) - interval '6 month'); """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"etl-created-by",
"updated-by",
"updated-at",
"patient-id",
"store-name",
"drug-name",
"as-ms",
"created-to-invoice-days",
"created-to-invoice-hour",
"created-to-dispatch-days",
"created-to-dispatch-hour",
"created-to-delivery-days",
"created-to-delivery-hour",
"created-to-re-order-days",
"created-to-re-order-hour",
"created-to-order-days",
"created-to-order-hour",
"status",
"requested-quantity",
"quantity",
"required-quantity",
"inventory-at-creation",
"inventory-at-ordering",
"created-at",
"year-created-at",
"month-created-at",
"ordered-time",
"invoiced-at",
"dispatched-at",
"delivered-at",
"completed-at",
"re-ordered-at",
"store-delivered-at",
"decline-reason",
"type",
"store-id",
"drug-id",
"company",
"company-id",
"composition",
"composition-master-id",
"category",
"schedule",
"sub-type",
"preferred-distributor-id",
"preferred-distributor-name",
"drug-grade",
"purchase-rate",
"ptr",
"distributor-type",
"recieved-distributor-id",
"received-distributor-name",
"forward-dc-id",
"dc-name",
"abo",
"line-manager",
"store-manager",
"city",
"store-b2b",
"franchisee-short-book"
)
select
a.id,
'etl-automation' as "etl-created-by",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta', GETDATE()) as "updated-at",
a."patient-id" as "patient-id",
b."name" as "store-name",
a."drug-name" as "drug-name",
case
when a."auto-short" = 1
and a."created-by" = 'AUTO SHORT'
and a."patient-id" = 4480 then
'AS'
when
a."auto-short" = 1
and a."patient-id" != 4480 then
'MS'
else
'NA'
end as "as-ms",
--Fulfillment on Invoice
(case
when (a."invoiced-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day,
a."created-at",
a."invoiced-at")
end) as "created-to-invoice-days",
(case
when (a."invoiced-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours,
a."created-at",
a."invoiced-at")
end) as "created-to-invoice-hour",
--Fulfillment on dispatch
(case
when (a."dispatched-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day,
a."created-at",
a."dispatched-at")
end) as "created-to-dispatch-days",
(case
when (a."dispatched-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours,
a."created-at",
a."dispatched-at")
end) as "created-to-dispatch-hour",
--Fulfillment on delivery
(case
when (msda."store-delivered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day,
a."created-at",
msda."store-delivered-at")
end) as "created-to-delivery-days",
(case
when (msda."store-delivered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours,
a."created-at",
msda."store-delivered-at")
end) as "created-to-delivery-hour",
-- Re-order Timing --
(case
when (a."re-ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day,
a."created-at",
a."re-ordered-at")
end) as "created-to-re-order-days",
(case
when (a."re-ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours,
a."created-at",
a."re-ordered-at")
end) as "created-to-re-order-hour",
--order Timing--
(case
when (a."ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(day,
a."created-at",
a."ordered-at")
end) as "created-to-order-days",
(case
when (a."ordered-at" = '0101-01-01'
or a."created-at" = '0101-01-01') then null
else datediff(hours,
a."created-at",
a."ordered-at")
end) as "created-to-order-hour",
a."status" as "status",
a."requested-quantity" as "requested-quantity",
a."quantity" as "quantity",
a."required-quantity" as "required-quantity",
a."inventory-at-creation" as "inventory-at-creation" ,
a."inventory-at-ordering" as "inventory-at-ordering",
case
when a."created-at" = '0101-01-01' then null
else a."created-at"
end as "created-at",
extract(year
from
a."created-at") as "year-created-at",
extract(month
from
a."created-at") as "month-created-at",
case
when a."ordered-at" = '0101-01-01' then null
else a."ordered-at"
end as "ordered-time",
case
when a."invoiced-at" = '0101-01-01' then null
else a."invoiced-at"
end as "invoiced-at",
case
when a."dispatched-at" = '0101-01-01' then null
else a."dispatched-at"
end as "dispatched-at",
case
when a."delivered-at" = '0101-01-01' then null
else a."delivered-at"
end as "delivered-at",
case
when a."completed-at" = '0101-01-01' then null
else a."completed-at"
end as "completed-at",
case
when a."re-ordered-at" = '0101-01-01' then null
else a."re-ordered-at"
end as "re-ordered-at",
case
when msda."store-delivered-at" = '0101-01-01' then null
else msda."store-delivered-at"
end as "store-delivered-at",
a."decline-reason" as "decline-reason",
c."type",
a."store-id" as "store-id",
a."drug-id" as "drug-id",
c."company",
c."company-id" as "company-id" ,
c."composition" ,
c."composition-master-id" as "composition-master-id" ,
c."category" ,
c."schedule" ,
c."sub-type" as "sub-type" ,
f."id" as "preferred-distributor-id",
f."name" as "preferred-distributor-name",
e."drug-grade" as "drug-grade",
dp."purchase-rate" as "purchase-rate",
dp."ptr",
d."type" as "distributor-type",
d."id" as "recieved-distributor-id",
d."name" as "received-distributor-name",
j."forward-dc-id" as "forward-dc-id",
ss."name" as "dc-name",
msm."abo" ,
msm."line-manager" ,
msm."store-manager" ,
msm."city",
msm."store-b2b",
a."franchisee-short-book" as "franchisee-short-book"
from
"prod2-generico"."short-book-1" a
left join "prod2-generico"."stores" b on
b."id" = a."store-id"
left join "prod2-generico"."drugs" c on
c."id" = a."drug-id"
left join (
select
"drug-id",
AVG("purchase-rate") as "purchase-rate",
AVG(ptr) as "ptr"
from
"prod2-generico"."inventory-1" i
where
"created-at" >= dateadd(day,
-360,
CURRENT_DATE)
group by
"drug-id") as dp on
a."drug-id" = dp."drug-id"
left join "prod2-generico"."distributors" d on
d."id" = a."distributor-id"
left join "prod2-generico"."drug-order-info" e on
e."store-id" = a."store-id"
and e."drug-id" = a."drug-id"
left join "prod2-generico"."distributors" f on
a."preferred-distributor" = f."id"
left join (
select
*
from
"prod2-generico"."store-dc-mapping"
where
"drug-type" = 'ethical') j on
j."store-id" = a."store-id"
left join "prod2-generico"."stores" ss on
ss."id" = j."forward-dc-id"
left join "prod2-generico"."store-delivered" msda on
a."id" = msda."id"
left join "prod2-generico"."stores-master" msm on
a."store-id" = msm.id
where
a."auto-short" = 1
and a."status" not in ('deleted')
and date(a."created-at")>=date(date_trunc('month', current_date) - interval '6 month');
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
    rs_db.close_connection()

# =============================================================================
# end of file: glue-jobs/src/scripts/as-ms/as_ms.py (package: zeno-etl-libs)
# =============================================================================
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "substitutable-groups"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"group",
"generic-flag"
)
select
d."group" as "id",
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
d."group" as "group",
count(distinct(case when (d1."type" = 'generic') then c."drug-id" end)) as "generic-flag"
from
"prod2-generico"."drug-unique-composition-mapping" d
inner join "prod2-generico"."drugs" d1 on
d1.id = d."drug-id"
inner join "prod2-generico"."inventory-1" c on
c."drug-id" = d."drug-id"
inner join "prod2-generico"."bill-items-1" a on
c."id" = a."inventory-id"
where
d."group" is not null
and d1."type" = 'generic'
group by
d."group"
;
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."substitutable-compositions";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
    rs_db.close_connection()

# =============================================================================
# end of file: glue-jobs/src/scripts/substitutable-groups/substitutable-groups.py (package: zeno-etl-libs)
# =============================================================================
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from datetime import datetime as dt
from datetime import timedelta
import pandas as pd
from dateutil.tz import gettz
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-fr', '--full_run', default=0, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
full_run = args.full_run
logger = get_logger()
logger.info(f"env: {env}")
logger.info(f"print the env again: {env}")
# params
if full_run:
    start = dt.strptime('2017-05-13', '%Y-%m-%d').date()  # keep as a date so (end - start) below is well-defined
end = dt.today().date() - timedelta(days=1)
else:
start = dt.today().date() - timedelta(days=91)
end = dt.today().date() - timedelta(days=1)
interval = (end - start).days
schema = 'prod2-generico'
table_name = "campaign-prepost"
rs_db = DB()
rs_db.open_connection()
s3 = S3()
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
read_schema = 'prod2-generico'
if isinstance(table_info, type(None)):
logger.info(f"table: {table_name} do not exist")
else:
truncate_query = f"""
DELETE
FROM
"{read_schema}"."{table_name}"
WHERE
"bill-date" BETWEEN '{start}' AND '{end}';
"""
logger.info(truncate_query)
rs_db.execute(truncate_query)
# Fetching all patients who used a promo code
df_q = f"""select
b.*,
r1."previous-3trips-abv",
r1."previous-90days-trips",
r1."pre3trips-promo",
r2."next-3trips-abv",
r2."next-90days-trips",
r2."post3trips-promo"
from
(
select
rm.id as "bill-id",
rm."patient-id",
rm."promo-code",
rm."promo-code-type" as "code-type",
rm."store-id",
rm."bill-date",
rm."prev-cum-nob" as "previous-total-trips",
rm.store,
rm.abo,
rm."store-manager",
rm."line-manager",
rm."store-opened-at",
rm."is-chronic",
rm."hd-flag",
rm."is-generic",
rm."pr-flag",
rm."is-repeatable",
rm."behaviour-segment",
rm."value-segment",
rm."total-spend" as "bill-value",
rm."promo-discount",
rm."recency-customer-days" as "last-visit-in-days",
pc."type",
pc."discount-type",
pc."discount-level",
pc."flat-discount",
pc."percent-discount",
pc."max-discount",
pc."min-purchase",
pc."max-time",
pc."max-per-patient",
pc."start",
pc.expiry,
pc."campaign-id",
c.campaign
from
"{read_schema}"."retention-master" rm
left join "{read_schema}"."promo-codes" pc on rm."promo-code-id" = pc.id
left join "{read_schema}".campaigns c on pc."campaign-id" = c.id
where
rm."promo-code-id" is not null
and datediff('days', rm."created-at", current_date) <= {interval}
) b
left join (
select
"bill-id",
avg("total-spend") as "previous-3trips-abv",
max("bill-rank-2") as "previous-90days-trips",
sum("promo-bill") as "pre3trips-promo"
from
(
select
rm1.id as "bill-id",
rm1."bill-date" as "bill-date-1",
rm2."bill-date" as "bill-date-2",
rm2."total-spend",
datediff('days', "bill-date-1", "bill-date-2") as "datediff",
rank() over (partition by rm1."id" order by "datediff" desc) as "bill-rank-1",
rank() over (partition by rm1."id" order by "datediff" asc) as "bill-rank-2",
datediff('days', "bill-date-1", current_date) as "datediff-filter",
(case
when rm2."promo-code-id" is not null then 1
else 0
end) "promo-bill"
from
"{read_schema}"."retention-master" rm1
left join "{read_schema}"."retention-master" rm2 on
rm1."patient-id" = rm2."patient-id"
where
"datediff" between -90 and 0
and rm1.id != rm2.id
and "datediff-filter" between 0 and {interval}
and rm1."promo-code-id" is not null
)
where
"bill-rank-1" <= 3
group by
"bill-id") r1 on
b."bill-id" = r1."bill-id"
left join (
select
"bill-id",
avg("total-spend") as "next-3trips-abv",
max("bill-rank-2") as "next-90days-trips",
sum("promo-bill") as "post3trips-promo"
from
(
select
rm1.id as "bill-id",
rm1."bill-date" as "bill-date-1",
rm2."bill-date" as "bill-date-2",
rm2."total-spend",
datediff('days', "bill-date-1", "bill-date-2") as "datediff",
rank() over (partition by rm1."id" order by "datediff" asc) as "bill-rank-1",
rank() over (partition by rm1."id" order by "datediff" desc) as "bill-rank-2",
datediff('days', "bill-date-1", current_date) as "datediff-filter",
(case
when rm2."promo-code-id" is not null then 1
else 0
end) "promo-bill"
from
"{read_schema}"."retention-master" rm1
left join "{read_schema}"."retention-master" rm2 on
rm1."patient-id" = rm2."patient-id"
where
"datediff" between 0 and 90
and rm1.id != rm2.id
and "datediff-filter" between 0 and {interval}
and rm1."promo-code-id" is not null
)
where
"bill-rank-1" <= 3
group by
"bill-id") r2 on
b."bill-id" = r2."bill-id";
"""
patient_data = rs_db.get_df(query=df_q)
logger.info(f'Patient promo data query : {df_q}')
logger.info(f'Patient promo data query size : {len(patient_data)}')
logger.info(patient_data.info())
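# Note on the r1/r2 subqueries above: for every promo bill they self-join the patient's other
# bills, keep those within -90..0 days (r1, "pre") or 0..90 days (r2, "post") of the promo bill,
# rank them by proximity and average the total-spend of the 3 nearest bills. As a hypothetical
# example, a promo bill on 2022-03-01 with earlier bills on 2022-02-25, 2022-02-10 and 2021-11-15
# would use only the first two for "previous-3trips-abv" (the November bill is outside the window).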
# data type correction
patient_data['bill-value'] = patient_data['bill-value'].astype(float)
patient_data['promo-discount'] = patient_data['promo-discount'].astype(float)
patient_data['flat-discount'] = patient_data['flat-discount'].fillna(0).astype(int)
patient_data['percent-discount'] = patient_data['percent-discount'].fillna(0).astype(int)
patient_data['max-discount'] = patient_data['max-discount'].fillna(0).astype(int)
patient_data['min-purchase'] = patient_data['min-purchase'].fillna(0).astype(int)
patient_data['max-time'] = patient_data['max-time'].fillna(0).astype(int)
patient_data['max-per-patient'] = patient_data['max-per-patient'].fillna(0).astype(int)
patient_data['campaign-id'] = patient_data['campaign-id'].fillna(0).astype(int)
patient_data['hd-flag'] = patient_data['hd-flag'].astype(int)
patient_data['pr-flag'] = patient_data['pr-flag'].astype(int)
patient_data['is-generic'] = patient_data['is-generic'].astype(int)
patient_data['is-chronic'] = patient_data['is-chronic'].astype(int)
patient_data['is-repeatable'] = patient_data['is-repeatable'].astype(int)
patient_data['previous-total-trips'] = patient_data['previous-total-trips'].fillna(0).astype(int)
patient_data['previous-90days-trips'] = patient_data['previous-90days-trips'].fillna(0).astype(int)
patient_data['pre3trips-promo'] = patient_data['pre3trips-promo'].fillna(0).astype(int)
patient_data['next-90days-trips'] = patient_data['next-90days-trips'].fillna(0).astype(int)
patient_data['post3trips-promo'] = patient_data['post3trips-promo'].fillna(0).astype(int)
patient_data['last-visit-in-days'] = patient_data['last-visit-in-days'].fillna(-1).astype(int)
patient_data['bill-date'] = pd.to_datetime(patient_data['bill-date']).dt.date
# etl
patient_data['created-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
patient_data['created-by'] = 'etl-automation'
patient_data['updated-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
patient_data['updated-by'] = 'etl-automation'
# Write to csv
s3.save_df_to_s3(df=patient_data[table_info['column_name']], file_name='Shubham_G/43/campaign_prepost.csv')
s3.write_df_to_db(df=patient_data[table_info['column_name']], table_name=table_name, db=rs_db, schema=schema)
# remove blanks
value_q = f"""update "{schema}"."{table_name}"
set "value-segment" = null
where "value-segment" = '';
"""
rs_db.execute(value_q)
# remove blanks
behaviour_q = f"""update "{schema}"."{table_name}"
set "behaviour-segment" = null
where "behaviour-segment" = '';
"""
rs_db.execute(behaviour_q)
# closing the connection
rs_db.close_connection()

# =============================================================================
# end of file: glue-jobs/src/scripts/campaign_prepost/campaign-prepost.py (package: zeno-etl-libs)
# =============================================================================
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper.parameter.job_parameter import parameter
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
job_params = parameter.get_params(job_id=48)
email_to = job_params['email_to']
skus = tuple(map(int, job_params['skus_tracked'].split(',')))
logger = get_logger()
logger.info(skus)
logger.info(f"env: {env}")
logger.info(f"print the env again: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
read_schema = 'prod2-generico'
# Fetching BOGO penetration and tracked-SKU data for the current month
penetration_q = f""" select
date("created-at") as date,
rm.store as Store_Name,
rm."store-city" as City_Name,
SUM(case when rm."promo-code" = 'BOGO' then 1 else 0 end) BOGO_Bills,
COUNT(distinct rm.id) Total_Bills,
SUM(case when rm."promo-code" = 'BOGO' then 1.0 else 0.0 end) / COUNT(distinct rm.id) Penetration
from
"{read_schema}"."retention-master" rm
where
date(rm."created-at") >= '2022-02-21'
and DATE(rm."created-at") < current_date
and extract (day from DATE(rm."created-at")) >= 1
and extract (month from DATE(rm."created-at")) = extract (month from current_date )
group by
date("created-at"),
rm.store,
rm."store-city";"""
penetration_city_q = f"""
select
date("created-at") as date,
rm."store-city" as City_Name,
SUM(case when rm."promo-code" = 'BOGO' then 1 else 0 end) BOGO_Bills,
COUNT(distinct rm.id) Total_Bills,
SUM(case when rm."promo-code" = 'BOGO' then 1.0 else 0.0 end) / COUNT(distinct rm.id) Penetration
from
"{read_schema}"."retention-master" rm
where
date(rm."created-at") >= '2022-02-21'
and DATE(rm."created-at") < current_date
and extract (day from DATE(rm."created-at")) >= 1
and extract (month from DATE(rm."created-at")) = extract (month from current_date )
group by
date("created-at"),
rm."store-city";"""
sku_q = f"""
select
date(s."created-at") date_,
s."store-name",
s."drug-name",
s."drug-id",
s."bill-flag",
(case
when s."promo-code" = 'BOGO' then 'BOGO'
when s."promo-code" is null then 'Organic'
when s."code-type" = 'referral' then 'referral'
else s."promo-code"
end) Promo_Code,
sum(s.quantity) quantity_
from
"{read_schema}"."sales" s
where
s."drug-id" in {skus}
and date(s."created-at") >= '2022-02-21'
and date(s."created-at") < current_date
and extract(day from s."created-at") >= 1
and extract(month from s."created-at") = extract(month from current_date)
group by
date(s."created-at"),
s."store-name",
s."drug-name",
s."drug-id",
s."bill-flag",
(case
when s."promo-code" = 'BOGO' then 'BOGO'
when s."promo-code" is null then 'Organic'
when s."code-type" = 'referral' then 'referral'
else s."promo-code"
end);
"""
avg_sku_q = f"""
select
T.date_,
T."store-name",
AVG(T.number_of_sku)
from
(
select
date(s."created-at") date_,
s."bill-id",
s."store-name",
COUNT(distinct s."drug-id") number_of_sku
from
"{read_schema}"."sales" s
where
s."promo-code" = 'BOGO'
and date(s."created-at") >= '2022-02-21'
and date(s."created-at") < current_date
and extract(day
from
date(s."created-at")) >= 1
and extract(month
from
date(s."created-at")) = extract(month
from
current_date)
group by
date(s."created-at"),
s."bill-id",
s."store-name") T
group by
T.date_,
T."store-name";"""
penetration = rs_db.get_df(penetration_q)
penetration_city = rs_db.get_df(penetration_city_q)
sku = rs_db.get_df(sku_q)
avg_sku = rs_db.get_df(avg_sku_q)
# MTD Report
penetration_mtd = penetration.groupby(['store_name',
'city_name'],
as_index=False).agg({'bogo_bills': 'sum',
'total_bills': 'sum'})
penetration_mtd['penetration'] = penetration_mtd['bogo_bills'] / penetration_mtd['total_bills']
penetration_city_mtd = penetration_city.groupby(['city_name'], as_index=False).agg({'bogo_bills': 'sum',
'total_bills': 'sum'})
penetration_city_mtd['penetration'] = penetration_city_mtd['bogo_bills'] / penetration_city_mtd['total_bills']
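# Note: MTD penetration is recomputed from the summed counts rather than by averaging daily
# ratios, so busier days carry proportionally more weight; e.g. (toy numbers) days with 1/10 and
# 9/40 BOGO bills give (1 + 9) / (10 + 40) = 20%, not the mean of 10% and 22.5% (= 16.25%).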
# file_name
penetration_file_name = 'penetration.csv'
penetration_mtd_file_name = 'penetration_mtd.csv'
penetration_city_file_name = 'penetration_city.csv'
penetration_city_mtd_file_name = 'penetration_city_mtd.csv'
sku_file_name = 'sku_sold.csv'
avg_sku_file_name = 'avg_sku_in_bill.csv'
# Uploading the file to s3
penetration = s3.save_df_to_s3(df=penetration, file_name=penetration_file_name)
penetration_mtd = s3.save_df_to_s3(df=penetration_mtd, file_name=penetration_mtd_file_name)
penetration_city = s3.save_df_to_s3(df=penetration_city, file_name=penetration_city_file_name)
penetration_city_mtd = s3.save_df_to_s3(df=penetration_city_mtd, file_name=penetration_city_mtd_file_name)
sku = s3.save_df_to_s3(df=sku, file_name=sku_file_name)
avg_sku = s3.save_df_to_s3(df=avg_sku, file_name=avg_sku_file_name)
# Sending email
subject = 'Campaign Tracker (BOGO)'
mail_body = "Reports are attached"
file_uris = [penetration, penetration_mtd, penetration_city, penetration_city_mtd, sku, avg_sku]
email = Email()
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=file_uris)
# closing the connection
rs_db.close_connection()

# =============================================================================
# end of file: glue-jobs/src/scripts/bogo_tracking/bogo_campaign_tracking.py (package: zeno-etl-libs)
# =============================================================================
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import pandas as pd
import numpy as np
import datetime
from datetime import timedelta
from zeno_etl_libs.django.api import Sql
import typing
from functools import reduce
from sklearn.preprocessing import StandardScaler
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-sd', '--datem', default='NA', type=str, required=False)
parser.add_argument('-my', '--sqlwrite', default='yes', type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
datem = args.datem
sqlwrite = args.sqlwrite
os.environ['env'] = env
logger = get_logger(level="INFO")
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
""" for data verification after update """
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
schema = 'prod2-generico'
table_name = 'drug-grades'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
if datem == 'NA':
    date1 = datetime.date.today().strftime('%Y-%m-%d')
    logger.info('No date passed in params, defaulting to the job run date: {}'.format(str(date1)))
else:
    date1 = datem
    logger.info('Date taken from params: {}'.format(str(date1)))
q_aa = f"""
select
"bill-id",
"patient-id" ,
"store-id" ,
"store-name" as "name",
"drug-id" ,
"drug-name" ,
"type" ,
"created-date" as "created-at" ,
NVL(sum(case when "bill-flag" = 'gross' then quantity end),
0) as "sold-quantity",
NVL(sum(case when "bill-flag" = 'return' then quantity end),
0) as "returned-quantity",
sum("net-quantity") as "quantity",
sum(rate) as "rate"
from
"prod2-generico"."sales"
where
datediff('day','{date1}',
"created-date") between -180 and -1
group by
"bill-id",
"patient-id" ,
"store-id" ,
"store-name",
"drug-id" ,
"drug-name" ,
"type" ,
"created-date"
having
sum("net-quantity")>0
"""
df_aa = rs_db.get_df(q_aa)
df_aa.columns = [c.replace('-', '_') for c in df_aa.columns]
logger.info('Shape of data: {}'.format(str(df_aa.shape)))
df_aa['quantity'].fillna(0, inplace=True)
df_aa['rate'].fillna(0, inplace=True)
df_aa['value'] = df_aa['rate'] * df_aa['quantity']
# =============================================================================
# Store opened at
# =============================================================================
q_bb = f"""
SELECT
"id",
datediff('day' ,
"opened-at",
'{date1}') as "age"
FROM
"prod2-generico"."stores"
WHERE
datediff('day' ,
"opened-at",
'{date1}' ) < 180
"""
df_bb = rs_db.get_df(q_bb)
df_bb.columns = [c.replace('-', '_') for c in df_bb.columns]
logger.info('Shape of stores data: {}'.format(str(df_bb.shape)))
def store_age(df_bb):
if df_bb['age'] >= 90:
return '3-6 month'
else:
return '1-3 month'
df_bb['age1'] = df_bb.apply(lambda x: store_age(x), axis=1)
# =============================================================================
# quantity sold
# =============================================================================
df_qty = df_aa.groupby(['drug_id', 'store_id'])[['quantity']].sum().reset_index()
df_qty1 = df_aa.groupby(['drug_id'])[['quantity']].sum().reset_index()
# =============================================================================
# revenue
# =============================================================================
df_revenue = df_aa.groupby(['drug_id', 'store_id'])[['value']].sum().reset_index()
df_revenue1 = df_aa.groupby(['drug_id'])[['value']].sum().reset_index()
# =============================================================================
# no. of bills
# =============================================================================
df_bills = df_aa.groupby(['drug_id', 'store_id'])[['bill_id']].nunique().reset_index()
df_bills1 = df_aa.groupby(['drug_id'])[['bill_id']].nunique().reset_index()
# =============================================================================
# no. of consumers
# =============================================================================
df_consumers = df_aa.groupby(['drug_id', 'store_id'])[['patient_id']].nunique().reset_index()
df_consumers1 = df_aa.groupby(['drug_id'])[['patient_id']].nunique().reset_index()
df_aa['created_at'] = pd.to_datetime(df_aa['created_at'])
# =============================================================================
# no. of days sold
# =============================================================================
df_aa['days'] = df_aa['created_at'].dt.date
df_days = df_aa.groupby(['drug_id', 'store_id'])[['days']].nunique().reset_index()
df_days1 = df_aa.groupby(['drug_id'])[['days']].nunique().reset_index()
# =============================================================================
# recency (last sold)
# =============================================================================
days = timedelta(1)
period_end_d = pd.to_datetime(date1) - days
df_recency = df_aa.groupby(['drug_id', 'store_id'])[['created_at']].max().reset_index()
df_recency1 = df_aa.groupby(['drug_id'])[['created_at']].max().reset_index()
df_recency['recency'] = (pd.to_datetime(period_end_d) - df_recency['created_at']).dt.days
df_recency1['recency'] = (pd.to_datetime(period_end_d) - df_recency1['created_at']).dt.days
# =============================================================================
# merge all features
# =============================================================================
meg = [df_qty, df_revenue, df_bills, df_consumers, df_days, df_recency]
df_features = reduce(lambda left, right: pd.merge(left, right, on=[
'drug_id', 'store_id'], how='outer'), meg)
del (df_features['created_at'])
meg1 = [df_qty1, df_revenue1, df_bills1, df_consumers1, df_days1, df_recency1]
df_features1 = reduce(lambda left, right: pd.merge(left, right, on=[
'drug_id'], how='outer'), meg1)
del (df_features1['created_at'])
df_features = df_features1.append(df_features)
df_features['store_id'] = df_features['store_id'].fillna(999)
df_features = df_features.reset_index().drop('index', axis=1)
# =============================================================================
# creating standard scaler store wise
# =============================================================================
temp_normalise = df_features[['store_id', 'quantity', 'value', 'bill_id', 'patient_id', 'days', 'recency']]
class SklearnWrapper:
def __init__(self, transform: typing.Callable):
self.transform = transform
def __call__(self, df):
transformed = self.transform.fit_transform(df.values)
return pd.DataFrame(transformed, columns=df.columns, index=df.index)
# This one will apply any sklearn transform you pass into it to a group.
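# Commented-out sketch of the wrapper on toy data (illustration only, kept out of the pipeline):
#   toy = pd.DataFrame({'store_id': [1, 1, 2, 2], 'quantity': [1.0, 3.0, 10.0, 30.0]})
#   toy.groupby('store_id').apply(SklearnWrapper(StandardScaler())).drop('store_id', axis=1)
# each store's quantity is z-scored against that store's own mean/std, which is why the scaler
# is applied per group here instead of once over the whole feature frame.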
df_rescaled = (
temp_normalise.groupby('store_id')
.apply(SklearnWrapper(StandardScaler()))
.drop('store_id', axis=1)
)
temp2_normalise = df_rescaled
# =============================================================================
# importing pca_components and applying them to the scaled data set
# =============================================================================
pca_file_name = 'drug_grades/pca_components.csv'
pca_file_path = s3.download_file_from_s3(file_name=pca_file_name)
pca_components = pd.read_csv(pca_file_path, delimiter=',')
# =============================================================================
# creating Euclidean distance calculator and assigning each point to its nearest cluster
# =============================================================================
cluster_file_name = 'drug_grades/cluster_centers_1.csv'
pca_file_path = s3.download_file_from_s3(file_name=cluster_file_name)
cluster_centers_set = pd.read_csv(pca_file_path, delimiter=',')
cluster_centers_set = np.array(cluster_centers_set)
# Euclidean distance calculator
def dist(a, b, ax=1):
return np.linalg.norm(a - b, axis=ax)
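# Commented-out example of the nearest-centroid step below (illustrative numbers only):
#   dist(np.array([3.0, 4.0]), np.array([[0.0, 0.0], [3.0, 0.0]]))   # -> array([5., 4.])
#   np.argmin(...) would then pick centroid index 1 for this point.
# The loop below does exactly this for every PCA-projected drug vector in `test`.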
clusters = []
test = np.dot(np.array(temp2_normalise), (np.array(pca_components).T))
for i in range(len(test)):
distances = dist(np.array(test[i]), (cluster_centers_set))
cluster = np.argmin(distances)
clusters.append(cluster)
cluster_df = pd.DataFrame(clusters)
cluster_df.columns = ['final_cluster']
# =============================================================================
# Summary pivot 1
# =============================================================================
test_df = pd.DataFrame(test)
cluster_lvl_1 = pd.merge(test_df, cluster_df,
right_index=True, left_index=True)
cluster_lvl1_output = pd.merge(cluster_lvl_1, df_features, how='inner',
left_index=True, right_index=True)
cluster_lvl1_output_pivot = cluster_lvl1_output.groupby(['final_cluster', 'store_id'],
as_index=False).agg({'drug_id': ['count'],
'value': ['sum'],
'bill_id': ['mean'],
'patient_id': ['mean'],
'days': ['mean'],
'recency': ['mean']}).reset_index(
drop=True)
cluster_lvl1_output_pivot.columns = ['_'.join(x) for x in
cluster_lvl1_output_pivot.columns.ravel()]
cluster_lvl1_output_pivot_name = 'drug_grades/cluster_lv1_output.csv'
# Uploading File to S3
s3.save_df_to_s3(df=cluster_lvl1_output_pivot, file_name=cluster_lvl1_output_pivot_name)
# =============================================================================
# # 2nd level
# =============================================================================
# =============================================================================
# Further split of large cluster
# =============================================================================
further_split_lvl2 = cluster_lvl1_output[cluster_lvl1_output['final_cluster'] == 0]
# change features here if needed
further_split_lvl2 = pd.DataFrame(further_split_lvl2[[0, 1, 2, 3]])
further_split_lvl2_mat = np.array(further_split_lvl2)
cluster2_file_name = 'drug_grades/cluster_centers_2.csv'
pca_file_path = s3.download_file_from_s3(file_name=cluster2_file_name)
cluster_centers_set2 = pd.read_csv(pca_file_path, delimiter=',')
cluster_centers_set2 = np.array(cluster_centers_set2)
clusters_lvl2 = []
for i in range(len(further_split_lvl2)):
distances = dist((further_split_lvl2_mat[i]), (cluster_centers_set2))
clusterlvl2 = np.argmin(distances)
clusters_lvl2.append(clusterlvl2)
further_split_lvl2_df = pd.DataFrame(further_split_lvl2)
further_split_lvl2_df['final_cluster_lvl2'] = clusters_lvl2
# =============================================================================
# Summary pivot 2
# =============================================================================
cluster_lvl2_output = pd.merge(cluster_lvl1_output, further_split_lvl2_df[['final_cluster_lvl2']],
how='inner',
left_index=True, right_index=True)
cluster_lvl2_output_pivot = cluster_lvl2_output.groupby(['final_cluster_lvl2', 'store_id'],
as_index=False).agg({'drug_id': ['count'],
'value': ['sum'],
'bill_id': ['mean'],
'patient_id': ['mean'],
'days': ['mean'],
'recency': ['mean']}).reset_index(
drop=True)
cluster_lvl2_output_pivot.columns = ['_'.join(x) for x in
cluster_lvl2_output_pivot.columns.ravel()]
cluster_lvl2_output_pivot_name = 'drug_grades/cluster_lvl2_output.csv'
# Uploading File to S3
s3.save_df_to_s3(df=cluster_lvl2_output_pivot, file_name=cluster_lvl2_output_pivot_name)
# =============================================================================
# Final cluster
# =============================================================================
cluster_file = cluster_lvl1_output[cluster_lvl1_output['final_cluster'] != 0]
final_cluster_file = cluster_file.append(cluster_lvl2_output)
final_cluster_file['cluster'] = final_cluster_file['final_cluster'
].astype(str) + '_' + final_cluster_file['final_cluster_lvl2'].astype(str)
final_output_pivot = final_cluster_file.groupby(['cluster', 'store_id'],
as_index=False).agg({'drug_id': ['count'],
'value': ['sum'],
'bill_id': ['mean'],
'patient_id': ['mean'],
'days': ['mean'],
'recency': ['mean']}).reset_index(drop=True)
final_output_pivot.columns = ['_'.join(x) for x in
final_output_pivot.columns.ravel()]
final_output_pivot['drug%'] = final_output_pivot['drug_id_count'
] / final_output_pivot['drug_id_count'].sum()
final_output_pivot['spend%'] = final_output_pivot['value_sum'
] / final_output_pivot['value_sum'].sum()
final_output_pivot['drug%'] = final_output_pivot['drug%'].astype('float64')
final_output_pivot['spend%'] = final_output_pivot['spend%'].astype('float64')
final_output_pivot['factor'] = final_output_pivot['spend%'] / final_output_pivot['drug%']
# =============================================================================
# cluster allocation
# =============================================================================
new_store = df_bb['id'].values
new_store1 = df_bb['id'][df_bb['age1'] == '3-6 month'].values
new_store2 = df_bb['id'][df_bb['age1'] == '1-3 month'].values
new_store1_cluster = final_cluster_file[final_cluster_file.store_id.isin(new_store1)]
new_store2_cluster = final_cluster_file[final_cluster_file.store_id.isin(new_store2)]
Enterprise_cluster = final_cluster_file[final_cluster_file.store_id == 999]
old_stores_cluster = final_cluster_file[(~final_cluster_file.store_id.isin(new_store)) &
(final_cluster_file.store_id != 999)]
new_store1_cluster.drop(['cluster'], axis=1, inplace=True)
new_store2_cluster.drop(['cluster'], axis=1, inplace=True)
new_store1_predict = pd.merge(new_store1_cluster, Enterprise_cluster[['drug_id', 'cluster']], how='left',
left_on='drug_id', right_on='drug_id')
for i in range(len(new_store2)):
Enterprise_temp = Enterprise_cluster.copy()
Enterprise_temp['new_store_id'] = new_store2[i]
if i == 0:
new_store2_predict_data = Enterprise_temp
else:
new_store2_predict_data = new_store2_predict_data.append(Enterprise_temp)
new_store2_predict = new_store2_predict_data
del new_store2_predict['store_id']
new_store2_predict = new_store2_predict.rename({'new_store_id': 'store_id'}, axis=1)
cluster_all = new_store1_predict.append(new_store2_predict)
cluster_all = cluster_all.append(Enterprise_cluster)
cluster_all = cluster_all.append(old_stores_cluster)
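# Allocation summary: stores aged 3-6 months (new_store1) keep their own drug rows but
# borrow cluster labels from the Enterprise (store_id 999) profile via a drug-level join;
# stores aged 1-3 months (new_store2) inherit the full Enterprise drug-cluster profile;
# Enterprise and older stores retain their own clusters.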
# =============================================================================
# Summary report
# =============================================================================
cluster_all_pivote = cluster_all.groupby(['cluster', 'store_id'],
as_index=False).agg({'drug_id': ['count'],
'value': ['sum'],
'bill_id': ['mean'],
'patient_id': ['mean'],
'days': ['mean'],
'recency': ['mean']}).reset_index(drop=True)
cluster_all_pivote.columns = ['_'.join(x) for x in
cluster_all_pivote.columns.ravel()]
cluster_all_pivote['drug%'] = cluster_all_pivote['drug_id_count'
] / cluster_all_pivote['drug_id_count'].sum()
cluster_all_pivote['spend%'] = cluster_all_pivote['value_sum'
] / cluster_all_pivote['value_sum'].sum()
cluster_all_pivote['drug%'] = cluster_all_pivote['drug%'].astype('float64')
cluster_all_pivote['spend%'] = cluster_all_pivote['spend%'].astype('float64')
cluster_all_pivote['factor'] = cluster_all_pivote['spend%'
] / cluster_all_pivote['drug%']
cluster_all_pivote_name = 'drug_grades/cluster_all_pivot.csv'
# Uploading File to S3
s3.save_df_to_s3(df=cluster_all_pivote, file_name=cluster_all_pivote_name)
# =============================================================================
# Assigning Cluster
# =============================================================================
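# Map the combined level-1/level-2 cluster labels to business grades (A1/A2/B/C/D).
# The specific label-to-grade mapping below reflects the cluster profiles observed for
# this model run; any unmapped label falls through unchanged.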
def assign_cluster(cluster_all):
if cluster_all['cluster'] == '1_nan':
return 'A1'
elif cluster_all['cluster'] == '2_nan':
return 'A1'
elif cluster_all['cluster'] == '4_nan':
return 'A2'
elif cluster_all['cluster'] == '0_2.0':
return 'B'
elif cluster_all['cluster'] == '3_nan':
return 'D'
elif cluster_all['cluster'] == '0_0.0':
return 'C'
elif cluster_all['cluster'] == '0_1.0':
return 'C'
else:
return cluster_all['cluster']
cluster_all['grade'] = cluster_all.apply(lambda row: assign_cluster(row), axis=1)
cluster_all_name = 'drug_grades/cluster_all.csv'
# Uploading File to S3
s3.save_df_to_s3(df=cluster_all, file_name=cluster_all_name)
cluster_all_pivote1 = cluster_all.groupby(['grade', 'store_id'],
as_index=False).agg({'drug_id': ['count'],
'value': ['sum'],
'bill_id': ['mean'],
'patient_id': ['mean'],
'days': ['mean'],
'recency': ['mean']}).reset_index(drop=True)
cluster_all_pivote1.columns = ['_'.join(x) for x in
cluster_all_pivote1.columns.ravel()]
cluster_all_pivote1['drug%'] = cluster_all_pivote1['drug_id_count'
] / cluster_all_pivote1['drug_id_count'].sum()
cluster_all_pivote1['spend%'] = cluster_all_pivote1['value_sum'
] / cluster_all_pivote1['value_sum'].sum()
cluster_all_pivote1['drug%'] = cluster_all_pivote1['drug%'].astype('float64')
cluster_all_pivote1['spend%'] = cluster_all_pivote1['spend%'].astype('float64')
cluster_all_pivote1['factor'] = cluster_all_pivote1['spend%'
] / cluster_all_pivote1['drug%']
cluster_all_pivote1_name = 'drug_grades/cluster_all_pivot1.csv'
# Uploading File to S3
s3.save_df_to_s3(df=cluster_all_pivote1, file_name=cluster_all_pivote1_name)
final_data = cluster_all[['store_id', 'drug_id', 'grade']]
final_data['calculation_date'] = date1
final_data.columns = [c.replace('_', '-') for c in final_data.columns]
final_data['created-at'] = datetime.datetime.now()
final_data['store-id'] = final_data['store-id'].astype(int)
final_data['drug-id'] = final_data['drug-id'].astype(int)
s3.write_df_to_db(df=final_data[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
'''getting current grades and replacing them with new if changed'''
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
for store_id in final_data['store-id'].unique():
if sqlwrite == 'yes':
if store_id != 999:
logger.info(f"SQL update starts !!!")
current_grade_query = f'''
SELECT
id,
"store-id",
"drug-id",
"drug-grade"
FROM "prod2-generico"."drug-order-info-data"
WHERE "store-id" = {store_id}
'''
current_grade = rs_db.get_df(current_grade_query)
current_grade.columns = [c.replace('-', '_') for c in current_grade.columns]
final_data_store = final_data.loc[
final_data['store-id'] == store_id,
['store-id', 'drug-id', 'grade']]
final_data_store.columns = [c.replace('-', '_') for c in final_data_store.columns]
grade_joined = current_grade.merge(
final_data_store, on=['store_id', 'drug_id'], how='outer')
grade_joined.loc[grade_joined['grade'].isna(), 'grade'] = 'NA'
new_drug_entries = new_drug_entries.append(
grade_joined[grade_joined['id'].isna()])
grade_joined = grade_joined[~grade_joined['id'].isna()]
grade_joined['change_flag'] = np.where(
grade_joined['drug_grade'] == grade_joined['grade'],
'same', 'changed')
logger.info('Store ' + str(store_id))
logger.info('Total grades calculated: ' + str(final_data_store.shape[0]))
logger.info('Grades changed: ' + str(grade_joined[
grade_joined['change_flag'] == 'changed'].shape[0]))
grades_to_change = grade_joined.loc[
grade_joined['change_flag'] == 'changed',
['id', 'store_id', 'drug_id', 'grade']]
grades_to_change.columns = ['id', 'store_id', 'drug_id', 'drug_grade']
data_to_be_updated_list = list(
grades_to_change[['id', 'drug_grade']].apply(dict, axis=1))
sql = Sql()
""" update using api """
status, text = sql.update(
{'table': 'DrugOrderInfoData',
'data_to_be_updated': data_to_be_updated_list}, logger
)
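# Verify the API update: re-read rows whose grade-updated-at changed in the last
# 10 minutes and compare them against the grades just pushed; mismatches are collected
# into missed_entries for the summary report uploaded to S3 below.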
update_test_query = '''
SELECT
`store-id`,
`drug-id`,
`drug-grade`
FROM `drug-order-info-data`
WHERE `store-id` = {store_id}
and `grade-updated-at` >= CURRENT_TIMESTAMP() - INTERVAL 10 MINUTE
and `grade-updated-at` < CURRENT_TIMESTAMP()
'''.format(store_id=store_id)
update_test = pd.read_sql_query(update_test_query, mysql_write.connection)
update_test.columns = [c.replace('-', '_') for c in update_test.columns]
update_test = grades_to_change.merge(
update_test, how='left', on=['store_id', 'drug_id'],
suffixes=('', '_updated'))
mismatch = update_test[
update_test['drug_grade'] != update_test['drug_grade_updated']]
missed_entries = missed_entries.append(mismatch)
logger.info('For store ' + str(store_id) + ', update mismatch count: '
+ str(mismatch.shape[0]))
new_drug_entries_name = 'drug_grades/new_drug_entries.csv'
# Uploading File to S3
s3.save_df_to_s3(df=new_drug_entries, file_name=new_drug_entries_name)
missed_entries_name = 'drug_grades/missed_entries.csv'
# Uploading File to S3
s3.save_df_to_s3(df=missed_entries, file_name=missed_entries_name)
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/drug-grades/drug-grades.py | drug-grades.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "ecomm"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."ecomm"
(
"etl-created-at",
"etl-created-by",
"updated-at",
"updated-by",
"zeno-order-id",
"patient-id",
"promo-code-id",
"preferred-store-id",
"order-type",
"zeno-created-at",
"zeno-created-by",
"is-prescription-required",
"order-number",
"status",
"comments",
"zeno-drug-id",
"patient-store-order-id",
"zeno-qty",
"overall-min-bill-date",
"type",
"category",
"composition",
"company-id",
"composition-master-id",
"zeno-drug-name",
"zeno-drug-type",
"source-pincode",
"order-cancellation-reason-id",
"cancel-comment",
"cancelled-at",
"cancelled-by",
"cancel-reason",
"cancel-type",
"pso-requested-quantity",
"patient-request-id",
"pso-created-at",
"pso-created-by",
"pso-inventory-quantity",
"pso-status",
"store-id",
"bill-id",
"slot-id",
"turnaround-time",
"delivered-at",
"assigned-to",
"slot-type",
"per-slot-capacity",
"vendor-bill-number",
"prescription-needed",
"prescreptions-created",
"completed-at",
"mrp",
"selling-rate",
"gross-quantity",
"sale-flag",
"gross-revenue-value",
"returned-quantity",
"returned-revenue-value",
"promo-code",
"promo-code-type",
"promo-eligibility",
"campaign-name",
"store-name",
"store-city",
"store-b2b",
"abo",
"line-manager",
"order-origin",
"zeno-drug-created-by",
"billed-at",
"delivery-type",
"order-lead")
select
convert_timezone('Asia/Calcutta',
GETDATE()) as "etl-created-at",
'etl-automation' as "etl-created-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
'etl-automation' as "updated-by",
zo.id as "zeno-order-id",
zo."patient-id" as "patient-id" ,
zo."promo-code-id" as "promo-code-id",
zo."preferred-store-id" as "preferred-store-id",
zo."order-type" as "order-type" ,
zo."created-at" as "zeno-created-at",
zo."created-by" as "zeno-created-by",
zo."is-prescription-required" as "is-prescription-required",
zo."order-number" as "order-number",
zo."status",
zo."comments",
zos."drug-id" as "zeno-drug-id",
pso."id" as "patient-store-order-id" ,
zos."quantity" as "zeno-qty",
pm."first-bill-date" as "overall-min-bill-date",
d2."type",
d2."category",
d2."composition",
d2."company-id" as "company-id",
d2."composition-master-id" as "composition-master-id",
d2."drug-name" as "zeno-drug-name",
d2."type" as "zeno-drug-type",
zpa."pincode" as "source-pincode",
zocrm."order-cancellation-reason-id" as "order-cancellation-reason-id" ,
zocrm."comments" as "cancel-comment",
zocrm."created-at" as "cancelled-at",
zocrm."created-by" as "cancelled-by",
zer."reason-name" as "cancel-reason",
zer."type" as "cancel-type",
mpr."pso-requested-quantity" ,
mpr."patient-request-id" ,
mpr."created-at" as "pso-created-at",
mpr."created-by" as "pso-created-by",
mpr."pso-inventory-quantity" ,
mpr."pso-status" ,
mpr."store-id" ,
mpr."bill-id",
mhd."slot-id" ,
mhd."turnaround-time",
mhd."delivered-at",
mhd."assigned-to" ,
mhd."slot-type",
mhd."per-slot-capacity",
mhd."vendor-bill-number",
mzol."prescription-needed",
mzol."prescreptions-created",
mzol."completed-at",
case
when zos."mrp" is null
and d2."type" = 'generic' then 35
when zos."mrp" is null
and d2."type" != 'generic' then 100
else zos."mrp"
end as "mrp",
case
when zos."selling-rate" is null
and d2."type" = 'generic' then 35
when zos."selling-rate" is null
and d2."type" != 'generic' then 100
else zos."selling-rate"
end as "selling-rate",
msa."gross-quantity" ,
case
when msa."gross-quantity" is null then false
else true
end as "sale-flag",
msa."gross-revenue-value" ,
msa."returned-quantity" ,
msa."returned-revenue-value",
mp."promo-code" ,
mp."promo-code-type" ,
mp."promo-eligibility" ,
mp."campaign-name" ,
msm.store as "store-name",
msm.city as "store-city",
msm."store-b2b",
msm.abo,
msm."line-manager" ,
zc."name" as "order-origin",
zos."created-by" as "zeno-drug-created-by",
msa."created-at" as "billed-at",
mhd."delivery-type",
zo."order-lead"
from
"prod2-generico"."zeno-order" as zo
left join "prod2-generico"."zeno-order-sku" zos on
zos."zeno-order-id" = zo."id"
left join "prod2-generico"."patients-store-orders" pso on
pso."zeno-order-id" = zos."zeno-order-id"
and zos."drug-id" = pso."drug-id"
left join "prod2-generico"."patient-requests-metadata" mpr on
pso."id" = mpr."id"
left join "prod2-generico"."home-delivery-metadata" mhd on
pso."id" = mhd."id"
left join "prod2-generico"."zeno-order-logs" mzol on
zo."id" = mzol."id"
left join "prod2-generico".stores s2 on
s2."id" = pso."store-id"
left join "prod2-generico"."zeno-patient-address" zpa on
zo."patient-address-id" = zpa."id"
left join "prod2-generico"."store-groups" zc on
zc."id" = zo."store-group-id"
left join "prod2-generico".drugs d2 on
d2."id" = zos."drug-id"
left join "prod2-generico"."patients-metadata-2" pm on
zo."patient-id" = pm."id"
left join "prod2-generico"."zeno-order-cancellation-reason-mapping" zocrm on
zocrm."zeno-order-id" = zo."id"
left join "prod2-generico"."zeno-escalation-reason" zer on
zer."id" = zocrm."order-cancellation-reason-id"
left join "prod2-generico"."sales-agg" msa on
NVL(pso."bill-id",
0) = msa."bill-id"
and pso."drug-id" = msa."drug-id"
left join "prod2-generico"."promo" mp on
pso."promo-id" = mp."id"
left join "prod2-generico"."stores-master" msm on
pso."store-id" = msm."id" ;
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."ecomm";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ecomm/ecomm.py | ecomm.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from datetime import datetime
from datetime import timedelta
from dateutil.tz import gettz
import pandas as pd
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-rd', '--full_run', default="no", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
full_run = args.full_run
email_to = args.email_to
# env = 'stage'
# limit = 10
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
# Connections
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
# Run date
run_date = datetime.today().strftime('%Y-%m-%d')
# runtime_date = '2021-09-01'
logger.info("Running for {}".format(run_date))
# Period start-date
if full_run == 'yes':
period_start_d = '2021-01-01'
else:
# Run only for the last 30 days, because statuses for earlier periods would not change
# It could be limited to the last 15 days, but 30 is a safe buffer
period_start_d = (pd.to_datetime(run_date) - timedelta(days=30)).strftime('%Y-%m-%d')
logger.info("Running from {} to {}".format(period_start_d, run_date))
# Remaining data to be fetched
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params = None)
data_q = """
SELECT
c.`patient-id`,
c.`store-id`,
c.`call-date` call_date,
max( (case when ((d.`call-recording-url` is not null
and d.`call-recording-url` !='') or (d.`connected` =1)) then 1 else 0 end)) as connected
FROM
`calling-dashboard` c
INNER JOIN `calling-history` d
on c.`id` = d.`calling-dashboard-id`
WHERE
c.`status`='closed'
and c.`call-date` >= '{0}'
GROUP BY
c.`patient-id`,
c.`store-id`,
c.`call-date`
""".format(period_start_d)
data_q = data_q.replace('`','"')
logger.info(data_q)
rs_db.execute(data_q, params=None)
calling_dashboard_data: pd.DataFrame = rs_db.cursor.fetch_dataframe()
if calling_dashboard_data is None:
calling_dashboard_data = pd.DataFrame(columns = ['patient_id', 'store_id', 'call_date', 'connected'])
calling_dashboard_data.columns = [c.replace('-', '_') for c in calling_dashboard_data.columns]
logger.info(len(calling_dashboard_data))
calling_dashboard_data.head()
calling_dashboard_data['call_date'] = pd.to_datetime(calling_dashboard_data['call_date'])
calling_dashboard_data['call_year'] = calling_dashboard_data['call_date'].dt.year
calling_dashboard_data['call_month'] = calling_dashboard_data['call_date'].dt.month
# For maximum call date in month
# Order on connected desc, then can take drop duplicates
calling_dashboard_data = calling_dashboard_data.sort_values(by = ['patient_id', 'store_id',
'call_year', 'call_month',
'connected'],
ascending = [True, True,
True, True,
False])
logger.info(len(calling_dashboard_data))
calling_dashboard_data = calling_dashboard_data.drop_duplicates(subset = ['patient_id', 'store_id',
'call_year', 'call_month'])
logger.info(len(calling_dashboard_data))
# Find window date that is 20 days after the calling date
calling_dashboard_data['window_date'] = calling_dashboard_data['call_date'] + timedelta(days=20)
# Make a tuple of those unique patient ids
unique_patient_ids_tuple = tuple(calling_dashboard_data['patient_id'].drop_duplicates().to_list())
logger.info("Look up for {} patients".format(len(unique_patient_ids_tuple)))
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params = None)
data_q = """
SELECT
`patient-id`,
date(`created-at`) bill_date,
sum("revenue-value") spend
FROM
sales
WHERE
`patient-id` in {0}
AND "bill-flag" = 'gross'
GROUP BY
`patient-id`,
date(`created-at`)
""".format(unique_patient_ids_tuple)
data_q = data_q.replace('`','"')
#logger.info(data_q)
rs_db.execute(data_q, params=None)
patient_data: pd.DataFrame = rs_db.cursor.fetch_dataframe()
if patient_data is None:
patient_data = pd.DataFrame(columns = ['patient_id', 'bill_date', 'spend'])
patient_data.columns = [c.replace('-', '_') for c in patient_data.columns]
logger.info(len(patient_data))
patient_data.head()
patient_data['bill_date'] = pd.to_datetime(patient_data['bill_date'])
# Round the spend values
patient_data['spend'] = patient_data['spend'].astype(float).round(2)
# Merge patient bill data and calling dashboard data
conversion_data = pd.merge(patient_data, calling_dashboard_data, how='left', on='patient_id')
# Keep only those records from merged dataframe
# where the bill date falls between the call date and the window date (converted customers)
conversion_data = conversion_data[((conversion_data['bill_date'] >= conversion_data['call_date']) & (
conversion_data['bill_date'] <= conversion_data['window_date']))]
conversion_data.sort_values('patient_id', inplace=True)
# Find out the minimum bill date of converted customer
min_bill_date_after_conversion = conversion_data.groupby(['patient_id', 'call_date'])['bill_date'].min().to_frame(
name='min_bill_date_after_conversion').reset_index()
# Merge minimum bill date of converted customer with conversion data from above
conversion_data = pd.merge(conversion_data, min_bill_date_after_conversion,
left_on=['patient_id', 'call_date', 'bill_date'],
right_on=['patient_id', 'call_date', 'min_bill_date_after_conversion'])
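# Keeping only the minimum bill date per (patient, call) attributes each call to the
# first gross bill inside its 20-day window, so later bills in the same window are not
# double-counted as conversions.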
conversion_data.drop(['call_month', 'call_year', 'window_date',
'min_bill_date_after_conversion'], axis=1,inplace=True)
# Take the latest call-date for any bill-date, to avoid boundary cases
conversion_data = conversion_data.sort_values(by=['patient_id', 'bill_date', 'call_date'],
ascending=[True, False, False])
conversion_data = conversion_data.drop_duplicates(subset=['patient_id', 'bill_date'])
# Sort again to ascending
conversion_data = conversion_data.sort_values(by=['patient_id', 'call_date'])
conversion_data = conversion_data[['patient_id', 'store_id',
'call_date', 'bill_date', 'spend', 'connected']]
#################################################
# Write to dB
################################################
# Truncate table data and reset index
write_schema = 'prod2-generico'
write_table_name = 'crm-conversion-overall'
table_info = helper.get_table_info(db=rs_db_write, table_name=write_table_name, schema=write_schema)
# table_info_clean = table_info[~table_info['column_name'].isin(['id','created-at','updated-at'])]
rs_db_write.execute(f"set search_path to '{write_schema}'", params = None)
truncate_q = f"""
DELETE FROM
"{write_table_name}"
WHERE
"call-date" >= '{period_start_d}'
"""
truncate_q = truncate_q.replace('`','"')
logger.info(truncate_q)
rs_db_write.execute(truncate_q, params=None)
data_export = conversion_data.copy()
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
# Mandatory lines
data_export['created-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['created-by'] = 'etl-automation'
data_export['updated-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['updated-by'] = 'etl-automation'
# Upload data to dB, sensitive
s3.write_df_to_db(df=data_export[table_info['column_name']], table_name=write_table_name, db=rs_db_write,
schema=write_schema)
logger.info("Uploading successful with length: {}".format(len(data_export)))
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection()
logger.info("File ends") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/crm-conversion-overall/crm-conversion-overall.py | crm-conversion-overall.py |
import argparse
import sys
import os
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.helper.aws.s3 import S3
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-ss', '--source_schema_name', default="test-generico", type=str,
required=False)
parser.add_argument('-ts', '--target_schema_name', default="test-generico", type=str,
required=False)
parser.add_argument('-lot', '--list_of_tables',
default="""drugs""",
type=str,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
source_schema_name = args.source_schema_name
target_schema_name = args.target_schema_name
list_of_tables = args.list_of_tables.split(",")
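# Example invocation (illustrative; script name and flag values are hypothetical):
# python <this_script>.py -e stage -ss test-generico -ts test-generico -lot "drugs,stores"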
""" write connection """
db = DB(read_only=False)
db.open_connection()
""" read connection """
mysql_db = MySQL(read_only=False)
mysql_db.open_connection()
s3 = S3()
def df_type_change(df: pd.DataFrame):
type_map = {"company-id": int, "pack-of": int, "preferred-distributor": int}
df_cols = df.columns
for key, type_name in type_map.items():
if key in df_cols:
df[key] = df[key].fillna(0).astype(type_name)
return df
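# Per-table full refresh: read the whole table from MySQL, delete all rows in the
# Redshift target, then bulk-insert via S3. df_type_change coerces nullable integer
# columns so they survive the round trip through pandas.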
for table in list_of_tables:
logger.info(f"Select table: {table}")
""" read the data from source database """
query = f""" select * from `{source_schema_name}`.`{table}` ; """
df = pd.read_sql(con=mysql_db.connection, sql=query)
""" clean the table first """
logger.info(f"Delete started: {table}")
query = f""" delete from "{target_schema_name}"."{table}" ; """
db.execute(query=query)
logger.info(f"Insert started: {table}")
""" insert the data """
df = df_type_change(df)
s3.write_df_to_db(df=df, table_name=table, db=db, schema=target_schema_name)
logger.info(f"End table: {table}") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/mysql-to-redshift-table-sync/mysql-to-redshift-table-sync.py | mysql-to-redshift-table-sync.py |
import json
from datetime import date, timedelta
# Warnings
from warnings import filterwarnings as fw
import pandas as pd
fw('ignore')
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.helper.email.email import Email
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-sd1', '--start_date', default=None, type=str, required=False)
parser.add_argument('-ed1', '--end_date', default=None, type=str, required=False)
parser.add_argument('-sec', '--section', default='all', type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
# parameters
email_to = args.email_to
start_date = args.start_date
end_date = args.end_date
section = args.section
if end_date is None:
end_date = date.today()
start_date = end_date - timedelta(days=365)
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB(read_only=True)
rs_db.open_connection()
mysql_read_db = MySQL(read_only=True)
mysql_read_db.open_connection()
s3 = S3()
read_schema = 'prod2-generico'
file_paths = []
if ('all' in section) or ('tech' in section):
##############################################
############# Tech Section ###################
##############################################
# CASE1
# Where total bill value is less than promo-min-purchase condition
case1_t_q = f"""
select
id as "bill-id",
"patient-id",
"store-id",
store as "store-name",
"promo-code-id",
"promo-code",
"promo-discount",
"created-by" as "staff",
"total-spend"
"promo-min-purchase",
"created-at",
"ecom-flag"
from
"{read_schema}"."retention-master" rm
where
"promo-min-purchase" > "total-spend"
and date("bill-date") between '{start_date}' and '{end_date}';"""
case1_t = rs_db.get_df(case1_t_q)
case1_t_agg = case1_t.groupby(['store-name', 'ecom-flag'], as_index=False).agg({'promo-discount' : 'sum'})
# CASE2
# where promo total used is above max-time
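# Approach for cases 2 and 3: rank bills with a window function (per promo code, and per
# patient+promo code respectively) ordered by bill creation time; any bill whose rank
# exceeds the configured cap means the cap was not enforced at billing time.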
case2_t_q = f"""select
*
from
(
select
rm.id,
rm."patient-id",
rm."bill-date",
rm.store as "store-name",
rm."promo-discount",
rm."promo-code",
pc."max-time",
rm."promo-code-type",
rm."ecom-flag",
rank() over(partition by rm."promo-code-id" order by rm."created-at") Rank_
from
"{read_schema}"."retention-master" rm
left join "{read_schema}"."promo-codes" pc on rm."promo-code-id" = pc.id) T
where
T.Rank_>T."max-time"
and DATE(T."bill-date") between '{start_date}' and '{end_date}';"""
case2_t = rs_db.get_df(case2_t_q)
case2_t_agg = case2_t.groupby(['store-name', 'ecom-flag'], as_index=False).agg({'promo-discount' : 'sum'})
# CASE 3
# where promo per-patient-total-used is above max-per-patient
case3_t_q = f"""select
*
from
(
select
rm.id,
rm."patient-id",
rm."bill-date",
rm.store as "store-name",
rm."promo-discount",
rm."promo-code",
pc."max-per-patient",
rm."promo-code-type",
rm."ecom-flag",
rank() over(partition by rm."patient-id", rm."promo-code-id" order by rm."created-at") Rank_
from
"{read_schema}"."retention-master" rm
left join "{read_schema}"."promo-codes" pc on rm."promo-code-id" = pc.id) T
where
T.Rank_>T."max-per-patient"
and DATE(T."bill-date") between '{start_date}' and '{end_date}';"""
case3_t = rs_db.get_df(case3_t_q)
case3_t_agg = case3_t.groupby(['store-name', 'ecom-flag'], as_index=False).agg({'promo-discount' : 'sum'})
# CASE 4
# After expiry bills
case4_t_q = f"""
select
rm.id,
rm."patient-id",
rm.store as "store-name",
rm."bill-date",
rm."promo-discount",
rm."promo-code",
pc."max-time",
rm."promo-code-type",
rm."ecom-flag",
pc.expiry
from
"{read_schema}"."retention-master" rm
left join "{read_schema}"."promo-codes" pc on
rm."promo-code-id" = pc.id
where
DATE(rm."bill-date") between '{start_date}' and '{end_date}'
and rm."bill-date" > DATE(pc.expiry);"""
case4_t = rs_db.get_df(case4_t_q)
case4_t_agg = case4_t.groupby(['store-name', 'ecom-flag'], as_index=False).agg({'promo-discount' : 'sum'})
# Generating Excel File
file_name = 'promo_leakage_tech_issues.xlsx'
file_path_t = s3.write_df_to_excel(data={'bill-value > promo-min-purchase': case1_t_agg,
'exceeded max limit of usage': case2_t_agg,
'exceeded max-per-patient usage': case3_t_agg,
'bills after promo expiry': case4_t_agg}, file_name=file_name)
file_paths.append(file_path_t)
if ('all' in section) or ('mdm' in section):
##############################################
############# Category Section ###############
##############################################
bills_q = f"""select
"created-at",
s."patient-id",
s."store-id",
s."store-name",
s."bill-id",
s."drug-id",
s."type-at-selling",
s."type",
s."promo-code-id",
s."promo-code",
s."promo-discount" as "promo-spend",
s.mrp * "net-quantity" as "mrp-value"
from
"prod2-generico".sales s
where
date(s."created-at") between '{start_date}' and '{end_date}'
and s."promo-discount" > 0
and s."code-type" != 'referral'
and "bill-flag" = 'gross';"""
bills = rs_db.get_df(bills_q)
promo_q = f"""
SELECT
*
FROM
`promo-codes` pc
where
rules is not null
and date(`start`)<= '{end_date}'
and date(expiry)>= '{start_date}'
and `code-type` != 'referral';"""
promo = pd.read_sql_query(promo_q, mysql_read_db.connection)
def drug(x):
try:
return json.loads(x.decode("utf-8"))
except:
return []
def burn(x):
try:
if x['type'] not in x['rules_drugs'][0]:
return 'MDM_Burn'
else:
return 'Correct'
except:
return 0
promo['rules_json'] = promo['rules'].apply(lambda x: drug(x))
promo['rules_drugs'] = promo['rules_json'].apply(
lambda x: [i['match-values'] for i in x if i['level'] == 'drug-type'])
promo['rules_drugs'] = promo['rules_drugs'].apply(lambda x: [[]] if len(x) == 0 else x)
promo.rename(columns={'id': 'promo-code-id'}, inplace=True)
promo['rules_drugs_type'] = promo['rules_drugs'].apply(lambda x: type(x[0]))
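# Assumed shape of the promo `rules` JSON (illustrative example only), e.g.:
# [{"level": "drug-type", "match-values": ["generic"]}, ...]
# rules_drugs collects the allowed drug types per promo; a bill line whose drug type is
# not in that list is flagged as 'MDM_Burn' (the drug's type/category changed after the
# promo was availed, or was set incorrectly at the time).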
mdm_issue = pd.merge(bills, promo, on='promo-code-id', how='inner')
mdm_issue['Burn'] = mdm_issue.apply(lambda x: burn(x), 1)
mdm_issue['Burn'] = mdm_issue.apply(lambda x: 'Correct' if x['rules_drugs'] == [[]] else x['Burn'], 1)
mdm_issue = mdm_issue[(mdm_issue['Burn'] == 'MDM_Burn')]
# Generating excel file for mdm issues
file_name = 'promo_leakage_mdm_issue.xlsx'
file_path_mdm = s3.write_df_to_excel(data={'category changed later': mdm_issue}, file_name=file_name)
file_paths.append(file_path_mdm)
if ('all' in section) or ('marketing' in section):
##############################################
############# Marketing Section ##############
##############################################
# CASE 1
# Marketing perspective
# Assuming the latest code is the correct one, where min purchase in increased
# So cases where min purchase was set low is promo leakage
case1_m_q = f"""
select
*
from
(
select
rm.id,
rm."promo-code-id" as "old-promo-code-id",
rm."promo-code" as "old-promo-code",
rm."total-spend",
rm.store as "store-name",
rm."ecom-flag",
rm."promo-discount",
pc."min-purchase" as "old-min-purchase condition"
from
"prod2-generico"."retention-master" rm
left join "prod2-generico"."promo-codes" pc on
rm."promo-code-id" = pc.id
where
date(rm."created-at") between '{start_date}' and '{end_date}'
and rm."promo-code-id" is not null
and rm."promo-code-type" != 'referral') t1
inner join
(
select
*
from
(
select
pc.id as "new-promo-code-id",
pc."promo-code" as "new-promo-code",
pc."min-purchase" as "new-min-purchase",
rank() over(partition by pc."promo-code"
order by
pc."created-at" desc) as "promo-rank"
from
"prod2-generico"."promo-codes" pc
where
"code-type" != 'referral') t2
where
t2."promo-rank" = 1) t2_2 on
t1."old-promo-code" = t2_2."new-promo-code"
and t1."old-promo-code-id" != t2_2."new-promo-code-id"
and t1."old-min-purchase condition" < t2_2."new-min-purchase";"""
case1_m = rs_db.get_df(case1_m_q)
case1_m_agg = case1_m.groupby(['store-name', 'ecom-flag'], as_index=False).agg({'promo-discount' : 'sum'})
# CASE 2
# Promo created again user got benefited more than how much is supposed to
cas2_m_q = f"""
select
*
from
(
select
rm.id as "bill-id",
rm."patient-id",
rm."created-at",
rm."promo-code",
rm."promo-discount",
rm.store as "store-name",
rm."ecom-flag",
rank() over(partition by rm."patient-id",
rm."promo-code"
order by
rm."created-at") "promo-used-rank"
from
"prod2-generico"."retention-master" rm
where
rm."promo-code" is not null
and date("created-at") between '{start_date}' and '{end_date}'
and rm."promo-code-type" != 'referral') t1
left join
(
select
*
from
(
select
pc.id as "new-promo-code-id",
pc."promo-code" as "new-promo-code",
pc."max-per-patient" as "new-max-per-patient",
rank() over(partition by pc."promo-code"
order by
pc."created-at" desc) as "promo-rank"
from
"prod2-generico"."promo-codes" pc
where
"code-type" != 'referral') t2
where
t2."promo-rank" = 1) t2_2 on
t1."promo-code" = t2_2."new-promo-code"
where t1."promo-used-rank" > t2_2."new-max-per-patient";
"""
case2_m = rs_db.get_df(case2_m_q)
case2_m_agg = case2_m.groupby(['store-name', 'ecom-flag'], as_index=False).agg({'promo-discount' : 'sum'})
# Generating Excel for Marketing issues
file_name = 'promo_leakage_marketing_issue.xlsx'
file_path_m = s3.write_df_to_excel(data={'Lower min purchase limit': case1_m_agg,
'Max-per-patient limit exceeded': case2_m_agg,
}, file_name=file_name)
file_paths.append(file_path_m)
if ('all' in section) or ('referral' in section):
##############################################
############# Referral Section ##############
##############################################
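# Heuristic: flag referral codes that hit the usage cap (11 bills booked by one staff
# member), then look at the code owner's subsequent bills created by the same staff
# member where loyalty points were redeemed - a pattern suggesting the referral benefit
# was routed back through the staff member.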
ref_q = """
select
t1.*,
rm_d."patient-id" "abuser-patient-id",
rm_d."id" "abused-bill-id",
rm_d."created-at",
rm_d."created-by" "abuser",
rm_d."total-spend",
rm_d."redeemed-points"
from
(
select
rm.store,
rm."created-by",
rm."promo-code-id",
count(distinct rm.id) "bill-count",
max(rm."created-at") "last-promo-use"
from
"prod2-generico"."retention-master" rm
where
rm."promo-code-id" is not null
and rm."promo-code-type" = 'referral'
group by
rm.store,
rm."created-by",
rm."promo-code-id"
having
"bill-count" = 11
and DATE("last-promo-use") between '2022-08-01' and '2022-08-31') t1
left join "prod2-generico"."patients-promo-codes" ppc on
t1."promo-code-id" = ppc."promo-code-id"
left join "prod2-generico"."retention-master" rm_d on
ppc."patient-id" = rm_d."patient-id"
where
rm_d."created-at" >= t1."last-promo-use"
and t1."created-by" = rm_d."created-by"
and rm_d."redeemed-points" > 0
"""
ref = rs_db.get_df(ref_q)
ref_user = ref[['store', 'created-by', 'promo-code-id', 'bill-count']].drop_duplicates()
ref_user['abused-amount'] = 51
ref_user = ref_user.groupby(['store',
'created-by'],
as_index=False).agg({'bill-count': 'sum',
'abused-amount': 'sum'})
ref_owner = ref[['store', 'abuser', 'abused-bill-id', 'redeemed-points']].drop_duplicates()
ref_owner = ref_owner.groupby(['store',
'abuser'],
as_index=False).agg({'abused-bill-id': 'nunique',
'redeemed-points': 'sum'})
# Generating Excel for referral issues
file_name = 'promo_leakage_referral_issue.xlsx'
file_path_r = s3.write_df_to_excel(data={'ref user': ref_user,
'ref_owner': ref_owner,
}, file_name=file_name)
file_paths.append(file_path_r)
mail_body = f"""
Hi,
This report contains a summary of promo leakage that happened during {start_date} and {end_date}
There are three types of promo leakage:
1. Tech issues ( Leakage happened due to code logic/failure of logic )
2. Category issue ( Promo discount supposed to be available on some selected categories but category got changed after availing the offer; previous set category was not correct )
3. Marketing issue ( Issue where multiple times new code got introduced with some minor correction; we assumed the latest code is correct )
For each leakage there could be an obvious explanation; please provide it and I will iterate the logic accordingly
1. Tech Issue
Min-Purchase (Min-Purchase logic is failing ):
Ecomm - {case1_t_agg[case1_t_agg['ecom-flag'] == True]['promo-discount'].sum()}
Store - {case1_t_agg[case1_t_agg['ecom-flag'] == False]['promo-discount'].sum()}
Max-time ( Max-time code use logic failure ) :
Ecomm - {case2_t_agg[case2_t_agg['ecom-flag'] == True]['promo-discount'].sum()}
Store - {case2_t_agg[case2_t_agg['ecom-flag'] == False]['promo-discount'].sum()}
Max-per-patient (Max-per-patient logic failure) :
Ecomm - {case3_t_agg[case3_t_agg['ecom-flag'] == True]['promo-discount'].sum()}
Store - {case3_t_agg[case3_t_agg['ecom-flag'] == False]['promo-discount'].sum()}
bill-after expiry ( expiry logic failed )
Ecomm - {case4_t_agg[case4_t_agg['ecom-flag'] == True]['promo-discount'].sum()}
Store - {case4_t_agg[case4_t_agg['ecom-flag'] == False]['promo-discount'].sum()}
For all ecomm cases: Tech has explained that agents have override access for promo codes (they can apply them in multiple scenarios), but that still counts as leakage and can be discussed.
2. Category issue: {mdm_issue['promo-spend'].sum()}
3. Marketing Issue :
Min-Purchase (Min-Purchase logic changed in latest code ): {case1_m_agg['promo-discount'].sum()}
Max-per-patient (Max-per-patient logic failure because of multiple code creation) : {case2_m_agg['promo-discount'].sum()}
**This report will be generated monthly**
Thanks & regards"""
email = Email()
email.send_email_file(subject="Promo Leakage",
mail_body=mail_body,
to_emails=email_to,
file_uris=[],
file_paths=file_paths) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/promo-leakage/promo-leakage.py | promo-leakage.py |
# Version 2 plan
# For the front end - a few changes can make the input more user friendly (groups can be identified, and a user can add stores in front of a group)
# Dropping the whole table is risky - provide update and new-entry options when building the UI
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
import calendar
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger(level='INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
start_time = datetime.datetime.now()
logger.info('Script Manager Initialized')
logger.info("")
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
# SLA Provided via Retool
as_ms_sla = pd.read_csv(s3.download_file_from_s3(file_name="SLA/sla_tat/AS_MS_SLA.csv"))
pr_sla = pd.read_csv(s3.download_file_from_s3(file_name="SLA/sla_tat/PR_SLA.csv"))
# as_ms_sla = pd.read_csv(r'D:\Dashboards TAT\SLA Automations\AS_MS_SLA.csv')
# pr_sla = pd.read_csv(r'D:\Dashboards TAT\SLA Automations\PR_sla.csv')
sla = pd.concat([as_ms_sla,pr_sla],sort=True)
logger.info('fetched SLA sheet provided by planning team')
# Fetching Active stores
store_query = '''
select
s.id as "store-id",
case
when s."franchisee-id" = 1 then 'coco'
else 'fofo'
end as "franchisee-flag",
s."franchisee-id" ,
s."city-id",
s."is-active" ,
s.category
from
"prod2-generico".stores s
'''
stores_data = rs_db.get_df(store_query)
stores_data.columns = [c.replace('-', '_') for c in stores_data.columns]
# Fetching only active, retail stores
stores_data = stores_data[stores_data['category']=='retail']
stores_data = stores_data[stores_data['is_active']==1]
del stores_data['category']
logger.info('fetched current active store list')
# Creating SLA table
sla_db = pd.DataFrame()
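# Expand the SLA sheet to one row per (flag, franchisee, distributor type, round, day,
# store): pipe-separated store_ids are exploded, and the row with store_id 0 acts as the
# default template whose values fill stores that have no explicit entry.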
for as_ms_pr_flag in sla['as_ms_pr_flag'].unique():
if as_ms_pr_flag == 'as_ms':
auto_short = 1
elif as_ms_pr_flag == 'pr':
auto_short = 0
for franchisee_flag in sla['franchisee_flag'].unique():
for distributor_type in sla['distributor_type'].unique():
for round in sla['round'].unique():
for day in sla['day'].unique():
sla_temp = sla[(sla['as_ms_pr_flag']==as_ms_pr_flag)&
(sla['franchisee_flag']==franchisee_flag)&
(sla['distributor_type']==distributor_type)&
(sla['round']==round)&
(sla['day']==day)]
sla_temp['store_id'] = sla_temp['store_ids'].str.split('|')
sla_temp = sla_temp.explode('store_id')
sla_temp['store_id'] = sla_temp['store_id'].astype(int)
store_temp = sla_temp['store_id'].unique()
stores_data_temp = stores_data[stores_data['franchisee_flag']==franchisee_flag]
sla_temp = sla_temp.merge(stores_data_temp ,on = ['franchisee_flag','store_id'],how = 'outer')
for col in sla_temp.columns:
fillna_number = sla_temp[sla_temp['store_id'] == 0][col].squeeze()
sla_temp[col] = sla_temp[col].fillna(fillna_number)
sla_temp['auto_short'] = auto_short
sla_temp = sla_temp[sla_temp['store_id']!=0]
sla_temp = sla_temp[sla_temp['is_active']==1]
sla_db = pd.concat([sla_temp,sla_db],sort=True)
del sla_db['store_ids']
del sla_db['is_active']
logger.info('table created at same granularity that is required in DB')
# Checking If any duplicate entry
dup_col_check_cols = ['as_ms_pr_flag', 'day','round','store_id','distributor_type']
boolean = sla_db[dup_col_check_cols ].duplicated().any()
if boolean:
logger.info(f'duplicate entries found - {boolean}')
duplicateRowsDF = sla_db[sla_db.duplicated(subset=dup_col_check_cols,keep=False)]
sla_db.drop_duplicates(subset=dup_col_check_cols,
keep='first', inplace=True)
else:
logger.info(f'duplicate entries found - {boolean}')
duplicateRowsDF = pd.DataFrame()
# formatting sla_db to upload
sla_db.columns = [c.replace('_', '-') for c in sla_db.columns]
cols_to_convert_to_int = ['auto-short', 'city-id', 'day', 'delivery-date',
'delivery-time', 'dispatch-date', 'dispatch-time',
'franchisee-id', 'invoice-date', 'invoice-time',
'order-date', 'order-time', 'reorder-date', 'reorder-time', 'round',
'store-id']
sla_db[cols_to_convert_to_int] = sla_db[cols_to_convert_to_int].astype(int)
# Uploading to RS
schema = 'prod2-generico'
table_name = 'tat-sla'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
status2 = False
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' delete
from "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(str(table_name) + ' table deleted')
s3.write_df_to_db(df=sla_db[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
status2 = True
if status2 is True:
status = 'Success'
else:
status = 'Failed'
# Email
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
duplicate_entry_uri = s3.save_df_to_s3(df=duplicateRowsDF, file_name=f'sla_duplicate_entries_{cur_date}.csv')
sla_db_uri = s3.save_df_to_s3(df=sla_db, file_name=f'tat_sla_final_{cur_date}.csv')
email = Email()
email.send_email_file(subject=f"{env}-{status} : SLA update",
mail_body=f"duplicated entry found - {boolean}",
to_emails=email_to, file_uris=[duplicate_entry_uri,sla_db_uri])
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/tat_sla/as_ms_sla.py | as_ms_sla.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import pandas as pd
import numpy as np
import datetime
import dateutil
from dateutil.tz import gettz
from datetime import timedelta
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-d', '--full_run', default=0, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
full_run = args.full_run
os.environ['env'] = env
logger = get_logger()
logger.info(f"full_run: {full_run}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'ecomm-pso-substitution'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# max of data
eco_q = """
select
date(max("zeno-created-at")) max_exp
from
"prod2-generico"."ecomm-pso-substitution"
"""
max_exp_date = rs_db.get_df(eco_q)
max_exp_date['max_exp'].fillna(np.nan, inplace=True)
logger.info(max_exp_date.info())
max_exp_date = max_exp_date['max_exp'].to_string(index=False)
logger.info(max_exp_date)
# params
if full_run or max_exp_date == 'NaN':
start = '2021-01-01'
else:
start = (pd.to_datetime(max_exp_date) - timedelta(days=15)).strftime('%Y-%m-%d')
start = dateutil.parser.parse(start)
logger.info(start)
# ethical generic leads
base_q = f"""
select
e."zeno-order-id",
e."patient-id" ,
e."order-type" ,
e."zeno-created-at" ,
e."zeno-drug-created-by" ,
e."order-number" ,
e."preferred-store-id" ,
e."type",
e.category ,
e."composition-master-id",
e.composition ,
e.status,
e."zeno-drug-id",
e."zeno-drug-name"
from
"prod2-generico".ecomm e
where
e."zeno-created-at" >= '{start}'
and e."type" in ('generic', 'ethical')
and e."zeno-drug-created-by" = '[email protected]'
"""
base = rs_db.get_df(base_q)
base['gen-cnt'] = np.where(base['type'] == 'generic', 1, 0)
base['eth-cnt'] = np.where(base['type'] == 'ethical', 1, 0)
fixed_gen_eth_cnt = base.groupby(['zeno-order-id', 'composition']).agg(
{'gen-cnt': "sum", 'eth-cnt': "sum"}).reset_index()
# to exclude case where lead contains same composition both ethical and generic drug
exclusion = fixed_gen_eth_cnt[(fixed_gen_eth_cnt['gen-cnt'] > 0) & (fixed_gen_eth_cnt['eth-cnt'] > 0)]
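# Exclude leads that already contain both an ethical and a generic drug of the same
# composition - for those, a generic substitution by CRM cannot be attributed cleanly.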
# take only ethical as left table
base_eth = base[(base['type'] == 'ethical')]
base_eth.drop(['gen-cnt', 'eth-cnt'],
axis='columns', inplace=True)
# base_eth['zeno-order-id'] = base_eth['zeno-order-id'].astype(str)
# exclusion['zeno-order-id'] = exclusion['zeno-order-id'].astype(str)
# base_eth['composition'] = base_eth['composition'].astype(str)
# exclusion['composition'] = exclusion['composition'].astype(str)
base_eth[['zeno-order-id', 'composition']] = base_eth[['zeno-order-id', 'composition']].astype(str)
exclusion[['zeno-order-id', 'composition']] = exclusion[['zeno-order-id', 'composition']].astype(str)
datamerge = pd.merge(base_eth, exclusion, how='left', on=['zeno-order-id', 'composition'])
# exclude leads with both eth gen same composition
final_eth = datamerge[datamerge['gen-cnt'].isna()]
final_eth.drop(['gen-cnt', 'eth-cnt'],
axis='columns', inplace=True)
# join pso to check whether ethical was substituted to gen by CRM
pso_q = f"""
select
pso."zeno-order-id",
pso."drug-id" as "pso-drug-id",
pso."drug-name" as "pso-drug-name",
pso."store-id",
d.composition,
d."type" as "pso-drug-type",
pso."created-by" as "pso-created-by"
from
(select * ,ROW_NUMBER () over (partition by pso."zeno-order-id" order by pso."created-at" desc) rk
from "prod2-generico"."patients-store-orders" pso
where "order-source" = 'zeno' and "created-at" >= '{start}'
) pso
left join "prod2-generico"."drugs" d on
d.id = pso."drug-id"
where rk = 1
"""
pso = rs_db.get_df(pso_q)
# pso['zeno-order-id'] = pso['zeno-order-id'].astype(str)
# pso['composition'] = pso['composition'].astype(str)
pso[['zeno-order-id', 'composition']] = pso[['zeno-order-id', 'composition']].astype(str)
datamerge = pd.merge(final_eth, pso, how='left', on=['zeno-order-id', 'composition'])
joined = datamerge.copy()
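# After the PSO join, comparing the lead's ethical drug with the drug actually ordered
# (pso-drug-type) shows whether the CRM team substituted it with a generic.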
# substitutables
substitutable_q = """
select
distinct id as "composition-master-id" ,
"composition" as "substitutable-composition" from
"prod2-generico"."substitutable-compositions"
"""
substitutable = rs_db.get_df(substitutable_q)
joined['composition-master-id'] = joined['composition-master-id'].astype(int, errors='ignore')
substitutable['composition-master-id'] = substitutable['composition-master-id'].astype(int, errors='ignore')
datamerge = pd.merge(joined, substitutable, how='left', on=['composition-master-id'])
datamerge['substitutable-composition-flag'] = np.where(pd.isnull(datamerge['substitutable-composition']) == False, 1, 0)
datamerge.drop(['substitutable-composition'],
axis='columns', inplace=True)
ecomm_pso_sub = datamerge.copy()
# only consider leads for which pso was made
ecomm_pso_sub = ecomm_pso_sub[(pd.isnull(ecomm_pso_sub['pso-drug-id']) == False)]
# ecomm_pso_sub['preferred-store-id'] = ecomm_pso_sub['preferred-store-id'].astype(int, errors='ignore')
ecomm_pso_sub[['store-id', 'preferred-store-id']] = ecomm_pso_sub[['store-id', 'preferred-store-id']]\
.astype(int, errors='ignore')
# etl
ecomm_pso_sub['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
ecomm_pso_sub['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
ecomm_pso_sub['created-by'] = 'etl-automation'
ecomm_pso_sub['updated-by'] = 'etl-automation'
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where "zeno-created-at" >='{start}' '''
logger.info(truncate_query)
rs_db.execute(truncate_query)
logger.info(ecomm_pso_sub.head())
s3.write_df_to_db(df=ecomm_pso_sub[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ecomm_pso_substitution/ecomm_pso_substitution.py | ecomm_pso_substitution.py |
```
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.clervertap.clevertap import CleverTap
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
print(f"env: {env}")
# ct = CleverTap(api_name="profiles.json", event_name="App Launched", batch_size=100, query={"event_name": "App Launched", "from": 20220601, "to": 20220601})
# ct.get_profile_data_all_records()
# print(f"All records count: {len(ct.all_records)}")
ct = CleverTap(api_name="profiles.json", event_name="App Launched", batch_size=100,
query={"event_name": "App Launched", "from": 20220921, "to": 20220921})
ct.get_profile_data_all_records()
df = ct.all_records
print(len(df))
import pandas as pd
df1=pd.DataFrame(df)
df1.head()
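# pd.json_normalize flattens the nested CleverTap profile dicts (e.g. profileData.*)
# into top-level columns, which is what lets us select profileData.patientid below.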
df2 = pd.json_normalize(df)
df2.head()
df2[['identity','profileData.patientid']]
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/clevertap/clevertap-rs.ipynb | clevertap-rs.ipynb |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "drug-primary-disease"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert into
"prod2-generico"."{table_name}" (
"created-by",
"created-at",
"updated-by",
"updated-at",
"drug-id",
"drug-primary-disease"
)
select
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
"drug-id",
"subgroup" as "drug-primary-disease"
from
(
select
ab."drug-id", ab."subgroup", ab."molecules_count", row_number() over (partition by ab."drug-id"
order by
ab."molecules_count" desc,
ab."subgroup" asc) as "rank"
from
(
select
a."id" as "drug-id", c."subgroup", count(c."id") as molecules_count
from
"prod2-generico".drugs a
inner join "prod2-generico"."composition-master-molecules-master-mapping" b on
a."composition-master-id" = b."composition-master-id"
inner join "prod2-generico"."molecule-master" c on
b."molecule-master-id" = c."id"
where
c.subgroup != 'others'
group by
a."id", c."subgroup") ab ) sub
where
"rank" = 1
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/drug-primary-disease/drug_primary_disease.py | drug_primary_disease.py |
import os
import sys
import argparse
import pandas as pd
import datetime
import numpy as np
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB, MSSql
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from dateutil.tz import gettz
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Populates table purchase_margin")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected],[email protected]",
type=str, required=False)
parser.add_argument('-sd', '--start_date', default='NA', type=str, required=False)
parser.add_argument('-ed', '--end_date', default='NA', type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
start_date = args.start_date
end_date = args.end_date
err_msg = ''
logger = get_logger()
logger.info("Script begins")
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
d = datetime.timedelta(days=15)
start_dt = cur_date - d
end_dt = cur_date - datetime.timedelta(1)
status = False
if start_date == 'NA' and end_date == 'NA':
start_date = start_dt
end_date = end_dt
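# Default window: the last 15 days up to yesterday, unless explicit start/end dates are
# passed via -sd / -ed.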
try:
# MSSql connection
mssql = MSSql(connect_via_tunnel=False)
mssql_connection = mssql.open_connection()
# RS Connection
rs_db = DB()
rs_db.open_connection()
q1 = """
select
'bhiwandi-warehouse' as source,
cast(b.Barcode as int) as drug_id,
cast(a.Vdt as date) as purchase_date,
c.Altercode as distributor_id,
a.Qty as quantity,
f.mrp as mrp,
(a.NetAmt + a.Taxamt)/ qty as purchase_rate,
cast(a.Vno as varchar) + cast('-' as varchar) + cast(a.Itemc as varchar) + cast('-' as varchar) + cast(a.Psrlno as varchar) as id,
199 as store_id
from
SalePurchase2 a
left join Item b on
b.code = a.Itemc
left join acm c on
c.code = a.Acno
left join Master m on
m.code = b.ItemCat
left join FIFO f on
(f.Pbillno = a.Pbillno
and f.Psrlno = a.Psrlno
and f.Itemc = a.Itemc
and f.Vdt = a.Vdt)
where
b.Barcode not like '%[^0-9]%'
and c.Altercode not like '%[^0-9]%'
and a.Vtype in (N'PB')
and a.Qty > 0
and a.vdt >= '{}' and a.vdt <= '{}'
""".format(start_date,end_date)
logger.info("getting data from WMS tables")
bhiwandi_wh_purchase = pd.read_sql(q1, mssql_connection)
logger.info("Data pulled from WMS tables")
mssql = MSSql(connect_via_tunnel=False, db='Esdata_WS_2')
cnxn = mssql.open_connection()
cursor = cnxn.cursor()
q_2='''
select
'goodaid-warehouse' as source,
cast(b.Barcode as int) as drug_id,
cast(a.Vdt as date) as purchase_date,
c.Altercode as distributor_id,
a.Qty as quantity,
f.mrp as mrp,
(a.NetAmt + a.Taxamt)/ qty as purchase_rate,
cast(a.Vno as varchar) + cast('-' as varchar) + cast(a.Itemc as varchar) + cast('-' as varchar) + cast(a.Psrlno as varchar) as id,
343 as store_id
from
SalePurchase2 a
left join Item b on
b.code = a.Itemc
left join acm c on
c.code = a.Acno
left join Master m on
m.code = b.ItemCat
left join FIFO f on
(f.Pbillno = a.Pbillno
and f.Psrlno = a.Psrlno
and f.Itemc = a.Itemc
and f.Vdt = a.Vdt)
where
b.Barcode not like '%[^0-9]%'
and c.Altercode not like '%[^0-9]%'
and a.Vtype in (N'PB')
and a.Qty > 0
and a.vdt >= '{}' and a.vdt <= '{}'
'''.format(start_date,end_date)
goodaid_warehouse_purchase = pd.read_sql(q_2, cnxn)
q3 = """
select
'dc' as source,
ii."drug-id" as drug_id,
date(i."approved-at") as purchase_date,
cast(i."distributor-id" as varchar) as distributor_id,
ii."actual-quantity" as quantity,
ii.mrp,
(ii."net-value" / ii."actual-quantity") as purchase_rate,
cast(ii.id as varchar) as id,
i."store-id" as store_id
from
"prod2-generico".invoices i
left join "prod2-generico"."invoice-items" ii on
ii."invoice-id" = i.id
where
"distributor-id" <> 8105
and ii."drug-id" is not null and date(i."approved-at") >='{}' and
date(i."approved-at") <='{}'
""".format(start_date,end_date)
logger.info("Getting data from RS")
dc = rs_db.get_df(q3)
logger.info("Data pulled from RS")
drugs = rs_db.get_df("""
select
drg.id as drug_id,
drg."drug-name" as drug_name,
drg."type",
drg.company,
drg.category,
drg."available-in" as available_in,
drg."sub-type" as sub_type,
drg."category-drug-type" as category_drug_type
from
"prod2-generico".drugs drg
""")
stores = rs_db.get_df("""
select
sm.id as store_id,
sm.store as store_name,
sm.city as city_name,
sm."franchisee-name" as franchisee_name,
sm."opened-at" as store_opened_at
from
"prod2-generico"."stores-master" sm
""")
distributors = rs_db.get_df("""
select
id as distributor_id,
d."name" as distributor_name
from
"prod2-generico".distributors d
""")
df = pd.concat([dc, bhiwandi_wh_purchase,goodaid_warehouse_purchase])
df = df.merge(drugs, on='drug_id', how='left')
df = df.merge(stores, on='store_id', how='left')
df['distributor_id'] = np.where(df['distributor_id'] == '', 0, df['distributor_id'])
df['distributor_id'] = df['distributor_id'].astype(int)
df = df.merge(distributors, on='distributor_id', how='left')
created_at = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
df['created-at'] = created_at
df['created-by'] = 'etl-automation'
df['updated-at'] = created_at
df['updated-by'] = 'etl-automation'
df['quantity'] = df['quantity'].astype(int)
#writing to RS
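    # Idempotent load: rows whose "created_at" date falls inside the current window are
    # deleted before the fresh dataframe is appended, and rows with "created_at" older
    # than one year are trimmed afterwards.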
delete_q = """
DELETE
FROM
"prod2-generico"."purchase-margin"
WHERE
date("created_at") >= '{start_date_n}'
and date("created_at") <= '{end_date_n}'
""".format(start_date_n=start_date, end_date_n=end_date)
rs_db.execute(delete_q)
s3 = S3()
delete_one_year='''
DELETE
FROM
"prod2-generico"."purchase-margin"
WHERE
date("created_at") < date_trunc('month', current_date) - INTERVAL '1 year'
'''
s3.write_df_to_db(df=df, table_name='purchase-margin', db=rs_db, schema='prod2-generico')
rs_db.execute(delete_one_year)
status = True
except Exception as e:
err_msg = str(e)
logger.info('purchase_margin job failed')
logger.exception(e)
# Sending email
email = Email()
if status:
result = 'Success'
email.send_email_file(subject=f"purchase_margin ({env}): {result}",
mail_body=f"Run time: {cur_date}",
to_emails=email_to, file_uris=[])
else:
result = 'Failed'
email.send_email_file(subject=f"purchase_margin ({env}): {result}",
mail_body=f"Run time: {cur_date} {err_msg}",
to_emails=email_to, file_uris=[])
logger.info("Script ended")
#DDL for table
"""
create table "prod2-generico"."purchase-margin" ( "source" text,
"drug_id" int,
"created_at" date,
"distributor_id" int,
"quantity" int,
"mrp" float,
"purchase_rate" float,
"id" text,
"store_id" int,
"drug_name" text,
"type" text,
"company" text,
"category" text,
"available_in" text,
"sub_type" text,
"category_drug_type" text,
"store_name" text,
"city_name" text,
"franchisee_name" text,
"store_opened_at" date,
"distributor_name" text,
"created-at" TIMESTAMP without TIME zone default getdate(),
"created-by" VARCHAR default 'etl-automation',
"updated-at" TIMESTAMP without TIME zone default getdate(),
"updated-by" VARCHAR default 'etl-automation'
);
alter table "prod2-generico"."purchase-margin" owner to admin;
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/purchase_margin/purchase_margin.py | purchase_margin.py |
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
import argparse
import pandas as pd
import os
from datetime import datetime
import numpy as np
from dateutil.tz import gettz
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
currentMonth = datetime.now(tz=gettz('Asia/Kolkata')).month
currentYear = datetime.now(tz=gettz('Asia/Kolkata')).year
run_date = str(datetime.now(tz=gettz('Asia/Kolkata')).date())
rs_db = DB()
rs_db.open_connection()
s3=S3()
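# Reference data: the warehouse assortment flag (wh-sku-subs-master, add-wh = 'Yes')
# and the fixed purchase rate (item.prate) keyed by drug id; both are later
# left-joined onto the consumer and supply dumps.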
warehouse_drug_query = '''
select
wssm."drug-id" as "drug-id-dss",
'Yes' as "add-wh"
from
"prod2-generico"."wh-sku-subs-master" wssm where wssm."add-wh" ='Yes'
'''
warehouse_drug = rs_db.get_df(query=warehouse_drug_query)
wh_prate_query = """
select
i.barcode as "drug-id-wh" ,
i.prate as "fixed-purchase-rate"
from
"prod2-generico".item i
where
i.prate > 0
and REGEXP_COUNT(i.barcode ,
'^[0-9]+$')= 1
"""
wh_prate=rs_db.get_df(query=wh_prate_query)
wh_prate["drug-id-wh"] = pd.to_numeric(wh_prate["drug-id-wh"])
q_aa='''
select
"year-created-at" as "year",
"month-created-at" as "month" ,
s."bill-flag" as "sales-type",
s."drug-id" ,
s."drug-name" ,
s."type" ,
s.category ,
s.company ,
d."available-in" ,
d.pack ,
d."pack-form" ,
d."release" ,
d."is-repeatable" ,
d."repeatability-index",
s."ecom-flag" ,
s.city ,
COUNT(distinct s."bill-id") as "bill-count",
COUNT(distinct s."patient-id") as "patient-count",
SUM(s.mrp*s."net-quantity") as "net-mrp",
SUM(s.rate* s."net-quantity") as "net-revenue",
SUM(s.ptr*s."net-quantity") as "net-cogs-zp",
SUM(s."purchase-rate"*s."net-quantity") as "net-cogs-wc"
from
"prod2-generico".sales s
left join "prod2-generico".drugs d on s."drug-id" =d.id
where "year-created-at" ={currentYear} and "month-created-at" ={currentMonth}
group by
"year-created-at" ,
"month-created-at" ,
s."bill-flag",
s."drug-id" ,
s."drug-name" ,
s."type" ,
s.category ,
s.company ,
d."available-in" ,
d.pack ,
d."pack-form" ,
d."release" ,
d."is-repeatable" ,
d."repeatability-index",
s."ecom-flag" ,
s.city
'''.format(currentYear=currentYear,currentMonth=currentMonth)
gross_and_returns = rs_db.get_df(query=q_aa)
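# Repeatability tag: a drug is 'repeatable' if its repeatability-index is >= 80,
# or >= 40 when the category is chronic; everything else is 'non-repeatable'.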
conditions = [(gross_and_returns['repeatability-index'] >= 80 ),
(gross_and_returns['repeatability-index'] >= 40) & (gross_and_returns['category'] =='chronic'),
(gross_and_returns['repeatability-index']<80) ]
choices = ['repeatable','repeatable', 'non-repeatable']
gross_and_returns['repeatable_flag'] = np.select(conditions, choices)
q_inventory='''
select
i."drug-id",
SUM(i.quantity + i."locked-for-check" +
i."locked-for-audit" + i."locked-for-return" + i."locked-for-transfer") as "current-inventory-qty",
SUM((i.quantity + i."locked-for-check" +
i."locked-for-audit" + i."locked-for-return" + i."locked-for-transfer")* i.ptr) as "current-inventory-value",
avg(srs."selling-rate") as "avg-fixed-selling-rate"
from
"prod2-generico"."inventory-1" i
left join "prod2-generico"."selling-rate-settings" srs on
i."drug-id" = srs."drug-id"
group by
i."drug-id";
'''
df_inventory = rs_db.get_df(query=q_inventory)
gross_and_returns_inventory = pd.merge(left=gross_and_returns,
right=df_inventory,
how='left', on=['drug-id'])
gross_and_returns_inventory['run_date'] = run_date
gross_and_returns_inventory = pd.merge(gross_and_returns_inventory, wh_prate,
how='left', left_on='drug-id',
right_on='drug-id-wh')
del gross_and_returns_inventory["drug-id-wh"]
gross_and_returns_inventory = pd.merge(gross_and_returns_inventory,
warehouse_drug, how='left',
left_on='drug-id',
right_on='drug-id-dss')
gross_and_returns_inventory['add-wh'].fillna('No', inplace=True)
del gross_and_returns_inventory["drug-id-dss"]
consumer_dump = gross_and_returns_inventory
#Supply Dump
#Purchase data
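# Invoice-level purchases for the MIS month, aggregated per drug / distributor / city,
# with average purchase rate, selling rate (from inventory), MRP and the fixed
# selling rate from selling-rate-settings.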
q_invoices='''
SELECT
date_part(year,i."received-at") AS "year",
date_part(month,i."received-at") AS "month",
ii."drug-id" ,
d."drug-name" ,
d."type" ,
d.category ,
d.composition ,
d.company ,
d."available-in" ,
d.pack ,
d."pack-form" ,
d."release" ,
i."distributor-id" ,
di."name" as "distributor-name",
cty.name as "city-name",
COUNT(DISTINCT si."short-book-id") AS "order-count",
SUM(si."po-quantity") AS "po-quantity",
SUM(ii."actual-quantity") AS "purchase-quantity",
SUM(ii."net-value") AS "puchase-value",
AVG(ii."net-value" /ii."actual-quantity") AS "avg-purchase-rate",
AVG(n."selling-rate") AS "avg-selling-rate",
AVG(ii.mrp) AS "avg-mrp",
AVG(srs."selling-rate") as "avg-fixed-selling-rate"
FROM
"prod2-generico"."invoice-items" ii
left JOIN "prod2-generico".invoices i on i.id =ii."invoice-id"
left JOIN "prod2-generico".distributors di on di.id = i."distributor-id"
left JOIN "prod2-generico"."short-book-invoice-items" si ON si."invoice-item-id" = ii.id
left JOIN "prod2-generico".drugs d ON d.id = ii."drug-id"
left JOIN "prod2-generico"."invoice-items-1" it ON it."invoice-item-reference" = ii.id
left JOIN "prod2-generico"."inventory-1" n ON n."invoice-item-id" = it.id
LEFT JOIN "prod2-generico"."selling-rate-settings" srs on ii."drug-id" =srs."drug-id"
LEFT JOIN (
select
s.id,
zc.name
from
"prod2-generico".stores s
left join "prod2-generico"."zeno-city" zc on
zc.id =s."city-id"
) as cty on cty.id = i."store-id"
WHERE
date_part(year,i."received-at") ={currentYear}
and date_part(month ,i."received-at")= {currentMonth}
GROUP BY
date_part(year,i."received-at") ,
date_part(month,i."received-at") ,
ii."drug-id" ,
d."drug-name" ,
d."type" ,
d.category ,
d.composition ,
d.company ,
d."available-in" ,
d.pack ,
d."pack-form" ,
d."release" ,
i."distributor-id" ,
di."name" ,
cty.name '''.format(currentYear=currentYear,currentMonth=currentMonth)
df_invoices = rs_db.get_df(query=q_invoices)
df_invoices['run-date'] = run_date
df_invoices = pd.merge(df_invoices, wh_prate,
how='left', left_on='drug-id',
right_on='drug-id-wh')
del df_invoices["drug-id-wh"]
# adding add_wh column
df_invoices = pd.merge(df_invoices, warehouse_drug,
how='left', left_on='drug-id',
right_on='drug-id-dss')
df_invoices['add-wh'].fillna('No', inplace=True)
del df_invoices["drug-id-dss"]
supply_dump = df_invoices
consumer_data_dump = 'cat_data_dump/consumer_data_dump_{}.csv'.format(run_date)
supply_data_dump = 'cat_data_dump/supply_data_dump_{}.csv'.format(run_date)
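# Both dumps are uploaded to S3 under cat_data_dump/ keyed by run_date; the email
# dispatch further below is currently commented out, so only the S3 files are produced.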
# Uploading File to S3
consumer_dump_uri = s3.save_df_to_s3(df=consumer_dump, file_name=consumer_data_dump)
supply_dump_uri= s3.save_df_to_s3(df=supply_dump, file_name=supply_data_dump)
#Sending the email
#email = Email()
#email.send_email_file(subject=f"Category Data Dump {run_date}",
#mail_body="category supply data ",
#to_emails=email_to, file_uris=[supply_dump_uri])
#email.send_email_file(subject=f"Category Data Dump {run_date}",
#mail_body="category consumer data ",
#to_emails=email_to, file_uris=[consumer_dump_uri])
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/category_data_dump/category-data-dump.py | category-data-dump.py |
# =============================================================================
# purpose: MIS Automation
# Author: Saurav Maskar
# =============================================================================
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from dateutil.tz import gettz
from zeno_etl_libs.queries.mis.mis_class import Mis
from zeno_etl_libs.queries.mis import mis_queries
import datetime
import argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-met', '--mis_email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-dt', '--mis_db_date', default="NA", type=str, required=False)
parser.add_argument('-sc', '--schema_to_select', default="public", type=str, required=False)
parser.add_argument('-cy', '--choose_year', default="NA", type=str, required=False)
parser.add_argument('-cm', '--choose_month', default="NA", type=str, required=False)
parser.add_argument('-pc', '--power_consumer_value', default=2000, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
mis_email_to = args.mis_email_to
mis_db_date = args.mis_db_date
schema_to_select = args.schema_to_select
choose_year = args.choose_year
choose_month = args.choose_month
power_consumer_value = args.power_consumer_value
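# Reporting window: by default the MIS covers the previous calendar month (IST).
# analysis_start_time / analysis_end_time bound that month, and mis_db_date /
# suffix_to_table tag the output tables with the month-end date.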
run_now = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
if choose_month == 'NA':
    # default to the previous calendar month
    choose_month = 12 if run_now.month == 1 else run_now.month - 1
if choose_year == 'NA':
    choose_year = run_now.year
    # a January run reports on December of the previous year
    if run_now.month == 1 and int(choose_month) == 12:
        choose_year = choose_year - 1
analysis_start_time = datetime.datetime(int(choose_year),int(choose_month),1,0,0,0).strftime('%Y-%m-%d %H:%M:%S')
if int(choose_month)== 12:
analysis_end_time = (datetime.datetime((int(choose_year)+1),1,1,23,59,59)-datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S')
else:
analysis_end_time = (datetime.datetime(int(choose_year), (int(choose_month) + 1), 1, 23, 59, 59) - datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S')
if mis_db_date == 'NA':
if int(choose_month)==12:
mis_db_date = (datetime.datetime((int(choose_year)+1),1,1,23,59,59)-datetime.timedelta(days=1)).strftime('%Y-%m-%d')
else:
mis_db_date = (datetime.datetime(int(choose_year), (int(choose_month) + 1), 1, 23, 59, 59) - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
suffix_to_table = '-mis-' + str(mis_db_date)
os.environ['env'] = env
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
s3 = S3()
start_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
today_date = start_time.strftime('%Y-%m-%d')
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("mis_email_to - " + mis_email_to)
logger.info("mis_db_date - " + str(mis_db_date))
logger.info("schema_to_select - " + str(schema_to_select))
logger.info("choose_year - " + str(choose_year))
logger.info("choose_month - " + str(choose_month))
logger.info("analysis_start_time - " + str(analysis_start_time))
logger.info("analysis_end_time - " + str(analysis_end_time))
logger.info("suffix_to_table - " + str(suffix_to_table))
logger.info("power_consumer_value - " + str(power_consumer_value))
# date parameter
logger.info("code started at {}".format(datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')))
mis = Mis(analysis_start_time,analysis_end_time,suffix_to_table,schema_to_select,choose_year,choose_month,rs_db,logger,mis_queries)
# order_source and stores are used by almost every block below, so they are fetched once up front
order_source = mis.order_source()
stores = mis.store_list()
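# Stores with franchisee_id != 1 are treated as FOFO (franchise) stores; franchisee_id 1
# is assumed to be the company-owned franchisee. The flag is dropped from the main
# stores frame once the FOFO subset is taken.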
stores_fofo = stores[stores['franchisee_id'] != 1]
del stores['franchisee_id']
# =============================================================================
# MIS - Breakup/Unified
# Two versions of the MIS are shared with the Accounts team: "breakup" counts GoodAid as a separate entity,
# while "unified" does not.
# Names with no suffix are used for the breakup view; the _unified suffix marks the unified view.
# =============================================================================
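# Every metric below is therefore computed twice: once from the breakup frames
# (sales, customer_returns, ...), where order_source is split into ecomm/store and
# order_type_tag(..., 'breakup') tags GoodAid separately, and once from the *_unified
# copies, where order_source is 'all'; results are appended to breakup_master and
# unified_master respectively.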
breakup_master = pd.DataFrame()
unified_master = pd.DataFrame()
status = False
try:
# =============================================================================
# prerequisite tables
# =============================================================================
# sales
sales = mis.sales()
sales['fofo_distributor'] = np.vectorize(mis.fofo_final_distributor)(sales['franchisee_id'],sales['franchisee_invoice'])
sales_unified = sales.copy(deep=True)
sales['order_source'] = np.where(sales.bill_id.isin(order_source.zeno_bill_id),
"ecomm", "store")
sales_unified['order_source'] = 'all'
sales['type1'] = np.vectorize(mis.order_type_tag)(sales['company'],sales['type'],'breakup')
sales_unified['type1'] = np.vectorize(mis.order_type_tag)(sales_unified['company'],sales_unified['type'],'unified')
logger.info('fetched sales for selected period')
# customer returns
customer_returns = mis.customer_returns()
customer_returns['fofo_distributor'] = np.vectorize(mis.fofo_final_distributor)(customer_returns['franchisee_id'],
customer_returns['franchisee_invoice'])
customer_returns_unified = customer_returns.copy(deep = True)
customer_returns['order_source'] = np.where(customer_returns.bill_id.isin(order_source.zeno_bill_id),
"ecomm", "store")
customer_returns_unified['order_source'] = 'all'
customer_returns['type1'] = np.vectorize(mis.order_type_tag)(customer_returns['company'],customer_returns['type'],'breakup')
customer_returns_unified['type1'] = np.vectorize(mis.order_type_tag)(customer_returns_unified['company'],customer_returns_unified['type'],'unified')
logger.info('fetched customer returns data for selected period')
# inventory
inventory = mis.inventory()
inventory['fofo_distributor'] = np.vectorize(mis.fofo_final_distributor)(inventory['franchisee_id'],inventory['franchisee_invoice'])
inventory_unified = inventory.copy(deep = True)
inventory['type1'] = np.vectorize(mis.order_type_tag)(inventory['company'],inventory['type'],'breakup')
inventory_unified['type1'] = np.vectorize(mis.order_type_tag)(inventory_unified['company'],inventory_unified['type'],'unified')
logger.info('fetched inventory data')
# cumulative_consumer_data
cumulative_consumers_data = mis.cumulative_consumers_data()
cumulative_consumers_data = pd.merge(left=cumulative_consumers_data, right=stores,
how='left',
on=['store_id'])
logger.info('fetched cumulative consumers data')
# cumulative_consumer_fofo_data
workcell_cumulative_consumers_fofo_data,others_cumulative_consumers_fofo_data = mis.cumulative_consumers_fofo_data()
workcell_cumulative_consumers_fofo_data = pd.merge(left=workcell_cumulative_consumers_fofo_data, right=stores,
how='left',
on=['store_id'])
others_cumulative_consumers_fofo_data = pd.merge(left=others_cumulative_consumers_fofo_data, right=stores,
how='left',
on=['store_id'])
logger.info('fetched cumulative consumers data')
# all_cons_initial_bill_date
all_cons_initial_bill_date = mis.cons_initial_bill_date()
logger.info('fetched customers_initial_bill_date data')
# home delivery data
home_delivery_data = mis.home_delivery_data()
home_delivery_data_unified = home_delivery_data.copy(deep = True)
logger.info('fetched home delivery data')
home_delivery_data['order_source'] = np.where(home_delivery_data.bill_id.isin(order_source.zeno_bill_id),
"ecomm", "store")
home_delivery_data_unified['order_source'] = 'all'
delivery_bill_ids = mis.delivery_bill_ids()
logger.info('fetched delivery bill ids')
# purchase_from_wc_data
purchase_from_wc_data = mis.purchase_from_wc_data()
purchase_from_wc_data['fofo_distributor'] = np.vectorize(mis.fofo_final_distributor)(purchase_from_wc_data['franchisee_id'],
purchase_from_wc_data['franchisee_invoice'])
purchase_from_wc_data_unified = purchase_from_wc_data.copy(deep=True)
purchase_from_wc_data['type1'] = np.vectorize(mis.order_type_tag)(purchase_from_wc_data['company'],purchase_from_wc_data['type'],'breakup')
purchase_from_wc_data_unified['type1'] = np.vectorize(mis.order_type_tag)(purchase_from_wc_data_unified['company'],purchase_from_wc_data_unified['type'],'unified')
logger.info('fetched purchase from wc data for selected period')
# zippin_return_data
zippin_return_data = mis.zippin_return_data()
zippin_return_data['fofo_distributor'] = np.vectorize(mis.fofo_final_distributor)(zippin_return_data['franchisee_id'],
zippin_return_data['franchisee_invoice'])
zippin_return_data_unified = zippin_return_data.copy(deep=True)
zippin_return_data['type1'] = np.vectorize(mis.order_type_tag)(zippin_return_data['company'],zippin_return_data['type'],'breakup')
zippin_return_data_unified['type1'] = np.vectorize(mis.order_type_tag)(zippin_return_data_unified['company'],zippin_return_data_unified['type'],'unified')
logger.info('fetched zippin return data for selected period')
# workcell_return_data
workcell_return_data = mis.workcell_return_data()
workcell_return_data['fofo_distributor'] = np.vectorize(mis.fofo_final_distributor)(workcell_return_data['franchisee_id'],
workcell_return_data['franchisee_invoice'])
workcell_return_data_unified = workcell_return_data.copy(deep=True)
workcell_return_data['type1'] = np.vectorize(mis.order_type_tag)(workcell_return_data['company'],workcell_return_data['type'],'breakup')
workcell_return_data_unified['type1'] = np.vectorize(mis.order_type_tag)(workcell_return_data_unified['company'],workcell_return_data_unified['type'],'unified')
logger.info('fetched workcell return data for selected period')
# local_purchase_data
local_purchase_data = mis.local_purchase_data()
local_purchase_data['fofo_distributor'] = np.vectorize(mis.fofo_final_distributor)(local_purchase_data['franchisee_id'],local_purchase_data['franchisee_invoice'])
local_purchase_data_unified = local_purchase_data.copy(deep=True)
local_purchase_data['type1'] = np.vectorize(mis.order_type_tag)(local_purchase_data['company'],local_purchase_data['type'],'breakup')
    local_purchase_data_unified['type1'] = np.vectorize(mis.order_type_tag)(local_purchase_data_unified['company'], local_purchase_data_unified['type'], 'unified')
logger.info('fetched local purchase data for selected period')
# =============================================================================
# 1. GMV_gross_payment
# =============================================================================
# prerequisite = sales
breakup_gmv_gross_payment = mis.gmv_gross_payment(sales,stores)
unified_gmv_gross_payment = mis.gmv_gross_payment(sales_unified,stores)
logger.info('1. - GMV, Gross, Payment ran successfully')
# =============================================================================
# 2. netsale_tax_cogs
# =============================================================================
# prerequisite = sales, customer_returns
breakup_netsale_tax_cogs = mis.netsale_tax_cogs(sales,customer_returns,stores)
unified_netsale_tax_cogs = mis.netsale_tax_cogs(sales_unified,customer_returns_unified,stores)
logger.info('2. - Net sale, Taxes, COGS ran successfully')
breakup_master = pd.concat([breakup_gmv_gross_payment,breakup_netsale_tax_cogs],sort=True)
unified_master = pd.concat([unified_gmv_gross_payment,unified_netsale_tax_cogs], sort=True)
# =============================================================================
# 3. inventoryageing_nearexpiry
# =============================================================================
# prerequisite = inventory
near_expiry = mis.near_expiry(inventory,stores,'breakup')
near_expiry_unified = mis.near_expiry(inventory_unified,stores,'unified')
inventory_ageing = mis.inventory_ageing(inventory,stores,'breakup')
inventory_ageing_unified = mis.inventory_ageing(inventory_unified,stores,'unified')
logger.info('3. - Inventory ageing, Near expiry ran successfully')
breakup_master = pd.concat([breakup_master, inventory_ageing, near_expiry], sort=True)
unified_master = pd.concat([unified_master, inventory_ageing_unified, near_expiry_unified], sort=True)
# =============================================================================
# 4. Sales by volume
# =============================================================================
# prerequisite = sales
sale_by_volume = mis.sales_by_volume(sales,stores)
sale_by_volume_unified = mis.sales_by_volume(sales_unified,stores)
logger.info('4. - Sales by volume ran successfully')
breakup_master = pd.concat([breakup_master, sale_by_volume], sort=True)
unified_master = pd.concat([unified_master, sale_by_volume_unified], sort=True)
# =============================================================================
# 5. Gross revenue - Chronic and Acute
# =============================================================================
# prerequisite = sales, customer_returns
gross_rev_chronic_sale_vol, gross_rev_acute_sale_vol = mis.gross_rev_chronic_acute(sales,customer_returns,stores)
gross_rev_chronic_sale_vol_unified, gross_rev_acute_sale_vol_unified = mis.gross_rev_chronic_acute(sales_unified,customer_returns_unified,stores)
logger.info('5. - Gross revenue - Chronic and Acute ran successfully')
breakup_master = pd.concat([breakup_master, gross_rev_chronic_sale_vol, gross_rev_acute_sale_vol], sort=True)
unified_master = pd.concat([unified_master, gross_rev_chronic_sale_vol_unified, gross_rev_acute_sale_vol_unified], sort=True)
# =============================================================================
# 6. Cummulative consumers
# =============================================================================
# prerequisite = cumulative_consumers_data
cummulative_cons = mis.cummulative_cons(cumulative_consumers_data,'breakup')
cummulative_cons_unified = mis.cummulative_cons(cumulative_consumers_data,'unified')
logger.info('6. - Cummulative consumers ran successfully')
breakup_master = pd.concat([breakup_master, cummulative_cons], sort=True)
unified_master = pd.concat([unified_master, cummulative_cons_unified], sort=True)
# =============================================================================
# 7. Total customers (in MIS month)
# =============================================================================
# prerequisite = sales
total_cons_mis_month = mis.total_cons_mis_month(sales,stores)
total_cons_mis_month_unified = mis.total_cons_mis_month(sales_unified,stores)
logger.info('7. - Total customers (in MIS month) ran successfully')
breakup_master = pd.concat([breakup_master, total_cons_mis_month], sort=True)
unified_master = pd.concat([unified_master, total_cons_mis_month_unified], sort=True)
# =============================================================================
# 8. Customer type Category Wise Count
# =============================================================================
# prerequisite = sales
category_wise_customer_type_count = mis.category_wise_customer_type_count(sales,stores)
category_wise_customer_type_count_unified = mis.category_wise_customer_type_count(sales_unified,stores)
logger.info('8. - Customer type Category Wise Count ran successfully')
breakup_master = pd.concat([breakup_master, category_wise_customer_type_count], sort=True)
unified_master = pd.concat([unified_master, category_wise_customer_type_count_unified], sort=True)
# =============================================================================
# 9. New customers
# =============================================================================
# prerequisite = sales, cons_initial_bill_date
new_customers = mis.new_customers(sales,all_cons_initial_bill_date,stores)
new_customers_unified = mis.new_customers(sales_unified,all_cons_initial_bill_date,stores)
logger.info('9. - New Customers ran successfully')
breakup_master = pd.concat([breakup_master, new_customers], sort=True)
unified_master = pd.concat([unified_master, new_customers_unified], sort=True)
# =============================================================================
# 10. Total repeat consumers
# =============================================================================
# prerequisite = sales, cons_initial_bill_date
tot_repeat_consumers = mis.tot_repeat_consumers(sales,all_cons_initial_bill_date,stores)
tot_repeat_consumers_unified = mis.tot_repeat_consumers(sales_unified,all_cons_initial_bill_date,stores)
logger.info('10. - total repeat customers ran successfully')
breakup_master = pd.concat([breakup_master, tot_repeat_consumers], sort=True)
unified_master = pd.concat([unified_master, tot_repeat_consumers_unified], sort=True)
# =============================================================================
# 11. New consumers - value and volume
# =============================================================================
# prerequisite = sales, cons_initial_bill_date
new_cons_vol_qty = mis.new_cons_vol_qty(sales,all_cons_initial_bill_date,stores)
new_cons_vol_qty_unified = mis.new_cons_vol_qty(sales_unified,all_cons_initial_bill_date,stores)
logger.info('11. - New consumers - value and volume ran successfully')
breakup_master = pd.concat([breakup_master, new_cons_vol_qty], sort=True)
unified_master = pd.concat([unified_master, new_cons_vol_qty_unified], sort=True)
# =============================================================================
# 12. Total bills - new and repeat
# =============================================================================
# prerequisite = sales, cons_initial_bill_date
total_bills_new_repeat = mis.total_bills_new_repeat(sales,all_cons_initial_bill_date,stores,choose_year,choose_month)
total_bills_new_repeat_unified = mis.total_bills_new_repeat(sales_unified,all_cons_initial_bill_date,stores,choose_year,choose_month)
logger.info('12. - Total bills - new and repeat ran successfully')
breakup_master = pd.concat([breakup_master, total_bills_new_repeat], sort=True)
unified_master = pd.concat([unified_master, total_bills_new_repeat_unified], sort=True)
# =============================================================================
# 13. Total bills - chronic and acute
# =============================================================================
# prerequisite = sales, customer_returns
total_bills_chronic_acute = mis.total_bills_chronic_acute(sales,customer_returns,stores)
total_bills_chronic_acute_unified = mis.total_bills_chronic_acute(sales_unified,customer_returns_unified,stores)
logger.info('13. - Total bills - chronic and acute ran successfully')
breakup_master = pd.concat([breakup_master, total_bills_chronic_acute], sort=True)
unified_master = pd.concat([unified_master, total_bills_chronic_acute_unified], sort=True)
# =============================================================================
# 14. Bills per consumer - new and repeat
# =============================================================================
# prerequisite = sales, all_cons_initial_bill_date
bills_per_cons_new_repeat = mis.bills_per_cons_new_repeat(sales,all_cons_initial_bill_date,stores,choose_year,choose_month)
bills_per_cons_new_repeat_unified = mis.bills_per_cons_new_repeat(sales_unified,all_cons_initial_bill_date,stores,choose_year,choose_month)
logger.info('14. - Bills per consumer - new and repeat ran successfully')
breakup_master = pd.concat([breakup_master, bills_per_cons_new_repeat], sort=True)
unified_master = pd.concat([unified_master, bills_per_cons_new_repeat_unified], sort=True)
# =============================================================================
# 15. ABV - new, repeat and chronic
# =============================================================================
# prerequisite = sales, all_cons_initial_bill_date
abv_new_repeat_chronic = mis.abv_new_repeat_chronic(sales,all_cons_initial_bill_date,stores,choose_year,choose_month)
abv_new_repeat_chronic_unified = mis.abv_new_repeat_chronic(sales_unified,all_cons_initial_bill_date,stores,choose_year,choose_month)
logger.info('15. - ABV - new, repeat and chronic - new and repeat ran successfully')
breakup_master = pd.concat([breakup_master, abv_new_repeat_chronic], sort=True)
unified_master = pd.concat([unified_master, abv_new_repeat_chronic_unified], sort=True)
# =============================================================================
# 16. Items per consumer
# =============================================================================
# prerequisite = sales, all_cons_initial_bill_date
items_per_cons_new_repeat = mis.items_per_cons_new_repeat(sales,all_cons_initial_bill_date,stores,choose_year,choose_month)
items_per_cons_new_repeat_unified = mis.items_per_cons_new_repeat(sales_unified,all_cons_initial_bill_date,stores,choose_year,choose_month)
logger.info('16. - Items per consumer - new and repeat ran successfully')
breakup_master = pd.concat([breakup_master, items_per_cons_new_repeat], sort=True)
unified_master = pd.concat([unified_master, items_per_cons_new_repeat_unified], sort=True)
# =============================================================================
# 17. Total items sold- new and repeat
# =============================================================================
# prerequisite = sales, all_cons_initial_bill_date
tot_items_sold_new_repeat = mis.tot_items_sold_new_repeat(sales,all_cons_initial_bill_date,stores,choose_year,choose_month)
tot_items_sold_new_repeat_unified = mis.tot_items_sold_new_repeat(sales_unified,all_cons_initial_bill_date,stores,choose_year,choose_month)
logger.info('17. - Total items sold - new and repeat ran successfully')
breakup_master = pd.concat([breakup_master, tot_items_sold_new_repeat], sort=True)
unified_master = pd.concat([unified_master, tot_items_sold_new_repeat_unified], sort=True)
# =============================================================================
# 18. Generic customers
# =============================================================================
# prerequisite = sales, all_cons_initial_bill_date
generic_cons_overall_new = mis.generic_cons_overall_new(sales,all_cons_initial_bill_date,stores)
generic_cons_overall_new_unified = mis.generic_cons_overall_new(sales_unified,all_cons_initial_bill_date,stores)
logger.info('18. - Generic customers ran successfully')
breakup_master = pd.concat([breakup_master, generic_cons_overall_new], sort=True)
unified_master = pd.concat([unified_master, generic_cons_overall_new_unified], sort=True)
# =============================================================================
# 19. Power consumers - Count
# =============================================================================
# prerequisite = sales, all_cons_initial_bill_date
power_cons_overall_new = mis.power_cons_overall_new(sales,all_cons_initial_bill_date,stores,power_consumer_value)
power_cons_overall_new_unified = mis.power_cons_overall_new(sales_unified,all_cons_initial_bill_date,stores,power_consumer_value)
logger.info('19. - Power consumers - Count ran successfully')
breakup_master = pd.concat([breakup_master, power_cons_overall_new], sort=True)
unified_master = pd.concat([unified_master, power_cons_overall_new_unified], sort=True)
# =============================================================================
    # 20. Power consumers - Sales
# =============================================================================
# prerequisite = sales
power_consumers_sale = mis.power_consumers_sale(sales,stores,power_consumer_value,'breakup')
power_consumers_sale_unified = mis.power_consumers_sale(sales_unified,stores,power_consumer_value,'unified')
logger.info('20. - Power consumers - Sales ran successfully')
breakup_master = pd.concat([breakup_master, power_consumers_sale], sort=True)
unified_master = pd.concat([unified_master, power_consumers_sale_unified], sort=True)
# =============================================================================
# 21. Power consumer - Bills
# =============================================================================
# prerequisite = sales
power_cons_bills = mis.power_cons_bills(sales,stores,power_consumer_value)
power_cons_bills_unified = mis.power_cons_bills(sales_unified,stores,power_consumer_value)
logger.info('21. - Power consumers - Bills ran successfully')
breakup_master = pd.concat([breakup_master, power_cons_bills], sort=True)
unified_master = pd.concat([unified_master, power_cons_bills_unified], sort=True)
# =============================================================================
# 22. Home delivery
# =============================================================================
# prerequisite = sales, customer_returns, home_delivery_data
home_delivery = mis.home_delivery(sales,customer_returns,home_delivery_data,stores,delivery_bill_ids,'breakup')
home_delivery_unified = mis.home_delivery(sales_unified,customer_returns_unified,home_delivery_data_unified,stores,delivery_bill_ids,'unified')
logger.info('22. - Home delivery ran successfully')
breakup_master = pd.concat([breakup_master, home_delivery], sort=True)
unified_master = pd.concat([unified_master, home_delivery_unified], sort=True)
# =============================================================================
# 23. Purchase from Workcell
# =============================================================================
# prerequisite = purchase_from_wc_data
purchase_from_worckell = mis.purchase_from_worckell(purchase_from_wc_data,stores,'breakup')
purchase_from_worckell_unified = mis.purchase_from_worckell(purchase_from_wc_data_unified,stores,'unified')
logger.info('23. - Purchase from Workcell ran successfully')
breakup_master = pd.concat([breakup_master, purchase_from_worckell], sort=True)
unified_master = pd.concat([unified_master, purchase_from_worckell_unified], sort=True)
# =============================================================================
# 23b. Launch_stock Purchase from Workcell
# =============================================================================
# prerequisite = purchase_from_wc_data
launch_stock_purchase_from_worckell = mis.purchase_from_worckell(purchase_from_wc_data, stores, 'breakup',launch_flag='launch_stock')
launch_stock_purchase_from_worckell_unified = mis.purchase_from_worckell(purchase_from_wc_data_unified, stores, 'unified', launch_flag='launch_stock')
logger.info('23b. - Launch_stock Purchase from Workcell ran successfully')
breakup_master = pd.concat([breakup_master, launch_stock_purchase_from_worckell], sort=True)
unified_master = pd.concat([unified_master, launch_stock_purchase_from_worckell_unified], sort=True)
# =============================================================================
# 23c. Purchase from Workcell (including tax)
# =============================================================================
# prerequisite = purchase_from_wc_data
purchase_from_worckell_including_tax = mis.purchase_from_worckell_including_tax(purchase_from_wc_data, stores, 'breakup')
purchase_from_worckell_unified_including_tax = mis.purchase_from_worckell_including_tax(purchase_from_wc_data_unified, stores, 'unified')
    logger.info('23c. - Purchase from Workcell (including tax) ran successfully')
breakup_master = pd.concat([breakup_master, purchase_from_worckell_including_tax], sort=True)
unified_master = pd.concat([unified_master, purchase_from_worckell_unified_including_tax], sort=True)
# =============================================================================
    # 23d. Launch_stock Purchase from Workcell (including tax)
# =============================================================================
# prerequisite = purchase_from_wc_data
launch_stock_purchase_from_worckell_including_tax = mis.purchase_from_worckell_including_tax(purchase_from_wc_data, stores, 'breakup',
launch_flag='launch_stock')
launch_stock_purchase_from_worckell_unified_including_tax = mis.purchase_from_worckell_including_tax(purchase_from_wc_data_unified, stores,
'unified', launch_flag='launch_stock')
    logger.info('23d. - Launch_stock Purchase from Workcell (including tax) ran successfully')
breakup_master = pd.concat([breakup_master, launch_stock_purchase_from_worckell_including_tax], sort=True)
unified_master = pd.concat([unified_master, launch_stock_purchase_from_worckell_unified_including_tax], sort=True)
# =============================================================================
# 24. COGS for Workcell
# =============================================================================
# prerequisite = purchase_from_wc_data
cogs_for_wc = mis.cogs_for_wc(purchase_from_wc_data,stores,'breakup')
cogs_for_wc_unified = mis.cogs_for_wc(purchase_from_wc_data_unified,stores,'unified')
logger.info('24. - COGS from Workcell ran successfully')
breakup_master = pd.concat([breakup_master, cogs_for_wc], sort=True)
unified_master = pd.concat([unified_master, cogs_for_wc_unified], sort=True)
# =============================================================================
# 25. Return from Zippin
# =============================================================================
# prerequisite = zippin_return_data
return_from_zippin = mis.return_from_zippin(zippin_return_data,'breakup')
return_from_zippin_unified = mis.return_from_zippin(zippin_return_data_unified,'unified')
logger.info('25. - Return from Zippin ran successfully')
breakup_master = pd.concat([breakup_master, return_from_zippin], sort=True)
unified_master = pd.concat([unified_master, return_from_zippin_unified], sort=True)
# =============================================================================
# 26. Return from Workcell
# =============================================================================
# prerequisite = workcell_return_data
return_from_workcell = mis.return_from_workcell(workcell_return_data,'breakup')
return_from_workcell_unified = mis.return_from_workcell(workcell_return_data_unified,'unified')
logger.info('26. - Return from Workcell ran successfully')
breakup_master = pd.concat([breakup_master, return_from_workcell], sort=True)
unified_master = pd.concat([unified_master, return_from_workcell_unified], sort=True)
# =============================================================================
# 27. Total SKUs in stock
# =============================================================================
# prerequisite = inventory
total_sku_instock = mis.total_sku_instock(inventory, 'breakup')
total_sku_instock_unified = mis.total_sku_instock(inventory_unified, 'unified')
logger.info('27. - Total SKUs in stock ran successfully')
breakup_master = pd.concat([breakup_master, total_sku_instock], sort=True)
unified_master = pd.concat([unified_master, total_sku_instock_unified], sort=True)
# =============================================================================
# 28. Chronic Acute quantity
# =============================================================================
# prerequisite = inventory
chronic_acute_qty = mis.chronic_acute_qty(inventory,stores)
chronic_acute_qty_unified = mis.chronic_acute_qty(inventory_unified ,stores)
logger.info('28. - Chronic Acute quantity ran successfully')
breakup_master = pd.concat([breakup_master, chronic_acute_qty], sort=True)
unified_master = pd.concat([unified_master, chronic_acute_qty_unified], sort=True)
# =============================================================================
# 29. Local purchase
# =============================================================================
# prerequisite = local_purchase_data, sales
lp_chronic_acute = mis.lp_chronic_acute(local_purchase_data,sales)
lp_chronic_acute_unified = mis.lp_chronic_acute(local_purchase_data_unified ,sales_unified)
logger.info('29. - Local purchase ran successfully')
breakup_master = pd.concat([breakup_master, lp_chronic_acute], sort=True)
unified_master = pd.concat([unified_master, lp_chronic_acute_unified], sort=True)
# =============================================================================
# 30. Repeat consumer sales
# =============================================================================
# prerequisite = sales, all_cons_initial_bill_date
repeat_consumer_chronic_acute = mis.repeat_consumer_chronic_acute(sales,all_cons_initial_bill_date,stores,choose_year,choose_month)
repeat_consumer_chronic_acute_unified = mis.repeat_consumer_chronic_acute(sales_unified,all_cons_initial_bill_date,stores,choose_year,choose_month)
logger.info('30. - Repeat consumer sales ran successfully')
breakup_master = pd.concat([breakup_master, repeat_consumer_chronic_acute], sort=True)
unified_master = pd.concat([unified_master, repeat_consumer_chronic_acute_unified], sort=True)
# =============================================================================
# 31. Inventory 6 to 12 months
# =============================================================================
# prerequisite = inventory
inventory_6to12months = mis.inventory_6to12months(inventory,stores,'breakup')
inventory_6to12months_unified = mis.inventory_6to12months(inventory_unified,stores,'unified')
logger.info('31. - Inventory 6 to 12 months ran successfully')
breakup_master = pd.concat([breakup_master, inventory_6to12months], sort=True)
unified_master = pd.concat([unified_master, inventory_6to12months_unified], sort=True)
# =============================================================================
# 32. Zippin P &L COGS
# =============================================================================
# prerequisite = sales, customer_returns
zippin_pl_cogs = mis.zippin_pl_cogs(sales,customer_returns,stores)
zippin_pl_cogs_unified = mis.zippin_pl_cogs(sales_unified,customer_returns_unified,stores)
logger.info('32. - Zippin P &L COGS ran successfully')
breakup_master = pd.concat([breakup_master, zippin_pl_cogs], sort=True)
unified_master = pd.concat([unified_master, zippin_pl_cogs_unified], sort=True)
# =============================================================================
# 33. Repeat consumer other definition
# =============================================================================
# # prerequisite = None
# from datetime import date, timedelta
# from dateutil.relativedelta import relativedelta
#
#
# last_day_of_prev_month = date.today().replace(day=1) - timedelta(days=1)
#
# six_months_first_date = (date.today() + relativedelta(months=-6)).replace(day=1)
#
# last_day_of_prev_month = datetime.datetime.combine(last_day_of_prev_month, datetime.time(23,59,59)).strftime('%Y-%m-%d %H:%M:%S')
#
# six_months_first_date = datetime.datetime.combine(six_months_first_date, datetime.time(0,0,0)).strftime('%Y-%m-%d %H:%M:%S')
#
# # last_day_of_prev_month = '2021-02-28 23:59:59'
# # six_months_first_date = '2020-09-01 00:00:00'
#
# sales_data_for_repeat_customer = mis.sales_data_for_repeat_customer(six_months_first_date,last_day_of_prev_month)
#
# logger.info('fetched data for sales of last 6 months, for repeat customer other definition')
#
# repeat_cons_other_def_curr_month = mis.repeat_cons_other_def_curr_month(sales_data_for_repeat_customer,stores,choose_month)
#
# three_month_last_date = ((date.today() +relativedelta(months=-3)).replace(day=1) -timedelta(days=1))
#
# five_months_first_date = (three_month_last_date + relativedelta(months=-5)).replace(day=1)
#
# three_month_last_date = datetime.datetime.combine(three_month_last_date, datetime.time(23,59,59)).strftime('%Y-%m-%d %H:%M:%S')
#
# five_months_first_date = datetime.datetime.combine(five_months_first_date, datetime.time(0,0,0)).strftime('%Y-%m-%d %H:%M:%S')
#
# # three_month_last_date = '2020-11-30 23:59:59'
# # five_months_first_date = '2020-06-01 00:00:00'
#
# sales_data_for_repeat_customer2 = mis.sales_data_for_repeat_customer(five_months_first_date,three_month_last_date)
# logger.info('fetched data for sales of last n-3 to n-8 months, for repeat customer other definition')
#
# repeat_cons_other_def_past3_month = mis.repeat_cons_other_def_past3_month(sales_data_for_repeat_customer2,stores,choose_month)
#
# repeat_cons_other_def = pd.concat([repeat_cons_other_def_curr_month,
# repeat_cons_other_def_past3_month])
#
# logger.info('33. - Repeat consumer other definition ran successfully')
#
# breakup_master = pd.concat([breakup_master, repeat_cons_other_def], sort=True)
# unified_master = pd.concat([unified_master, repeat_cons_other_def], sort=True)
# =============================================================================
# 34. Drug count by type
# =============================================================================
# prerequisite = inventory
composition_count = mis.comp_count(inventory,'breakup')
composition_count_unified = mis.comp_count(inventory_unified,'unified')
logger.info('34. - Drug count by type ran successfully')
breakup_master = pd.concat([breakup_master, composition_count], sort=True)
unified_master = pd.concat([unified_master, composition_count_unified], sort=True)
# =============================================================================
# 35. Generic composition type
# =============================================================================
# prerequisite = None
generic_composition_count = mis.generic_composition_count()
generic_composition_count_unified = generic_composition_count.copy(deep = True)
logger.info('35. - Generic composition type ran successfully')
breakup_master = pd.concat([breakup_master, generic_composition_count], sort=True)
unified_master = pd.concat([unified_master, generic_composition_count_unified], sort=True)
# =============================================================================
# 36. Ethical margin
# =============================================================================
# prerequisite = None
ethical_margin = mis.ethical_margin()
ethical_margin_unified = ethical_margin.copy(deep = True)
logger.info('36. - Ethical margin ran successfully')
breakup_master = pd.concat([breakup_master, ethical_margin], sort=True)
unified_master = pd.concat([unified_master, ethical_margin_unified], sort=True)
# =============================================================================
# 37. Chronic customers buying generics
# =============================================================================
# prerequisite = Sales
chronic_generic_count = mis.chronic_generic_count(sales)
chronic_generic_count_unified = mis.chronic_generic_count(sales_unified)
logger.info('37. - Chronic customers buying generics ran successfully')
breakup_master = pd.concat([breakup_master, chronic_generic_count], sort=True)
unified_master = pd.concat([unified_master, chronic_generic_count_unified], sort=True)
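    # Final shaping before export: move the identifier/label columns to the front of
    # both master frames and drop order_source from the unified view (it is always 'all').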
cols_to_move = ['tag_flag', 'type1', 'category', 'age_bracket_',
'payment', 'count', 'margin']
breakup_master = breakup_master[ cols_to_move + [col for col in breakup_master.columns
if col not in cols_to_move]]
del unified_master['order_source']
unified_master = unified_master[ cols_to_move + [col for col in unified_master.columns
if col not in cols_to_move]]
# breakup_master.to_csv(r'D:\MIS Automation\data validation unified\37_breakup.csv')
# unified_master.to_csv(r'D:\MIS Automation\data validation unified\37_unified.csv')
logger.info('Successfully ran MIS breakup & unified snippet, Now compiling other files')
# =============================================================================
# fetching other files that we sent with MIS
# =============================================================================
other_files_ethical_margin = mis.other_files_ethical_margin()
logger.info('fetched other_files_ethical_margin')
other_files_distributor_margin = mis.other_files_distributor_margin()
logger.info('fetched other_files_distributor_margin')
other_files_inventory_at_dc_near_expiry = mis.other_files_inventory_at_dc_near_expiry()
logger.info('fetched other_files_inventory_at_dc_near_expiry')
goodaid_gross_return = mis.goodaid_gross_return()
logger.info("fetched goodaid_gross_return")
goodaid_zippin_inventory = mis.goodaid_zippin_inventory()
logger.info("fetched goodaid_zippin_inventory")
goodaid_dc_inventory = mis.goodaid_dc_inventory()
logger.info("fetched goodaid_dc_inventory")
goodaid_wh_inventory = mis.goodaid_wh_inventory()
logger.info("fetched goodaid_wh_inventory")
store_info = mis.store_info()
logger.info('fetched store info')
logger.info('MIS - other files complete')
# =============================================================================
# fetching FOFO MIS
# =============================================================================
logger.info('FOFO MIS - start')
breakup_master_fofo = pd.DataFrame()
unified_master_fofo = pd.DataFrame()
# =============================================================================
# 1. GMV_gross_payment
# =============================================================================
# prerequisite = sales
breakup_gmv_gross_payment = mis.gmv_gross_payment(sales,stores_fofo,fofo_tag='yes')
unified_gmv_gross_payment = mis.gmv_gross_payment(sales_unified,stores_fofo,fofo_tag='yes')
logger.info('1. - GMV, Gross, Payment ran successfully')
# =============================================================================
# 2. netsale_tax_cogs
# =============================================================================
# prerequisite = sales, customer_returns
breakup_netsale_tax_cogs = mis.netsale_tax_cogs(sales, customer_returns, stores,fofo_tag='yes')
unified_netsale_tax_cogs = mis.netsale_tax_cogs(sales_unified, customer_returns_unified, stores,fofo_tag='yes')
logger.info('2. - Net sale, Taxes, COGS ran successfully')
breakup_master_fofo = pd.concat([breakup_gmv_gross_payment,breakup_netsale_tax_cogs],sort=True)
unified_master_fofo = pd.concat([unified_gmv_gross_payment,unified_netsale_tax_cogs], sort=True)
# =============================================================================
# 3. inventoryageing_nearexpiry
# =============================================================================
# prerequisite = inventory
near_expiry = mis.near_expiry(inventory,stores,'breakup',fofo_tag='yes')
near_expiry_unified = mis.near_expiry(inventory_unified,stores,'unified',fofo_tag='yes')
inventory_ageing = mis.inventory_ageing(inventory,stores,'breakup',fofo_tag='yes')
inventory_ageing_unified = mis.inventory_ageing(inventory_unified,stores,'unified',fofo_tag='yes')
logger.info('3. - Inventory ageing, Near expiry ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, inventory_ageing, near_expiry], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, inventory_ageing_unified, near_expiry_unified], sort=True)
# =============================================================================
# 6. Cummulative consumers
# =============================================================================
# prerequisite = cumulative_consumers_fofo_data, Main file block 6
cummulative_cons_fofo = mis.cummulative_cons_fofo(workcell_cumulative_consumers_fofo_data,others_cumulative_consumers_fofo_data,'breakup')
cummulative_cons_unified_fofo = mis.cummulative_cons_fofo(workcell_cumulative_consumers_fofo_data,others_cumulative_consumers_fofo_data,'unified')
cummulative_cons_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps(cummulative_cons,cummulative_cons_fofo,['tag_flag'])
cummulative_cons_unified_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps(cummulative_cons_unified,cummulative_cons_unified_fofo, ['tag_flag'])
    # Known issue: acute consumers are derived as total minus chronic, so if the same consumer
    # buys acute items from the Workcell distributor and chronic items from another distributor,
    # some counts turn negative; the clean-up below nulls those out.
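    # Clean-up mechanics: a 1-based 'index' column is added so that the tag_flag /
    # fofo_distributor labels can be re-attached after masking; the element-wise ">0"
    # mask keeps only positive counts (masked cells become NaN, then 0, then NaN again),
    # and the label columns are merged back via the temporary 'index' column before it is dropped.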
cummulative_cons_fofo = cummulative_cons_fofo.reset_index()
cummulative_cons_fofo['index']= cummulative_cons_fofo['index'] + 1
cummulative_cons_fofo_tags = cummulative_cons_fofo[['index','tag_flag','fofo_distributor']]
cummulative_cons_unified_fofo = cummulative_cons_unified_fofo.reset_index()
cummulative_cons_unified_fofo['index']= cummulative_cons_unified_fofo['index'] + 1
cummulative_cons_unified_fofo_tags = cummulative_cons_unified_fofo[['index','tag_flag','fofo_distributor']]
cols_to_check = [x for x in cummulative_cons_fofo.columns if x not in ['tag_flag','fofo_distributor']]
cummulative_cons_fofo = cummulative_cons_fofo[cummulative_cons_fofo[cols_to_check]>0].fillna(0)
cummulative_cons_fofo.replace(0, np.nan, inplace=True)
del cummulative_cons_fofo['index']
cummulative_cons_fofo = cummulative_cons_fofo.reset_index()
cummulative_cons_fofo['index'] = cummulative_cons_fofo['index']+1
cummulative_cons_fofo = cummulative_cons_fofo[cols_to_check].merge(cummulative_cons_fofo_tags,how = 'left', on = 'index')
del cummulative_cons_fofo['index']
cummulative_cons_unified_fofo = cummulative_cons_unified_fofo[cummulative_cons_unified_fofo[cols_to_check]>0].fillna(0)
    cummulative_cons_unified_fofo.replace(0, np.nan, inplace=True)
del cummulative_cons_unified_fofo['index']
cummulative_cons_unified_fofo = cummulative_cons_unified_fofo.reset_index()
cummulative_cons_unified_fofo['index'] = cummulative_cons_unified_fofo['index']+1
cummulative_cons_unified_fofo = cummulative_cons_unified_fofo[cols_to_check].merge(cummulative_cons_unified_fofo_tags,how = 'left', on = 'index')
del cummulative_cons_unified_fofo['index']
logger.info('6. - Cummulative consumers ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, cummulative_cons_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, cummulative_cons_unified_fofo], sort=True)
# =============================================================================
# 7. Total customers (in MIS month)
# =============================================================================
# prerequisite = sales
total_cons_mis_month_fofo = mis.total_cons_mis_month(sales,stores,fofo_tag='yes')
total_cons_mis_month_unified_fofo = mis.total_cons_mis_month(sales_unified,stores, fofo_tag='yes')
total_cons_mis_month_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps(total_cons_mis_month,total_cons_mis_month_fofo,['tag_flag', 'order_source'])
total_cons_mis_month_unified_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps(total_cons_mis_month_unified,total_cons_mis_month_unified_fofo,['tag_flag', 'order_source'])
logger.info('7. - Total customers (in MIS month) ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, total_cons_mis_month_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, total_cons_mis_month_unified_fofo], sort=True)
# =============================================================================
# 12. Total bills - new and repeat
# =============================================================================
# prerequisite = sales, cons_initial_bill_date, Main file block 12
total_bills_new_repeat_fofo = mis.total_bills_new_repeat(sales, all_cons_initial_bill_date, stores, choose_year,
choose_month,fofo_tag='yes')
total_bills_new_repeat_unified_fofo = mis.total_bills_new_repeat(sales_unified, all_cons_initial_bill_date, stores,
choose_year, choose_month,fofo_tag='yes')
total_bills_new_repeat_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps(total_bills_new_repeat,total_bills_new_repeat_fofo,['tag_flag', 'order_source'])
total_bills_new_repeat_unified_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps(total_bills_new_repeat_unified,total_bills_new_repeat_unified_fofo,['tag_flag', 'order_source'])
logger.info('12. - Total bills - new and repeat ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, total_bills_new_repeat_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, total_bills_new_repeat_unified_fofo], sort=True)
# =============================================================================
# 13. Total bills - chronic and acute
# =============================================================================
# prerequisite = sales, customer_returns, Main file block 13
total_bills_chronic_acute_fofo = mis.total_bills_chronic_acute(sales,customer_returns,stores,fofo_tag='yes')
total_bills_chronic_acute_unified_fofo = mis.total_bills_chronic_acute(sales_unified,customer_returns_unified,stores,fofo_tag='yes')
total_bills_chronic_acute_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps(total_bills_chronic_acute,total_bills_chronic_acute_fofo,['tag_flag', 'order_source','type1','category'])
total_bills_chronic_acute_unified_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps(total_bills_chronic_acute_unified,total_bills_chronic_acute_unified_fofo,['tag_flag', 'order_source','type1','category'])
logger.info('13. - Total bills - chronic and acute ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, total_bills_chronic_acute_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, total_bills_chronic_acute_unified_fofo], sort=True)
# =============================================================================
# 18. Generic customers
# =============================================================================
# prerequisite = sales, all_cons_initial_bill_date, Main file block 18
generic_cons_overall_new_fofo = mis.generic_cons_overall_new(sales, all_cons_initial_bill_date, stores, fofo_tag='yes')
generic_cons_overall_new_unified_fofo = mis.generic_cons_overall_new(sales_unified, all_cons_initial_bill_date, stores, fofo_tag='yes')
generic_cons_overall_new_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps(generic_cons_overall_new, generic_cons_overall_new_fofo, ['tag_flag', 'order_source'])
generic_cons_overall_new_unified_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps( generic_cons_overall_new_unified, generic_cons_overall_new_unified_fofo,['tag_flag', 'order_source'])
logger.info('18. - Generic customers ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, generic_cons_overall_new_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, generic_cons_overall_new_unified_fofo], sort=True)
# =============================================================================
    # 20. Power consumers - Sales
# =============================================================================
# prerequisite = sales
power_consumers_sale = mis.power_consumers_sale(sales,stores,power_consumer_value,'breakup',fofo_tag='yes')
power_consumers_sale_unified = mis.power_consumers_sale(sales_unified,stores,power_consumer_value,'unified',fofo_tag='yes')
logger.info('20. - Power consumers - Sales ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, power_consumers_sale], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, power_consumers_sale_unified], sort=True)
# =============================================================================
# 21. Power consumer - Bills
# =============================================================================
# prerequisite = sales
power_cons_bills_fofo = mis.power_cons_bills(sales,stores,power_consumer_value,fofo_tag='yes')
power_cons_bills_unified_fofo = mis.power_cons_bills(sales_unified,stores,power_consumer_value,fofo_tag='yes')
power_cons_bills_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps(power_cons_bills, power_cons_bills_fofo, ['tag_flag', 'order_source'])
power_cons_bills_unified_fofo = mis.fofo_distributor_bifurcation_next_calculation_steps( power_cons_bills_unified, power_cons_bills_unified_fofo, ['tag_flag', 'order_source'])
logger.info('21. - Power consumers - Bills ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, power_cons_bills_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, power_cons_bills_unified_fofo], sort=True)
# =============================================================================
# 22. Home delivery
# =============================================================================
# prerequisite = sales, customer_returns, home_delivery_data
home_delivery_fofo = mis.home_delivery(sales, customer_returns, home_delivery_data, stores, delivery_bill_ids, 'breakup',fofo_tag='yes')
home_delivery_unified_fofo = mis.home_delivery(sales_unified, customer_returns_unified, home_delivery_data_unified,
stores, delivery_bill_ids, 'unified',fofo_tag='yes')
logger.info('22. - Home delivery ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, home_delivery_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, home_delivery_unified_fofo], sort=True)
# =============================================================================
# 23. Purchase from Workcell
# =============================================================================
# prerequisite = purchase_from_wc_data
purchase_from_worckell = mis.purchase_from_worckell(purchase_from_wc_data, stores, 'breakup',fofo_tag='yes')
purchase_from_worckell_unified = mis.purchase_from_worckell(purchase_from_wc_data_unified, stores, 'unified',fofo_tag='yes')
logger.info('23. - Purchase from Workcell ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, purchase_from_worckell], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, purchase_from_worckell_unified], sort=True)
# =============================================================================
# 23b. Launch Stock Purchase from Workcell
# =============================================================================
# prerequisite = purchase_from_wc_data
launch_stock_purchase_from_worckell = mis.purchase_from_worckell(purchase_from_wc_data, stores, 'breakup',fofo_tag='yes',launch_flag='launch_stock')
launch_stock_purchase_from_worckell_unified = mis.purchase_from_worckell(purchase_from_wc_data_unified, stores, 'unified',fofo_tag='yes',launch_flag='launch_stock')
logger.info('23b. - launch_stock Purchase from Workcell ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, launch_stock_purchase_from_worckell], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, launch_stock_purchase_from_worckell_unified], sort=True)
# =============================================================================
    # 23c. Purchase from Workcell (Including Tax)
# =============================================================================
# prerequisite = purchase_from_wc_data
purchase_from_worckell_including_tax = mis.purchase_from_worckell_including_tax(purchase_from_wc_data, stores, 'breakup',fofo_tag='yes')
purchase_from_worckell_unified_including_tax = mis.purchase_from_worckell_including_tax(purchase_from_wc_data_unified, stores, 'unified',fofo_tag='yes')
    logger.info('23c. - Purchase from Workcell (Including Tax) ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, purchase_from_worckell_including_tax], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, purchase_from_worckell_unified_including_tax], sort=True)
# =============================================================================
# 23d. Launch Stock Purchase from Workcell (Including Tax)
# =============================================================================
# prerequisite = purchase_from_wc_data
if len(launch_stock_purchase_from_worckell)>= 1:
launch_stock_purchase_from_worckell_including_tax = mis.purchase_from_worckell_including_tax(purchase_from_wc_data, stores, 'breakup',fofo_tag='yes',launch_flag='launch_stock')
launch_stock_purchase_from_worckell_unified_including_tax = mis.purchase_from_worckell_including_tax(purchase_from_wc_data_unified, stores, 'unified',fofo_tag='yes',launch_flag='launch_stock')
logger.info('23d. - launch_stock Purchase from Workcell(Including Tax) ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo,launch_stock_purchase_from_worckell_including_tax], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, launch_stock_purchase_from_worckell_unified_including_tax], sort=True)
# =============================================================================
# 24. COGS for Workcell
# =============================================================================
# prerequisite = purchase_from_wc_data
cogs_for_wc = mis.cogs_for_wc(purchase_from_wc_data, stores, 'breakup',fofo_tag='yes')
cogs_for_wc_unified = mis.cogs_for_wc(purchase_from_wc_data_unified, stores, 'unified',fofo_tag='yes')
    logger.info('24. - COGS for Workcell ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, cogs_for_wc], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, cogs_for_wc_unified], sort=True)
# =============================================================================
# 25. Return from Zippin
# =============================================================================
# prerequisite = zippin_return_data
return_from_zippin = mis.return_from_zippin(zippin_return_data, 'breakup',fofo_tag='yes')
return_from_zippin_unified = mis.return_from_zippin(zippin_return_data_unified, 'unified',fofo_tag='yes')
logger.info('25. - Return from Zippin ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, return_from_zippin], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, return_from_zippin_unified], sort=True)
# =============================================================================
# 26. Return from Workcell
# =============================================================================
# prerequisite = workcell_return_data
return_from_workcell = mis.return_from_workcell(workcell_return_data, 'breakup',fofo_tag='yes')
return_from_workcell_unified = mis.return_from_workcell(workcell_return_data_unified, 'unified',fofo_tag='yes')
logger.info('26. - Return from Workcell ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, return_from_workcell], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, return_from_workcell_unified], sort=True)
# =============================================================================
# 27. Total SKUs in stock
# =============================================================================
# prerequisite = inventory
total_sku_instock_fofo = mis.total_sku_instock(inventory, 'breakup',fofo_tag='yes')
total_sku_instock_unified_fofo = mis.total_sku_instock(inventory_unified, 'unified', fofo_tag='yes')
logger.info('27. - Total SKUs in stock ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, total_sku_instock_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, total_sku_instock_unified_fofo], sort=True)
# =============================================================================
# 29. Local purchase
# =============================================================================
# prerequisite = local_purchase_data, sales
lp_chronic_acute_fofo = mis.lp_chronic_acute(local_purchase_data, sales,fofo_tag='yes')
lp_chronic_acute_unified_fofo = mis.lp_chronic_acute(local_purchase_data_unified, sales_unified,fofo_tag='yes')
logger.info('29. - Local purchase ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, lp_chronic_acute_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, lp_chronic_acute_unified_fofo], sort=True)
# =============================================================================
# 32. Zippin P &L COGS
# =============================================================================
# prerequisite = sales, customer_returns
zippin_pl_cogs = mis.zippin_pl_cogs(sales, customer_returns, stores,fofo_tag='yes')
zippin_pl_cogs_unified = mis.zippin_pl_cogs(sales_unified, customer_returns_unified, stores,fofo_tag='yes')
logger.info('32. - Zippin P &L COGS ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, zippin_pl_cogs], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, zippin_pl_cogs_unified], sort=True)
# =============================================================================
# 35. Generic composition type
# =============================================================================
# prerequisite = None
generic_composition_count_fofo = mis.generic_composition_count()
generic_composition_count_unified_fofo = generic_composition_count_fofo.copy(deep=True)
logger.info('35. - Generic composition type ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, generic_composition_count_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, generic_composition_count_unified_fofo], sort=True)
# =============================================================================
# 36. Ethical margin
# =============================================================================
# prerequisite = None
ethical_margin_fofo = mis.ethical_margin_fofo()
ethical_margin_unified_fofo = ethical_margin_fofo.copy(deep=True)
logger.info('36. - Ethical margin ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, ethical_margin_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, ethical_margin_unified_fofo], sort=True)
# =============================================================================
# 37. Chronic customers buying generics
# =============================================================================
# prerequisite = Sales
chronic_generic_count_fofo = mis.chronic_generic_count(sales,fofo_tag='yes')
chronic_generic_count_unified_fofo = mis.chronic_generic_count(sales_unified,fofo_tag='yes')
logger.info('37. - Chronic customers buying generics ran successfully')
breakup_master_fofo = pd.concat([breakup_master_fofo, chronic_generic_count_fofo], sort=True)
unified_master_fofo = pd.concat([unified_master_fofo, chronic_generic_count_unified_fofo], sort=True)
# =============================================================================
# Uniform Format Even if data is not present
# =============================================================================
    # keep untouched copies of the raw FOFO frames before they are merged with the uniform format below
    breakup_master_fofo1 = breakup_master_fofo
    unified_master_fofo1 = unified_master_fofo
fofo_breakup_format = pd.read_csv(s3.download_file_from_s3(file_name="mis_format/fofo_breakup_format.csv"))
fofo_unified_format = pd.read_csv(s3.download_file_from_s3(file_name="mis_format/fofo_unified_format.csv"))
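    # The two format files act as a reference skeleton: outer-merging them with the computed
    # frames guarantees that every expected combination of tag_flag, type1, category, payment,
    # order_source and fofo_distributor appears in the output even when no data was generated
    # for it this month.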
breakup_master_fofo = fofo_breakup_format.merge(breakup_master_fofo,on = ['tag_flag','type1','category','age_bracket_','payment','order_source','fofo_distributor'],how = 'outer')
unified_master_fofo= fofo_unified_format.merge(unified_master_fofo,on = ['tag_flag','type1','category','age_bracket_','payment','order_source','fofo_distributor'],how = 'outer')
cols_to_move = ['tag_flag', 'type1', 'category', 'age_bracket_',
'payment', 'count', 'margin','fofo_distributor','order_source']
breakup_master_fofo = breakup_master_fofo[ cols_to_move + [col for col in breakup_master_fofo.columns
if col not in cols_to_move]]
cols_to_move = ['tag_flag', 'type1', 'category', 'age_bracket_',
'payment', 'count', 'margin','order_source']
unified_master_fofo = unified_master_fofo[ cols_to_move + [col for col in unified_master_fofo.columns
if col not in cols_to_move]]
# breakup_master_fofo.to_csv('breakup_master_fofo.csv')
# unified_master_fofo.to_csv('unified_master_fofo.csv')
logger.info('FOFO MIS fetch complete')
status = True
logger.info('MIS - all data fetch complete')
except Exception as error:
logger.info('MIS - MIS breakup & unified snippet run failed, with error - {}'.format(error))
status = False
finally:
if status:
status_final = 'Success'
else:
status_final = 'Failed'
email = Email()
if status:
breakup_uri = s3.save_df_to_s3(df=breakup_master, file_name='mis-main/final_breakup.csv')
unified_uri = s3.save_df_to_s3(df=unified_master, file_name='mis-main/final_unified.csv')
other_files_ethical_margin_uri = s3.save_df_to_s3(df=other_files_ethical_margin, file_name='mis-main/ethical_margin.csv')
other_files_inventory_at_dc_near_expiry_uri = s3.save_df_to_s3(df=other_files_inventory_at_dc_near_expiry, file_name='mis-main/inventory_at_dc_near_expiry.csv')
other_files_distributor_margin_uri = s3.save_df_to_s3(df=other_files_distributor_margin, file_name='mis-main/distributor_margin.csv')
goodaid_gross_return_uri = s3.save_df_to_s3(df=goodaid_gross_return, file_name='mis-main/goodaid_gross_return.csv')
goodaid_wh_inventory_uri = s3.save_df_to_s3(df=goodaid_wh_inventory, file_name='mis-main/goodaid_wh_inventory.csv')
goodaid_zippin_inventory_uri = s3.save_df_to_s3(df=goodaid_zippin_inventory, file_name='mis-main/goodaid_zippin_inventory.csv')
goodaid_dc_inventory_uri = s3.save_df_to_s3(df=goodaid_dc_inventory, file_name='mis-main/goodaid_dc_inventory.csv')
store_info_uri = s3.save_df_to_s3(df=store_info, file_name='mis-main/store_info.csv')
breakup_fofo_uri = s3.save_df_to_s3(df=breakup_master_fofo, file_name='mis-main/fofo_breakup.csv')
unified_fofo_uri = s3.save_df_to_s3(df=unified_master_fofo, file_name='mis-main/fofo_unified.csv')
email.send_email_file(subject='{} - MIS- {}/{}'.format(env,choose_month,choose_year),
mail_body= f"Dear MIS Team,\n"
f"\n"
f"Please find attached MIS for year - {choose_year}, month - {choose_month}\n"
f"Please review it at earliest\n"
f"\n"
f"Regards,\n"
f"Data Team\n",
to_emails=mis_email_to, file_uris=[breakup_uri,unified_uri,other_files_ethical_margin_uri,other_files_inventory_at_dc_near_expiry_uri,other_files_distributor_margin_uri,goodaid_gross_return_uri,goodaid_wh_inventory_uri,goodaid_zippin_inventory_uri, goodaid_dc_inventory_uri,store_info_uri,breakup_fofo_uri,unified_fofo_uri])
else:
email.send_email_file(subject='{}-{}-MIS-{}-{}'.format(env,status_final,choose_year,choose_month),
mail_body=f"MIS job failed, Please review it\n",
to_emails=email_to, file_uris=[])
    rs_db.close_connection()

# ===== end of file: glue-jobs/src/scripts/mis/mis_main.py =====
import os
import sys
import argparse
import pandas as pd
import datetime
import numpy as np
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper import helper
def short_book_data(rs_db=None, logger=None, cur_date=None, last_date=None, stores=None):
# AR/MS/PR data load
short_book_query = '''
select
case
when "auto-short" = 1
and "home-delivery" = 0
and "patient-id" = 4480 then 'Auto Short'
when "auto-short" = 1
and "home-delivery" = 0
and "patient-id" != 4480 then 'Manual Short'
when "auto-short" = 0
and "auto-generated" = 0
and "home-delivery" = 0 then 'Patient Request'
when "auto-short" = 0
and "auto-generated" = 0
and "home-delivery" = 1 then 'Patient Request with HD'
else 'Invalid'
end as request_type,
a.id,
a."store-id",
s.name as store_name,
a."ordered-distributor-id",
f.name as distributor_name,
a."drug-id",
a."drug-name",
b."type" as drug_type,
b."category",
c."drug-grade",
"inventory-at-creation",
"inventory-at-ordering",
"quantity",
"status",
a."created-at" as "sb-created-at",
a."invoiced-at",
a."dispatched-at",
a."received-at",
a."completed-at",
a."delivered-at",
date(a."created-at") as "created-date",
a."decline-reason"
from
"prod2-generico"."short-book-1" a
left join "prod2-generico".drugs b on
a."drug-id" = b.id
left join "prod2-generico"."drug-order-info" c on
a."drug-id" = c."drug-id"
and a."store-id" = c."store-id"
left join "prod2-generico"."stores" s on
a."store-id" = s.id
left join "prod2-generico"."distributors" f on
f."id" = a."ordered-distributor-id"
where
(("auto-short" = 1
and "home-delivery" = 0)
or ("auto-short" = 0
and "auto-generated" = 0))
and quantity > 0
and date(a."created-at") >= '{last_date}'
and date(a."created-at") <= '{cur_date}'
and a."store-id" in {stores}
'''.format(last_date=str(last_date), cur_date=str(cur_date), stores=stores)
short_book = rs_db.get_df(short_book_query)
short_book[['invoiced-at', 'dispatched-at', 'received-at', 'completed-at', 'delivered-at']] = \
short_book[['invoiced-at', 'dispatched-at', 'received-at', 'completed-at', 'delivered-at']]\
.astype(str).replace('0101-01-01 00:00:00','0000-00-00 00:00:00')
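    # Timestamps are cast to str and the '0101-01-01 00:00:00' placeholder is mapped to the
    # '0000-00-00 00:00:00' sentinel, so the fulfilment logic below can test a single
    # "not yet happened" marker instead of handling NULL/NaT separately.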
short_book.columns = list(map(
lambda st: str.replace(st, '-', '_'), list(short_book.columns.values)))
short_book['week_day'] = pd.to_datetime(short_book['sb_created_at']).dt.weekday
short_book['ops_ff_date'] = str(cur_date)
short_book['store_ff_date'] = str(cur_date)
logger.info('# of rows ' + str(short_book.shape[0]))
return short_book
def as_ms_fulfilment_calc(short_book=None, logger=None):
'''Auto Short Calculations'''
# filter auto short for fulfilment calculation
as_ms = short_book[short_book['request_type'].isin(['Auto Short', 'Manual Short'])]
# ops fulfilment calculation
as_ms['ops_ff_days'] = -1
# ops store days calculation
as_ms['ops_ff_days'] = np.where(
as_ms['invoiced_at'] == '0000-00-00 00:00:00',
-1,
(pd.to_datetime(
as_ms.loc[as_ms['invoiced_at'] != '0000-00-00 00:00:00', 'invoiced_at'],
format='%Y-%m-%d', errors='ignore').dt.date -
pd.to_datetime(as_ms['sb_created_at']).dt.date) / np.timedelta64(1, 'D')
)
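    # ops_ff_days stays -1 while the order is not yet invoiced; otherwise it is the calendar-day
    # gap between short-book creation and invoicing.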
as_ms.loc[(as_ms['invoiced_at'] != '0000-00-00 00:00:00'), 'ops_ff_date'] = pd.to_datetime(
as_ms.loc[(as_ms['invoiced_at'] != '0000-00-00 00:00:00'), 'invoiced_at'],
format='%Y-%m-%d', errors='ignore').dt.date
# correcting for order days when AS created on friday(4) and saturday (5)
as_ms.loc[(as_ms['week_day'] == 5) &
(as_ms['ops_ff_days'] > 0), 'ops_ff_days'] -= 1
as_ms.loc[(as_ms['week_day'] == 4) &
(as_ms['drug_type'].isin(['generic']) &
(as_ms['ops_ff_days'] > 0)), 'ops_ff_days'] -= 1
# setting ops status
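    # Bucketing: declined / lost / pending come straight from the short-book status; orders that
    # reached invoiced (or later) are bucketed by ops_ff_days into '24 hours' (<= 1 day),
    # '48 hours' (2 days), '72 hours' (3 days) and 'Delayed' (> 3 days).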
as_ms['ops_status'] = ''
as_ms.loc[as_ms['status'].isin(['declined']), 'ops_status'] = 'Declined'
as_ms.loc[as_ms['status'].isin(['lost', 'failed', 're-ordered']), 'ops_status'] = 'Lost'
as_ms.loc[as_ms['ops_status'].isin(['Declined']), 'ops_ff_date'] = as_ms.loc[
as_ms['ops_status'].isin(['Declined']), 'created_date']
as_ms.loc[as_ms['ops_status'].isin(['Lost']), 'ops_ff_date'] = as_ms.loc[
as_ms['ops_status'].isin(['Lost']), 'created_date']
as_ms.loc[as_ms['status'].isin(['saved', 'ordered']), 'ops_status'] = 'Pending'
as_ms.loc[(as_ms['status'].isin(['invoiced', 'dispatched', 'received'])) &
(as_ms['ops_ff_days'] <= 1), 'ops_status'] = '24 hours'
as_ms.loc[(as_ms['status'].isin(['invoiced', 'dispatched', 'received'])) &
(as_ms['ops_ff_days'] == 2), 'ops_status'] = '48 hours'
as_ms.loc[(as_ms['status'].isin(['invoiced', 'dispatched', 'received'])) &
(as_ms['ops_ff_days'] == 3), 'ops_status'] = '72 hours'
as_ms.loc[(as_ms['status'].isin(['invoiced', 'dispatched', 'received'])) &
(as_ms['ops_ff_days'] > 3), 'ops_status'] = 'Delayed'
as_ms.loc[as_ms['ops_status'] == '', 'ops_status'] = 'None'
logger.info(
'# of entries in Short book with no ops status' +
str(as_ms[as_ms['ops_status'] == 'None'].shape[0]))
# store fulfilment calculation
as_ms['store_ff_days'] = -1
# store store days calculation
as_ms['store_ff_days'] = np.where(
as_ms['received_at'] == '0000-00-00 00:00:00',
-1,
(pd.to_datetime(
as_ms.loc[as_ms['received_at'] != '0000-00-00 00:00:00', 'received_at'],
format='%Y-%m-%d', errors='ignore').dt.date -
pd.to_datetime(as_ms['sb_created_at']).dt.date).fillna(pd.to_timedelta('NaT')) / np.timedelta64(1, 'D')
)
as_ms.loc[(as_ms['received_at'] != '0000-00-00 00:00:00'), 'store_ff_date'] = pd.to_datetime(
as_ms.loc[(as_ms['received_at'] != '0000-00-00 00:00:00'), 'received_at'],
format='%Y-%m-%d', errors='ignore').dt.date
# correcting for order days when AS created on friday(4) and saturday (5)
as_ms.loc[(as_ms['week_day'] == 5) &
(as_ms['store_ff_days'] > 0), 'store_ff_days'] -= 1
as_ms.loc[(as_ms['week_day'] == 4) &
(as_ms['drug_type'].isin(['generic'])) &
(as_ms['store_ff_days'] > 0), 'store_ff_days'] -= 1
# setting store status
as_ms['store_status'] = ''
as_ms.loc[as_ms['status'].isin(['declined']), 'store_status'] = 'Declined'
as_ms.loc[as_ms['status'].isin(['lost', 'failed', 're-ordered']), 'store_status'] = 'Lost'
as_ms.loc[as_ms['store_status'].isin(['Declined']), 'store_ff_date'] = as_ms.loc[
as_ms['store_status'].isin(['Declined']), 'created_date']
as_ms.loc[as_ms['store_status'].isin(['Lost']), 'store_ff_date'] = as_ms.loc[
as_ms['store_status'].isin(['Lost']), 'created_date']
as_ms.loc[as_ms['status'].isin(['saved', 'ordered', 'invoiced', 'dispatched']), 'store_status'] = 'Pending'
as_ms.loc[(as_ms['status'].isin(['received'])) &
(as_ms['store_ff_days'] <= 1), 'store_status'] = '24 hours'
as_ms.loc[(as_ms['status'].isin(['received'])) &
(as_ms['store_ff_days'] == 2), 'store_status'] = '48 hours'
as_ms.loc[(as_ms['status'].isin(['received'])) &
(as_ms['store_ff_days'] == 3), 'store_status'] = '72 hours'
as_ms.loc[(as_ms['status'].isin(['received'])) &
(as_ms['store_ff_days'] > 3), 'store_status'] = 'Delayed'
as_ms.loc[as_ms['store_status'] == '', 'store_status'] = 'None'
    logger.info('# of entries in Short book with no store status ' +
                str(as_ms[as_ms['store_status'] == 'None'].shape[0]))
return as_ms
def pr_fulfilment_calc(short_book=None, logger=None):
'''Patient Request Calculations'''
# filter auto short for fulfilment calculation
pr = short_book[~short_book['request_type'].isin(['Auto Short', 'Manual Short'])]
# ops fulfilment calculation
pr['ops_ff_days'] = -1
# ops store days calculation
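    # For patient requests the fulfilment timestamp is invoiced_at, overridden by completed_at
    # when the request has been completed, so the later milestone wins.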
pr.loc[(pr['invoiced_at'] != '0000-00-00 00:00:00'), 'ops_ff_days'] = (pd.to_datetime(
pr.loc[(pr['invoiced_at'] != '0000-00-00 00:00:00'), 'invoiced_at'],
format='%Y-%m-%d', errors='ignore').dt.date -
pd.to_datetime(
pr.loc[(pr[
'invoiced_at'] != '0000-00-00 00:00:00'), 'sb_created_at']
).dt.date) / np.timedelta64(1, 'D')
pr.loc[(pr['invoiced_at'] != '0000-00-00 00:00:00'), 'ops_ff_date'] = pd.to_datetime(
pr.loc[(pr['invoiced_at'] != '0000-00-00 00:00:00'), 'invoiced_at'],
format='%Y-%m-%d', errors='ignore').dt.date
pr.loc[(pr['completed_at'] != '0000-00-00 00:00:00'), 'ops_ff_days'] = (pd.to_datetime(
pr.loc[(pr['completed_at'] != '0000-00-00 00:00:00'), 'completed_at'],
format='%Y-%m-%d', errors='ignore').dt.date -
pd.to_datetime(
pr.loc[(pr[
'completed_at'] != '0000-00-00 00:00:00'), 'sb_created_at']
).dt.date) / np.timedelta64(1, 'D')
pr.loc[(pr['completed_at'] != '0000-00-00 00:00:00'), 'ops_ff_date'] = pd.to_datetime(
pr.loc[(pr['completed_at'] != '0000-00-00 00:00:00'), 'completed_at'],
format='%Y-%m-%d', errors='ignore').dt.date
# correcting for order days when AS created on friday(4) and saturday (5)
pr.loc[(pr['week_day'] == 5) &
(pr['ops_ff_days'] > 0), 'ops_ff_days'] -= 1
pr.loc[(pr['week_day'] == 4) &
(pr['drug_type'].isin(['generic']) &
(pr['ops_ff_days'] > 0)), 'ops_ff_days'] -= 1
# setting ops status
pr['ops_status'] = ''
pr.loc[pr['status'].isin(['declined']), 'ops_status'] = 'Declined'
pr.loc[pr['status'].isin(['lost', 'failed', 're-ordered']), 'ops_status'] = 'Lost'
pr.loc[pr['ops_status'].isin(['Declined']), 'ops_ff_date'] = pr.loc[
pr['ops_status'].isin(['Declined']), 'created_date']
pr.loc[pr['ops_status'].isin(['Lost']), 'ops_ff_date'] = pr.loc[pr['ops_status'].isin(['Lost']), 'created_date']
pr.loc[pr['status'].isin(['saved', 'ordered']), 'ops_status'] = 'Pending'
pr.loc[(pr['status'].isin(['invoiced', 'dispatched', 'received', 'completed'])) &
(pr['ops_ff_days'] <= 1), 'ops_status'] = '24 hours'
pr.loc[(pr['status'].isin(['invoiced', 'dispatched', 'received', 'completed'])) &
(pr['ops_ff_days'] == 2), 'ops_status'] = '48 hours'
pr.loc[(pr['status'].isin(['invoiced', 'dispatched', 'received', 'completed'])) &
(pr['ops_ff_days'] == 3), 'ops_status'] = '72 hours'
pr.loc[(pr['status'].isin(['invoiced', 'dispatched', 'received', 'completed'])) &
(pr['ops_ff_days'] > 3), 'ops_status'] = 'Delayed'
pr.loc[pr['ops_status'] == '', 'ops_status'] = 'None'
logger.info(
'# of entries in Short book with no ops status' +
str(pr[pr['ops_status'] == 'None'].shape[0]))
# store fulfilment calculation
pr['store_ff_days'] = -1
# store store days calculation
pr.loc[(pr['received_at'] != '0000-00-00 00:00:00'), 'store_ff_days'] = (pd.to_datetime(
pr.loc[(pr['received_at'] != '0000-00-00 00:00:00'), 'received_at'],
format='%Y-%m-%d', errors='ignore').dt.date -
pd.to_datetime(
pr.loc[(pr[
'received_at'] != '0000-00-00 00:00:00'), 'sb_created_at']
).dt.date) / np.timedelta64(1, 'D')
pr.loc[(pr['received_at'] != '0000-00-00 00:00:00'), 'store_ff_date'] = pd.to_datetime(
pr.loc[(pr['received_at'] != '0000-00-00 00:00:00'), 'received_at'],
format='%Y-%m-%d', errors='ignore').dt.date
pr.loc[(pr['completed_at'] != '0000-00-00 00:00:00'), 'store_ff_days'] = (pd.to_datetime(
pr.loc[(pr['completed_at'] != '0000-00-00 00:00:00'), 'completed_at'],
format='%Y-%m-%d', errors='ignore').dt.date -
pd.to_datetime(
pr.loc[(pr[
'completed_at'] != '0000-00-00 00:00:00'), 'sb_created_at']
).dt.date) / np.timedelta64(1, 'D')
pr.loc[(pr['completed_at'] != '0000-00-00 00:00:00'), 'store_ff_date'] = pd.to_datetime(
pr.loc[(pr['completed_at'] != '0000-00-00 00:00:00'), 'completed_at'],
format='%Y-%m-%d', errors='ignore').dt.date
# correcting for order days when AS created on friday(4) and saturday (5)
pr.loc[(pr['week_day'] == 5) &
(pr['store_ff_days'] > 0), 'store_ff_days'] -= 1
pr.loc[(pr['week_day'] == 4) &
(pr['drug_type'].isin(['generic'])) &
(pr['store_ff_days'] > 0), 'store_ff_days'] -= 1
# setting store status
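    # Store-level status treats plain patient requests as fulfilled once 'received'/'completed',
    # while home-delivery requests count as fulfilled only at 'completed'; the day buckets
    # mirror the ops-status ones above.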
pr['store_status'] = ''
pr.loc[pr['status'].isin(['declined']), 'store_status'] = 'Declined'
pr.loc[pr['status'].isin(['lost', 'failed', 're-ordered']), 'store_status'] = 'Lost'
    pr.loc[pr['store_status'].isin(['Declined']), 'store_ff_date'] = pr.loc[
        pr['store_status'].isin(['Declined']), 'created_date']
    pr.loc[pr['store_status'].isin(['Lost']), 'store_ff_date'] = pr.loc[pr['store_status'].isin(['Lost']), 'created_date']
pr.loc[pr['status'].isin(['saved', 'ordered', 'invoiced', 'dispatched']) &
(pr['request_type'] == 'Patient Request'), 'store_status'] = 'Pending'
pr.loc[pr['status'].isin(['saved', 'ordered', 'invoiced', 'dispatched', 'received']) &
(pr['request_type'] == 'Patient Request with HD'), 'store_status'] = 'Pending'
pr.loc[(pr['status'].isin(['received', 'completed'])) &
(pr['store_ff_days'] <= 1) &
(pr['request_type'] == 'Patient Request'), 'store_status'] = '24 hours'
pr.loc[(pr['status'].isin(['received', 'completed'])) &
(pr['store_ff_days'] == 2) &
(pr['request_type'] == 'Patient Request'), 'store_status'] = '48 hours'
pr.loc[(pr['status'].isin(['received', 'completed'])) &
(pr['store_ff_days'] == 3) &
(pr['request_type'] == 'Patient Request'), 'store_status'] = '72 hours'
pr.loc[(pr['status'].isin(['received', 'completed'])) &
(pr['store_ff_days'] > 3) &
(pr['request_type'] == 'Patient Request'), 'store_status'] = 'Delayed'
pr.loc[(pr['status'].isin(['completed'])) &
(pr['store_ff_days'] <= 1) &
(pr['request_type'] == 'Patient Request with HD'), 'store_status'] = '24 hours'
pr.loc[(pr['status'].isin(['completed'])) &
(pr['store_ff_days'] == 2) &
(pr['request_type'] == 'Patient Request with HD'), 'store_status'] = '48 hours'
pr.loc[(pr['status'].isin(['completed'])) &
(pr['store_ff_days'] == 3) &
(pr['request_type'] == 'Patient Request with HD'), 'store_status'] = '72 hours'
pr.loc[(pr['status'].isin(['completed'])) &
(pr['store_ff_days'] > 3) &
(pr['request_type'] == 'Patient Request with HD'), 'store_status'] = 'Delayed'
pr.loc[pr['store_status'] == '', 'store_status'] = 'None'
    logger.info('# of entries in Short book with no store status ' +
                str(pr[pr['store_status'] == 'None'].shape[0]))
return pr
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="auto short fulfilment percentages.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
logger = get_logger()
s3 = S3()
# pg_write = PostGreWrite()
# pg_write.open_connection()
rs_db = DB()
rs_db.open_connection()
logger.info("Scripts begins")
status = False
err_msg = ''
table_name = 'ops-fulfillment'
schema = 'prod2-generico'
# getting dates for refreshing the fulfilment percentages
cur_date = datetime.datetime.now().date()
last_date = cur_date - datetime.timedelta(days=8)
# running as fulfilment func
logger.info('Latest date to pull' + str(cur_date))
logger.info('Last date to pull' + str(last_date))
try:
# getting store list
store_list_query = '''
select
distinct "store-id"
from
"prod2-generico"."short-book-1"
where
(("auto-short" = 1
and "home-delivery" = 0)
or ("auto-short" = 0
and "auto-generated" = 0))
and quantity > 0
and date("created-at") >= '{last_date}'
and date("created-at") <= '{cur_date}'
'''.format(last_date=last_date, cur_date=cur_date)
stores = rs_db.get_df(store_list_query)
stores = str(stores['store-id'].to_list()).replace('[', '(').replace(']', ')')
fptr_query = '''
select
"drug-id",
avg("final-ptr") as fptr
from
"prod2-generico"."inventory-1" i2
group by
"drug-id"
'''
fptr = rs_db.get_df(fptr_query)
fptr.columns = list(map(
lambda st: str.replace(st, '-', '_'), list(fptr.columns)))
rs_db.execute(""" delete from "prod2-generico"."ops-fulfillment" """)
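        # Full refresh: the table is emptied and re-populated with the last 8 days of
        # short-book entries (last_date to cur_date) fetched below.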
as_ms_fulfilment_all = pd.DataFrame()
pr_fulfilment_all = pd.DataFrame()
logger.info('getting data for store: ' + str(stores))
# getting short book info for last 8 days
short_book = short_book_data(
rs_db=rs_db, logger=logger, cur_date=cur_date,
last_date=last_date, stores=stores)
'''Auto short Manual short calculation'''
as_ms_fulfilment = as_ms_fulfilment_calc(
short_book=short_book, logger=logger)
as_ms_fulfilment = as_ms_fulfilment.merge(fptr, on='drug_id', how='left')
logger.info('AS/MS size ' + str(as_ms_fulfilment.shape[0]))
as_ms_fulfilment_all = as_ms_fulfilment_all.append(as_ms_fulfilment)
'''Patient request calculation'''
pr_fulfilment = pr_fulfilment_calc(
short_book=short_book, logger=logger)
pr_fulfilment = pr_fulfilment.merge(fptr, on='drug_id', how='left')
logger.info('PR size ' + str(pr_fulfilment.shape[0]))
pr_fulfilment_all = pr_fulfilment_all.append(pr_fulfilment)
fdf = pd.concat([as_ms_fulfilment_all, pr_fulfilment_all], ignore_index=True)
fdf['created-at'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
fdf['created-by'] = 'etl-automation'
fdf['updated-at'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
fdf['updated-by'] = 'etl-automation'
fdf['short-book-id'] = fdf['id']
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
fdf.columns = [c.replace('_', '-') for c in fdf.columns]
logger.info("writing to rs")
s3.write_df_to_db(df=fdf[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info("written to rs")
# logger.info("writing to postgres")
# pg_write.engine.execute(""" delete from ops_fulfillment """)
# fdf = fdf[['request-type', 'short-book-id', 'store-id', 'store-name',
# 'ordered-distributor-id', 'distributor-name', 'drug-id', 'drug-name',
# 'drug-type', 'category', 'drug-grade', 'inventory-at-creation',
# 'inventory-at-ordering', 'quantity', 'fptr', 'status', 'sb-created-at',
# 'invoiced-at', 'dispatched-at', 'received-at', 'completed-at',
# 'delivered-at', 'created-date', 'decline-reason', 'ops-ff-days',
# 'store-ff-days', 'week-day', 'ops-status', 'ops-ff-date',
# 'store-status', 'store-ff-date']]
# fdf.columns = [c.replace('-', '_') for c in fdf.columns]
# fdf.rename(columns={'sb_created_at': 'created_at'}, inplace=True)
# fdf['fptr'] = fdf['fptr'].astype(float)
# fdf['ops_ff_date'] = pd.to_datetime(fdf['ops_ff_date'])
# fdf['store_ff_date'] = pd.to_datetime(fdf['store_ff_date'])
# fdf.to_sql(
# name='ops_fulfillment', con=pg_write.engine, if_exists='append',
# chunksize=500, method='multi', index=False)
# logger.info("written to postgres")
status = True
except Exception as error:
status = False
logger.info("exception incurred")
err_msg = str(error)
logger.info(str(error))
finally:
rs_db.close_connection()
# pg_write.close_connection()
# Sending email
logger.info("sending email")
email = Email()
if status:
result = 'Success'
email.send_email_file(subject=f"ops_fulfillment ({env}): {result}",
mail_body=f"Run time: {datetime.datetime.now()} {err_msg}",
to_emails=email_to, file_uris=[])
else:
result = 'Failed'
email.send_email_file(subject=f"ops_fulfillment ({env}): {result}",
mail_body=f"{err_msg}",
to_emails=email_to, file_uris=[])
    logger.info("Script ended")

# ===== end of file: glue-jobs/src/scripts/ops_fulfillment/ops_fulfillment.py =====
# In[70]:
#!/usr/bin/env python
# coding: utf-8
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
import argparse
import pandas as pd
from datetime import date
from datetime import datetime
import dateutil.relativedelta
import numpy as np
# In[71]:
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected],[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
# In[72]:
env = args.env
os.environ['env'] = env
# In[73]:
rs_db = DB()
s3=S3()
# In[74]:
rs_db.open_connection()
# In[75]:
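# The query below picks inventory whose invoice has no invoice-reference, which this script
# treats as local purchases, and joins the sales made against that inventory so the purchase-side
# values (net-value, vat) can be compared with the selling rate and GST per inventory id.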
q_aa='''
select
a.id as "inventory-id",
a."invoice-item-id" ,
b.id as "invoice-item-id-1",
a."batch-number" ,
a.expiry ,
b.vat ,
c."invoice-date" ,
d."type" ,
d."drug-name" ,
s."name" as "store-name",
a."created-at" as "inventory-created-at" ,
"barcode-reference" ,
d.hsncode ,
a."drug-id" ,
c."invoice-number",
AVG(a.quantity) as "quantity" ,
AVG(a."locked-for-check") as "locked-for-check" ,
AVG(a."transferred-out-quantity") as "transferred-out-quantity",
AVG(a."transferred-in-quantity") as "transferred-in-quantity",
AVG(a."locked-for-audit") as "locked-for-audit",
AVG(a."locked-for-return") as "locked-for-return" ,
AVG(a."locked-for-transfer") as "locked-for-transfer",
AVG(a.ptr )as "actual-fptr",
AVG(b."actual-quantity") as "actual-quantity" ,
AVG(b."net-value") as "net-value" ,
SUM(s2."net-quantity") as "net-quantity" ,
AVG(s2.rate) as "selling-rate",
AVG(s2."cgst-rate" +s2."sgst-rate" +s2."igst-rate") as "GST"
from
"prod2-generico"."inventory-1" a
left join "prod2-generico"."invoices-1" c on
a."franchisee-invoice-id" = c.id
left join "prod2-generico"."invoice-items-1" b on
a."invoice-item-id" = b.id
left join "prod2-generico".drugs d on
b."drug-id" = d.id
left join "prod2-generico".stores s on
a."store-id" = s.id
left join "prod2-generico".sales s2 on a.id =s2."inventory-id"
where
c."invoice-reference" is null
group by
a.id ,
a."invoice-item-id" ,
b.id ,
a."batch-number" ,
a.expiry ,
b.vat ,
c."invoice-date" ,
d."type" ,
d."drug-name" ,
s."name",
a."created-at" ,
"barcode-reference" ,
d.hsncode ,
a."drug-id" ,
c."invoice-number"
'''
# In[95]:
local_purchase_loss=rs_db.get_df(query=q_aa)
# In[96]:
# In[97]:
local_purchase_loss[['actual-fptr','vat','net-value','selling-rate','gst']]=local_purchase_loss[['actual-fptr','vat','net-value','selling-rate','gst']].apply(pd.to_numeric, errors='ignore').astype('float64')
# In[98]:
# In[99]:
# In[101]:
created_at = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
local_purchase_loss['created-at']=datetime.strptime(created_at,"%Y-%m-%d %H:%M:%S")
updated_at = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
local_purchase_loss['updated-at']=datetime.strptime(updated_at,"%Y-%m-%d %H:%M:%S")
local_purchase_loss['created-by'] = 'etl-automation'
local_purchase_loss['updated-by'] = 'etl-automation'
# In[102]:
# In[103]:
local_purchase_loss['barcode-reference']=local_purchase_loss['barcode-reference'].fillna(0)
local_purchase_loss['barcode-reference']=local_purchase_loss['barcode-reference'].astype(str)
local_purchase_loss['barcode-reference'].replace(['0', '0.0'], '', inplace=True)
# In[104]:
local_purchase_loss[['invoice-item-id','invoice-item-id-1','actual-quantity','net-quantity']]=local_purchase_loss[['invoice-item-id','invoice-item-id-1','actual-quantity','net-quantity']].apply(pd.to_numeric, errors='ignore').astype('Int64')
# In[105]:
local_purchase_loss['invoice-date'] = pd.to_datetime(local_purchase_loss['invoice-date'], infer_datetime_format=True,errors = 'coerce')
# In[ ]:
schema = "prod2-generico"
table_name = "purchase-loss-accounts"
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# In[ ]:
# In[90]:
#Writing to table
s3.write_df_to_db(df=local_purchase_loss[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
# In[ ]:
rs_db.close_connection()

# ===== end of file: glue-jobs/src/scripts/local_purchase_loss/local_pur_loss.py =====
""
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
import pandas as pd
import dateutil
import datetime
from dateutil.tz import gettz
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-et', '--email_to', default="[email protected],[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
s3 = S3()
status = 'Failed'
try:
# Read from gsheet
gs = GoogleSheet()
spreadsheet_id = "1-WRZbBTVX1ANeKPmc2I1kyj2XkG5hu2L-Tau9Fb6lWw"
schema = 'test-generico'
if env == 'prod':
spreadsheet_id = "1tFHCTr3CHdb0UOFseK_ntjAUJSHQHcjLmysPPCWRM04"
schema = 'prod2-generico'
ast_data = gs.download(data={
"spreadsheet_id": spreadsheet_id,
"sheet_name": "Sheet1",
"listedFields": []
})
df = pd.DataFrame(ast_data)
df['store-id'] = 4
df['is-active'] = 1
df['created-by'] = 'data team'
df['updated-by'] = 'data team'
df['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
today= datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df.columns = [c.replace('_', '-') for c in df.columns]
df['drug-id'] = df['drug-id'].astype(str)
df = df[['store-id', 'drug-id', 'is-active', 'created-at', 'created-by', 'updated-at', 'updated-by']]
df.drop_duplicates(subset=["store-id", "drug-id"],
keep=False, inplace=True)
# existing data
ast_q = f"""
select id,
`drug-id`,
`store-id`,
`is-active`
from
`{schema}`.`store-drug-assortment`
"""
ast_table = pd.read_sql_query(ast_q, mysql_write.connection)
ast_table['drug-id'] = ast_table['drug-id'].astype(str)
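    # Sync strategy: first mark every existing assortment row inactive, then re-activate the
    # rows whose drug-id is still present in the sheet, and finally append the sheet rows that
    # have no match in the table yet.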
# set active to zero for below records
mysql_write.engine.execute(f"UPDATE `{schema}`.`store-drug-assortment` SET `is-active` = 0, `updated-at`='{today}'")
left_data = df.merge(ast_table, on=["drug-id"], how='left')
# set active to one for common records
keep_left_data = left_data.loc[left_data['id'].notnull()]
# append below new records
append_left_data = left_data.loc[left_data['id'].isnull()]
    # build the IN-clause by hand so that a single id does not render as "(123,)", which is invalid SQL
    id_list = keep_left_data['id'].dropna().unique().tolist()
    tuple_id = "(" + ", ".join(str(int(i)) for i in id_list) + ")" if id_list else "(0)"
    # update
    update_query = f"UPDATE `{schema}`.`store-drug-assortment` SET `is-active` = 1, `updated-at`='{today}' where `id` in {tuple_id}"
    mysql_write.engine.execute(update_query)
    print(update_query)
    print(tuple_id)
# append
append_left_data.drop(['id', 'store-id_y', 'is-active_y'], axis=1, inplace=True)
append_left_data = append_left_data.reset_index()
append_left_data.rename({'store-id_x': 'store-id', 'is-active_x': 'is-active', 'index': 'id'}, axis=1, inplace=True)
ast_table_max_id = ast_table['id'].max()
if np.isnan(ast_table_max_id):
ast_table_max_id = 0
append_left_data['id'] = append_left_data['id'] + ast_table_max_id + 1
append_left_data['drug-id'] = append_left_data['drug-id'].astype('int')
append_left_data['created-at'] = pd.to_datetime(append_left_data['created-at'])
append_left_data['updated-at'] = pd.to_datetime(append_left_data['updated-at'])
append_left_data.drop_duplicates(subset=["store-id", "drug-id"],
keep=False, inplace=True)
append_left_data.to_sql(
name='store-drug-assortment',
con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=1000)
status = 'Success'
except Exception as error:
    logger.info(f"store-drug-assortment sync failed with error - {error}")
    status = 'Failed'
email = Email()
email.send_email_file(subject=f"{env}-{status} : store-drug-assortment table update",
mail_body=f"table update status - {status}",
to_emails=email_to, file_uris=[])
rs_db.close_connection()
mysql_write.close()

# ===== end of file: glue-jobs/src/scripts/store-drug-assortment-mw/store-drug-assortment-mw.py =====
""
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import pandas as pd
import dateutil
import datetime
from dateutil.tz import gettz
import numpy as np
from datetime import timedelta
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'store-drug-assortment-count'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
try:
# count of drugs in assortment
df_q = """
select
sda."store-id","type",
count(distinct sda."drug-id")"drug-id-count",
count(distinct case when d."is-validated" ='Yes' then sda."drug-id" end) "verified-drug-count",
count(distinct case when (quantity)>0 then sda."drug-id" end) "available-count"
from
"prod2-generico"."store-drug-assortment" sda
left join "prod2-generico".drugs d
on sda."drug-id" = d.id
left join (select "drug-id",sum(quantity + "locked-quantity") quantity from "prod2-generico"."inventory-1" i
where i."store-id" =4 group by 1) i
on sda."drug-id" = i."drug-id" and quantity >0
where sda."is-active"=1 and sda."store-id" =4
group by
1,2
"""
df = rs_db.get_df(df_q)
# etl
df['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df['created-by'] = 'etl-automation'
df['updated-by'] = 'etl-automation'
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where date("created-at")=current_date'''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=df[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
except Exception as error:
    logger.info(f"error: {error}")
    raise
finally:
    rs_db.close_connection()

# ===== end of file: glue-jobs/src/scripts/store-drug-assortment-mw/store-drug-assortment-count.py =====
import argparse
import sys
sys.path.append('../../../..')
import os
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "group-activation-system-level"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
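    # Full refresh: the table is emptied and re-filled with, for each drug "group" (restricted
    # to drugs of company-id 6984), the earliest inventory creation date and the earliest bill
    # date observed across the system.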
query = f"""
insert into
"prod2-generico"."{table_name}" (
"created-by",
"created-at",
"updated-by",
"updated-at",
"group",
"system-first-inv-date",
"system-first-bill-date")
select
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
y."group",
max(y.system_first_inv_date) as "system-first-inv-date",
max(y.system_first_bill_date) as "system-first-bill-date"
from
(
select
d1."group", MIN(b."created-at") as "system_first_bill_date", MIN(c."created-at") as "system_first_inv_date"
from
"prod2-generico"."inventory-1" c
left join "prod2-generico"."bill-items-1" a on
c."id" = a."inventory-id"
left join "prod2-generico"."bills-1" b on
b."id" = a."bill-id"
left join "prod2-generico"."drugs" d on
d."id" = c."drug-id"
left join "prod2-generico"."drug-unique-composition-mapping" d1 on
c."drug-id" = d1."drug-id"
where
d1."group" is not null
and d."company-id" = 6984
group by
d1."group" ) as y
group by
y."group"
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
##Vacuum Clean
# clean = f"""
# VACUUM full "prod2-generico"."composition-activation-system-level";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
    rs_db.close_connection()

# ===== end of file: glue-jobs/src/scripts/group-activation-system-level/group-activation-system-level.py =====
import argparse
import sys
sys.path.append('../../../..')
import os
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "composition-activation-system-level"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert into
"prod2-generico"."{table_name}" (
"created-by",
"created-at",
"updated-by",
"updated-at",
"composition-master-id",
"system-first-inv-date",
"system-first-bill-date")
select
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
y."composition-master-id",
max(y.system_first_inv_date) as "system-first-inv-date",
max(y.system_first_bill_date) as "system-first-bill-date"
from
(
select
d."composition-master-id", MIN(b."created-at") as "system_first_bill_date", MIN(c."created-at") as "system_first_inv_date"
from
"prod2-generico"."inventory-1" c
left join "prod2-generico"."bill-items-1" a on
c."id" = a."inventory-id"
left join "prod2-generico"."bills-1" b on
b."id" = a."bill-id"
left join "prod2-generico"."drugs" d on
d."id" = c."drug-id"
where
d."composition-master-id" is not null
and d."company-id" = 6984
group by
d."composition-master-id" ) as y
group by y."composition-master-id"
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
##Vacuum Clean
# clean = f"""
# VACUUM full "prod2-generico"."composition-activation-system-level";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
    rs_db.close_connection()

# ===== end of file: glue-jobs/src/scripts/composition-activation-system-level/composition-activation-system-level.py =====
import argparse
import os
import sys
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import MySQL, DB
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-mtp', '--main_table_prefix', default="NA", type=str, required=False)
parser.add_argument('-ttp', '--temp_table_prefix', default="pre", type=str, required=False)
parser.add_argument('-bs', '--batch_size', default=10, type=int, required=False)
parser.add_argument('-td', '--total_drugs', default=30, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
batch_size = args.batch_size
total_drugs = args.total_drugs
main_table_prefix = args.main_table_prefix
temp_table_prefix = f"-{args.temp_table_prefix}"
main_table_prefix = "" if main_table_prefix == "NA" else main_table_prefix
os.environ['env'] = env
logger = get_logger()
table_name = "drug-sold-summary"
""" Setting the schema name as per the env """
if env == "dev":
ms_source_schema = "test-generico"
rs_source_schema = "test-generico"
elif env == "stage":
ms_source_schema = "test-generico"
rs_source_schema = "test-generico"
elif env == "prod":
ms_source_schema = "prod2-generico"
rs_source_schema = "prod2-generico"
else:
raise Exception("Set the env first!")
ms_target_schema = ms_source_schema
temp_table_name = f"`{ms_target_schema}`.`{table_name}{temp_table_prefix}`"
main_table_name = f"`{ms_target_schema}`.`{table_name}{main_table_prefix}`"
logger.info(f"temp_table_name: {temp_table_name}")
logger.info(f"main_table_name: {main_table_name}")
def get_drug_summary_mysql(mysql_read_db, batch=1, batch_size=10000):
query = f"""
select
i.`drug-id` ,
COUNT(distinct b2.`patient-id`) as `sold-count`
from
`{ms_source_schema}`.`inventory-1` i
inner join `{ms_source_schema}`.`bill-items-1` bi on
i.id = bi.`inventory-id`
inner join `{ms_source_schema}`.`bills-1` b2 on
bi.`bill-id` = b2.id
group by
i.`drug-id`
order by
i.`drug-id`
LIMIT {batch_size} OFFSET {(batch - 1) * batch_size};
"""
logger.info(f"query to get the data: {query}")
df = pd.read_sql_query(con=mysql_read_db.connection, sql=query)
return df
def get_drug_summary_redshift(redshift_read_db, batch=1, batch_size=10000):
query = f"""
select
i."drug-id" ,
COUNT(distinct b2."patient-id") as "sold-count"
from
"{rs_source_schema}"."inventory-1" i
inner join "{rs_source_schema}"."bill-items-1" bi on
i.id = bi."inventory-id"
inner join "{rs_source_schema}"."bills-1" b2 on
bi."bill-id" = b2.id
group by
i."drug-id"
order by
i."drug-id"
"""
logger.info(f"query to get the data: {query}")
df = redshift_read_db.get_df(query=query)
return df
def get_drug_summary_doctor_redshift(redshift_read_db, batch=1, batch_size=10000):
query = f"""
select
i."drug-id",
COUNT(distinct b2."patient-id") as "sold-to-doctors-count"
from
"{rs_source_schema}"."inventory-1" i
inner join "{rs_source_schema}"."bill-items-1" bi on
i.id = bi."inventory-id"
inner join "{rs_source_schema}"."bills-1" b2 on
bi."bill-id" = b2.id
inner join "{rs_source_schema}".patients p on
b2."patient-id" = p.id
where
lower(p."name") like 'dr %'
group by
i."drug-id"
order by
i."drug-id"
"""
logger.info(f"query to get the data: {query}")
df = redshift_read_db.get_df(query=query)
return df
mysql_write_db = MySQL(read_only=False)
mysql_write_db.open_connection()
# Truncate the temp table before starting
query = f""" delete from {temp_table_name};"""
mysql_write_db.engine.execute(query)
logger.info(f"deleted from temp table, query: {query}")
#
# mysql_read_db = MySQL()
# mysql_read_db.open_connection()
redshift_read_db = DB()
redshift_read_db.open_connection()
# query = "SELECT count(id) as `drug-count` FROM `{ms_source_schema}`.drugs "
# c_df = pd.read_sql_query(con=mysql_read_db.connection, sql=query)
# drug_summary_df = pd.DataFrame()
# total_drugs = 125000
# for i in range(1, round(total_drugs / batch_size) + 1):
# temp_df = get_drug_summary_mysql(mysql_read_db=mysql_read_db, batch=i,
# batch_size=batch_size) # to consider NA moles drugs
# drug_summary_df = pd.concat([drug_summary_df, temp_df])
# logger.info(f"fetched batch: {i}")
# mysql_read_db.close()
drug_summary_df = get_drug_summary_redshift(redshift_read_db=redshift_read_db)
drug_doctor_summary_df = get_drug_summary_doctor_redshift(redshift_read_db=redshift_read_db)
drug_summary_df = drug_summary_df.merge(drug_doctor_summary_df, how="left", on="drug-id")
drug_summary_df.fillna(0, inplace=True)
drug_summary_df['sold-to-doctors-count'] = drug_summary_df['sold-to-doctors-count'].astype(int)
redshift_read_db.close_connection()
total_count = len(drug_summary_df)
logger.info(f"Total drug count: {total_count}")
# store the data in the temp table
drug_summary_df[['drug-id', 'sold-count', 'sold-to-doctors-count']].to_sql(
con=mysql_write_db.engine, name=f"{table_name}{temp_table_prefix}", schema=ms_target_schema,
if_exists="append", chunksize=500, index=False)
# Delete the drugs from main table which are NOT in the temp table.
query = f""" DELETE FROM t1 USING {main_table_name} t1 LEFT JOIN {temp_table_name} t2 ON
t1.`drug-id` = t2.`drug-id` where t2.`drug-id` is null ;"""
response = mysql_write_db.engine.execute(query)
logger.info(
f"Deleted the records from main table, which are absent in temp table: {response.rowcount}")
# Delete the data from temp table which is already present in main table
query = f""" DELETE FROM t1 USING {temp_table_name} t1 INNER JOIN {main_table_name} t2 ON
t1.`drug-id` = t2.`drug-id` where t1.`sold-count` = t2.`sold-count`
and t1.`sold-to-doctors-count` = t2.`sold-to-doctors-count`; """
response = mysql_write_db.engine.execute(query)
present_correct_count = response.rowcount
logger.info(f"Correct drug-ids count: {present_correct_count}")
# Delete the changed (stale) drug records from the main table; they get re-inserted from temp
query = f""" DELETE FROM t1 USING {main_table_name} t1 INNER JOIN {temp_table_name} t2 ON
t1.`drug-id` = t2.`drug-id`;"""
response = mysql_write_db.engine.execute(query)
present_incorrect_count = response.rowcount
logger.info(f"Incorrect drug-ids count: {present_incorrect_count}")
# Now Insert the records in main table
query = f""" INSERT INTO {main_table_name} (`drug-id`, `sold-count`, `sold-to-doctors-count`)
SELECT `drug-id`, `sold-count`, `sold-to-doctors-count` FROM {temp_table_name} """
response = mysql_write_db.engine.execute(query)
new_insert_count = response.rowcount
logger.info(f"Insert/Update drug-ids count: {new_insert_count}")
mysql_write_db.close()
if total_count == present_correct_count + new_insert_count:
logger.info("Drug sold-count data updated successfully")
else:
raise Exception("Data count mismatch") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/drug-sold-summary/drug_sold_summary.py | drug_sold_summary.py |
import os
import sys
import numpy as np
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
import argparse
import pandas as pd
from datetime import datetime
from dateutil.tz import gettz
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-ns', '--new_store_id_list', default="", type=str, required=False)
parser.add_argument('-pr', '--proxy_store_list', default="", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
new_store_id_list = args.new_store_id_list
proxy_store_list = args.proxy_store_list
email_to = args.email_to
email = Email()
if new_store_id_list and type(new_store_id_list) == str:
new_store_id_list = int(new_store_id_list)
else:
email.send_email_file(subject="nso_launch_stock_failed",
mail_body="please insert the new_store_id",
to_emails=email_to)
mssg = "wrong input parameters passed as store id list is {} and type of list " \
"is {}".format(new_store_id_list, type(new_store_id_list))
raise Exception(mssg)
if proxy_store_list and type(proxy_store_list) == str:
proxy_store_list = tuple([int(x) for x in proxy_store_list.split(",")])
else:
email.send_email_file(subject="nso_launch_stock_failed",
mail_body="please insert at-least two proxy_stores",
to_emails=email_to)
mssg = "wrong input parameters passed as proxy store list is {} and type of list " \
"is {}".format(proxy_store_list, type(proxy_store_list))
raise Exception(mssg)
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# Current Date
run_date = str(datetime.now(tz=gettz('Asia/Kolkata')))
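# Assortment build, in order:
#   1. base demand from proxy-store sales + PR (patient-request) loss over the last 90 days
#   2. top-80% "other" category drugs sold across the network
#   3. generic adjustments against the warehouse generic portfolio
#   4. drugs sold in more than 80% of mature stores across the network
#   5. GoodAid substitution for compositions in the GoodAid portfolio
#   6. CFR search signals from the proxy stores
# The merged list is enriched with drug info and average PTR, uploaded to S3 and emailed.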
# Proxy stores sales history available in last 90 days
proxy = '''
select
sa."store-id", (max(date(sa."created-at"))-min(date(sa."created-at"))) as days
from
"prod2-generico".sales as sa
where
sa."store-id" In {store_id}
and date(sa."created-at")
between date(trunc(GETDATE()) -interval '91 days')
and date(trunc(GETDATE()) -interval '1 days')
group by sa."store-id" '''.format(store_id=proxy_store_list)
proxy=rs_db.get_df(query=proxy)
# Base Logic
# Sales data in last 90 days
q_sales_data = '''select
"drug-id","store-id" ,SUM("net-quantity") as "net-quantity"
from
"prod2-generico".sales sa
where
sa."store-id" In {store_id}
and date(sa."created-at") between date(trunc(GETDATE()) -interval '91 days')
and date(trunc(GETDATE()) -interval '1 days')
group by "drug-id","store-id" '''.format(store_id=proxy_store_list)
sales_data=rs_db.get_df(query=q_sales_data)
# PR Loss in last 90 days
pr_loss_query = '''
select
cpr."drug-id",
cpr."store-id" ,
sum(cpr."loss-quantity") as "loss-quantity"
from
"prod2-generico"."cfr-patient-request" cpr
where
cpr."store-id" in {store_id}
and date(cpr."shortbook-date") between date(current_date -interval '91 days') and date(current_date -interval '1 days')
and cpr."drug-id" <> -1
and cpr."loss-quantity" > 0
group by
cpr."drug-id",
cpr."store-id"
'''.format(store_id=proxy_store_list)
pr_loss=rs_db.get_df(query=pr_loss_query)
pr_loss.columns = [c.replace('_', '-') for c in pr_loss.columns]
merge_demand = pd.merge(sales_data, pr_loss, on=['store-id', 'drug-id'], how='outer')
merge_demand['net-quantity'].fillna(0, inplace=True)
merge_demand['loss-quantity'].fillna(0, inplace=True)
merge_demand['loss-quantity']=merge_demand['loss-quantity'].astype(np.float64)
merge_demand['total_demand'] = (merge_demand['net-quantity'] + merge_demand['loss-quantity'])
merge_demand = merge_demand.drop(["net-quantity", "loss-quantity"], axis=1)
merge_demand.columns = [c.replace('_', '-') for c in merge_demand.columns]
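# Scale the 90-day demand to a per-month rate for each proxy store, sum across stores,
# and keep drugs whose average monthly demand per proxy store exceeds 0.8 units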
base_logic = pd.merge(merge_demand, proxy, how='left', on='store-id')
base_logic['avg_per_month'] = (base_logic['total-demand'] / base_logic['days']) * 30
base_logic = base_logic.groupby(['drug-id']).sum()
base_logic = base_logic.drop(['store-id', 'total-demand', 'days'], axis=1)
avg_net_quan = base_logic[['avg_per_month']] / (len(proxy_store_list))
avg_net_quan = avg_net_quan[avg_net_quan['avg_per_month'] > 0.8]
base_logic = pd.merge(base_logic, avg_net_quan, left_index=True, right_index=True)
# Other Category
# Top 80 % other category drugs by quantity sold in last 90 days across the network
q_other_cat = '''
select
"drug-id" ,
"drug-name" ,
"type" ,
composition ,
SUM("net-quantity") as "net-quantity"
from
"prod2-generico".sales sa
where
sa."type" not in ('ethical', 'generic')
and date(sa."created-at") between date(trunc(GETDATE()) -interval '91 days') and date(trunc(GETDATE()) -interval '1 days')
group by
"drug-id",
"drug-name",
"type",
composition
order by SUM("net-quantity") desc
'''
other_cat=rs_db.get_df(query=q_other_cat)
a = other_cat['net-quantity'].sum() * 0.8
other_cat = other_cat[other_cat['net-quantity'].cumsum() < a]
# Generic Adjustment
# Warehouse generic Portfolio
q_generic_wh_portfolio = '''
select
composition,
"drug-id" as "drug-id"
from
(
select
d.composition,
wssm."drug-id",
d."drug-name" ,
SUM(sh."net-quantity"),
rank() over (partition by d.composition
order by
SUM(sh."net-quantity") desc) as rn
from
"prod2-generico"."wh-sku-subs-master" wssm
left join "prod2-generico".drugs d on wssm."drug-id" =d.id
left join "prod2-generico".sales sh on
wssm."drug-id" =sh."drug-id"
where
wssm."add-wh" = 'Yes'
and d."type" ='generic'
group by
d.composition ,
wssm."drug-id" ,
d."drug-name") a
where
a.rn = 1;
'''
# All the drugs in warehouse generic portfolio
q_all_generic_portfolio = '''
select
distinct d.composition,
wssm."drug-id" as "drug-id"
from
"prod2-generico"."wh-sku-subs-master" wssm
left join "prod2-generico".drugs d on wssm."drug-id" =d.id
where
d."type" ='generic'
and wssm."add-wh" = 'Yes'
'''
generic_wh_portfolio =rs_db.get_df(query=q_generic_wh_portfolio)
generic_wh_all_portfolio=rs_db.get_df(query=q_all_generic_portfolio)
# common_drug_adjustment
# Drug sold in 80 % of stores
t_1 = '''
select COUNT(a."store-id") from
(select
sa."store-id", (max(date(sa."created-at"))-min(date(sa."created-at"))) as days
from
"prod2-generico".sales as sa
where
date(sa."created-at") between date(trunc(GETDATE()) -interval '91 days') and date(trunc(GETDATE()) -interval '1 days')
group by sa."store-id" ) a
where
a.days >= 90;
'''
sales_90_days=rs_db.get_df(query=t_1)
total_stores = int(sales_90_days.iloc[0])
# 80 % of stores
total_stores_perc = round(0.8 * total_stores)
q_common_drug = '''
select
*
from
(
select
sa."drug-id",
COUNT(distinct sa."store-id") as "store-count"
from
"prod2-generico".sales as sa
where
date(sa."created-at") between date(trunc(GETDATE()) -interval '91 days') and date(trunc(GETDATE()) -interval '1 days')
and
sa."store-id"
in (
select a."store-id" from
(select
sa."store-id", (max(date(sa."created-at"))-min(date(sa."created-at"))) as days
from
"prod2-generico".sales as sa
where
date(sa."created-at") between date(trunc(GETDATE()) -interval '91 days') and date(trunc(GETDATE()) -interval '1 days')
group by sa."store-id" ) a
where
a.days >= 90)
group by
sa."drug-id" ) b
'''
rs_db.execute(query=q_common_drug, params=None)
common_drug: pd.DataFrame = rs_db.cursor.fetch_dataframe()
common_drug = common_drug[common_drug['store-count'] > total_stores_perc]
q_goodaid = '''select
composition,
"drug-id" as "drug-id"
from
(
select
d.composition,
wssm."drug-id",
d."drug-name"
from
"prod2-generico"."wh-sku-subs-master" wssm
left join "prod2-generico".drugs d on wssm."drug-id"=d.id
where
wssm."add-wh" = 'Yes'
and d."company-id" = 6984
group by
d.composition ,
wssm."drug-id",
d."drug-name") a'''
goodaid_wh_portfolio=rs_db.get_df(query=q_goodaid)
# CFR-1 part-1
# Drug Search repeated in last four month in proxy stores
q_cfr_1 = '''
select
"drug-id" as "drug-id"
from
(
select
"drug-id",
"drug-name-y" ,
Count(distinct month_year) as m_y
from
(
select
to_char(cs."search-date", 'YYYY-MM') as month_year ,
cs."drug-id" ,
cs."drug-name-y"
from
"prod2-generico"."cfr-searches-v2" cs
where
date(cs."search-date") between date(date_trunc('month', current_date -interval '4 month')) and date(date_trunc('month', current_date)- interval '1 day')
and cs."store-id" in {store_id}
and cs."final-lost-sales" >0
group by
month_year ,
cs."drug-id",
cs."drug-name-y") a
group by
a."drug-id",
a."drug-name-y"
having
Count(distinct month_year)= 4) a
'''.format(store_id=proxy_store_list)
cfr_search_1 = rs_db.execute(query=q_cfr_1, params=None)
cfr_search_1: pd.DataFrame = rs_db.cursor.fetch_dataframe()
#cfr_search_1=rs_db.get_df(query=q_cfr_1)
# CFR Logic-1 part-2
# Drug search repeated in 3-month (Rolling) (last four months CFR history considered)
q_cfr_2 = '''select
"drug-id" as "drug-id"
from
(
select
a."drug-id",
MONTHS_BETWEEN (date(date_trunc('month', max("search-date"))) , date(date_trunc('month', min("search-date")))) as diff,
date(date_trunc('month', max("search-date"))),date(date_trunc('month', min("search-date"))),
Count(distinct month_year) as m_y
from
(
select
cs."search-date",
to_char(cs."search-date", 'YYYY-MM') as month_year ,
cs."drug-id" ,
cs."drug-name-y"
from
"prod2-generico"."prod2-generico"."cfr-searches-v2" cs
where
date(cs."search-date") between date(date_trunc('month', current_date -interval '4 month')) and date(date_trunc('month', current_date)- interval '1 day')
and cs."store-id" in {store_id}
and cs."final-lost-sales" >0
group by
cs."search-date",
month_year ,
cs."drug-id",
cs."drug-name-y") a
group by
a."drug-id"
having
Count(distinct month_year)= 3) b
where
b.diff = 2
'''.format(store_id=proxy_store_list)
#cfr_search_2=rs_db.get_df(query=q_cfr_2)
cfr_search_2 = rs_db.execute(query=q_cfr_2, params=None)
cfr_search_2: pd.DataFrame = rs_db.cursor.fetch_dataframe()
# CFR Logic-2
# Drug search repeated in last two month throughout the proxy stores.
q_cfr_3 = '''select
"drug-id" as "drug-id"
from
(
select
"drug-id",
"drug-name-y" ,
Count(distinct month_year) as m_y,
Count(distinct "store-id") as stores
from
(
select
to_char(cs."search-date", 'YYYY-MM') as month_year,
cs."store-id" ,
cs."drug-id" ,
cs."drug-name-y"
from
"prod2-generico"."cfr-searches-v2" cs
where
date(cs."search-date") between date(date_trunc('month', current_date -interval '2 month')) and date(date_trunc('month', current_date)- interval '1 day')
and cs."store-id" in {store_id}
and cs."final-lost-sales" >0
group by
month_year ,
cs."store-id",
cs."drug-id",
cs."drug-name-y") a
group by
a."drug-id",
a."drug-name-y"
having
Count(distinct month_year)= 2
and Count(distinct "store-id")= {proxy_store}) b '''.format(store_id=proxy_store_list,proxy_store=len(proxy_store_list))
#cfr_search_3=rs_db.get_df(query=q_cfr_3)
cfr_search_3 = rs_db.execute(query=q_cfr_3, params=None)
cfr_search_3: pd.DataFrame = rs_db.cursor.fetch_dataframe()
# If a CFR query returned no rows, fetch_dataframe gives None; fall back to an empty
# frame with a 'drug-id' column so the downstream merges still work
if cfr_search_1 is None:
    cfr_search_1 = pd.DataFrame(columns=['drug-id'])
if cfr_search_2 is None:
    cfr_search_2 = pd.DataFrame(columns=['drug-id'])
if cfr_search_3 is None:
    cfr_search_3 = pd.DataFrame(columns=['drug-id'])
drug_info = '''
select
d.id as "drug-id" ,
d."drug-name" as "drug-name",
d."type" as "drug-type" ,
d."company-id" as "company-id",
d.company as company,
d.category as "drug-category",
d.composition as composition
from
"prod2-generico".drugs d
'''
drug_info=rs_db.get_df(drug_info)
# ptr-info
ptr_info = '''
select
i2."drug-id" ,avg(ptr) as "avg-ptr"
from
"prod2-generico"."inventory-1" i2
where
date(i2."created-at") between date(trunc(GETDATE()) -interval '366 days') and date(trunc(GETDATE()) -interval '1 days')
group by
i2."drug-id"
'''
avg_ptr=rs_db.get_df(query=ptr_info)
base_logic = base_logic.drop(["avg_per_month_x", "avg_per_month_y"], axis=1)
other_cat = other_cat.drop(["drug-name", "type", "composition", "net-quantity"], axis=1)
generic_wh_portfolio = generic_wh_portfolio.drop(['composition'], axis=1)
common_drug = common_drug.drop(['store-count'], axis=1)
goodaid_wh_portfolio = goodaid_wh_portfolio.drop(['composition'], axis=1)
# Merging with drug info
base_logic_new = pd.merge(base_logic, drug_info,
how='left', on=['drug-id'])
other_cat_new = pd.merge(other_cat, drug_info, how='left', on=['drug-id'])
generic_wh_portfolio_new = pd.merge(generic_wh_portfolio, drug_info, how='left', on=['drug-id'])
common_drug_new = pd.merge(common_drug, drug_info, how='left', on=['drug-id'])
goodaid_wh_portfolio_new = pd.merge(goodaid_wh_portfolio, drug_info, how='left', on=['drug-id'])
cfr_search_1_new = pd.merge(cfr_search_1, drug_info, how='left', on=['drug-id'])
cfr_search_2_new = pd.merge(cfr_search_2, drug_info, how='left', on=['drug-id'])
cfr_search_3_new = pd.merge(cfr_search_3, drug_info, how='left', on=['drug-id'])
rs_db.close_connection()
# Dropping the duplicated (if any)
base_logic_new = base_logic_new.drop_duplicates()
other_cat_new = other_cat_new.drop_duplicates()
generic_wh_portfolio_new = generic_wh_portfolio_new.drop_duplicates()
common_drug_new = common_drug_new.drop_duplicates()
goodaid_wh_portfolio_new = goodaid_wh_portfolio_new.drop_duplicates()
cfr_search_1_new = cfr_search_1_new.drop_duplicates()
cfr_search_2_new = cfr_search_2_new.drop_duplicates()
cfr_search_3_new = cfr_search_3_new.drop_duplicates()
base_logic_new['source'] = 'proxy_stores_base_logic'
other_cat_new['source'] = 'network_other_category'
generic_wh_portfolio_new['source'] = 'wh_generic'
common_drug_new['source'] = 'network_common_drugs'
goodaid_wh_portfolio_new['source'] = 'wh_goodaid'
cfr_search_1_new['source'] = 'CFR-1'
cfr_search_2_new['source'] = 'CFR-1'
cfr_search_3_new['source'] = 'CFR-2'
# merging base logic and other category Adjustment-1
merge_1 = pd.merge(base_logic_new, other_cat_new, how='outer',
left_on=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'],
right_on=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'])
merge_1['source_x'] = merge_1['source_x'].fillna('network_other_category')
merge_1 = merge_1.drop(['source_y'], axis=1)
merge_1 = merge_1.rename(columns={"source_x": "source"})
# Dropping Duplicates
merge_1 = merge_1.drop_duplicates(subset=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'])
# Generic Adjustment
generic = merge_1[merge_1['drug-type'] == 'generic']
non_generic = merge_1[merge_1['drug-type'] != 'generic']
# Generic Composition
generic_composition = generic[["composition"]]
# Unique Generic Composition
generic_composition_unique = generic_composition.drop_duplicates()
# Warehouse Generic Portfolio
generic_portfolio = generic_wh_portfolio_new[["composition"]]
# Compositions not part of base logic
portfolio_extra = generic_portfolio[
~generic_portfolio['composition'].isin(generic_composition_unique['composition'])]
generic_1 = pd.merge(generic_composition, generic, how='left', on=['composition'])
generic_1 = generic_1.drop_duplicates()
portfolio_extra_1 = pd.merge(portfolio_extra, generic_wh_portfolio_new, how='left', on=['composition'])
portfolio_extra_1 = portfolio_extra_1.drop_duplicates()
# Merging portfolio extra drugs and base logic generics
merge_2 = pd.concat([generic_1, portfolio_extra_1])
# Generic Adjustment-2
generic = merge_2[merge_2['drug-type'] == 'generic']
generic_drugs = generic[['drug-id']]
generic_drugs_composition = generic[['drug-id', 'composition']]
# All generic drugs in warehouse portfolio
generic_wh_all_portfolio_drug = generic_wh_all_portfolio[['drug-id']]
# Generic drugs not in the portfolio
generic_drug_not_in_portfolio = generic_drugs[~generic_drugs['drug-id'].isin(generic_wh_all_portfolio['drug-id'])]
# Generic drugs in the portfolio
generic_drug_in_portfolio = generic_drugs[generic_drugs['drug-id'].isin(generic_wh_all_portfolio['drug-id'])]
# Merging the generic drug in portfolio
generic_drug_in_portfolio = pd.merge(generic_drug_in_portfolio, generic, how='left', on='drug-id')
generic_drug_not_in_portfolio_composition = pd.merge(generic_drug_not_in_portfolio, generic_drugs_composition,
how='left', on='drug-id')
# replaced the drugs with highest selling drugs in that composition from warehouse portfolio
generic_adjustment_2 = pd.merge(generic_drug_not_in_portfolio_composition['composition'], generic_wh_portfolio_new,
how='inner',
on='composition')
# dropping duplicates
generic_adjustment_2 = generic_adjustment_2.drop_duplicates()
generic_adjustment_2 = generic_adjustment_2[
['drug-id', 'composition', 'drug-name', 'drug-type', 'company-id', 'company', 'drug-category', 'source']]
generic_composition_not_in_generic_portfolio = generic_drug_not_in_portfolio_composition[
~generic_drug_not_in_portfolio_composition['composition'].isin(generic_wh_portfolio_new['composition'])]
generic_composition_not_in_generic_portfolio = pd.merge(generic_composition_not_in_generic_portfolio, generic,
how='left', on=['drug-id', 'composition'])
merge_2 = pd.concat(
[generic_composition_not_in_generic_portfolio, generic_adjustment_2, generic_drug_in_portfolio])
merge_2 = merge_2.drop_duplicates(subset=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'])
merge_2 = merge_2[
['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition', 'source']]
# merging the non-generic and generic drug after generic adjustment-2
merge_3 = pd.concat([non_generic, merge_2])
# Common Drug Adjustment
# Merging the with common drug adjustment
merge_4 = pd.merge(merge_3, common_drug_new, how='outer',
left_on=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'],
right_on=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'])
merge_4['source_x'] = merge_4['source_x'].fillna('network_common_drugs')
merge_4 = merge_4.drop(['source_y'], axis=1)
merge_4 = merge_4.rename(columns={"source_x": "source"})
merge_4 = merge_4.drop_duplicates(subset=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'])
# Goodaid Adjustment
generic_2 = merge_4[merge_4['drug-type'] == 'generic']
non_generic_2 = merge_4[merge_4['drug-type'] != 'generic']
generic_composition_2 = generic_2[["composition"]]
# Goodaid composition
goodaid_composition = goodaid_wh_portfolio_new[['composition']]
# Composition which is part of goodaid portfolio
generic_removal = generic_composition_2[generic_composition_2['composition'].isin(goodaid_composition['composition'])]
# Composition not part of goodaid portfolio
generic_without_GA = generic_2[~generic_2['composition'].isin(generic_removal['composition'])]
df_goodaid = pd.merge(generic_removal, goodaid_wh_portfolio_new, how='left', on=['composition'])
df_goodaid = df_goodaid.drop_duplicates()
df_goodaid['source'] = 'wh_goodaid'
df_goodaid = df_goodaid[
['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition', 'source']]
merge_5 = pd.concat([generic_without_GA, df_goodaid])
merge_5 = pd.concat([non_generic_2, merge_5])
# CFR-Search
merge_6 = pd.merge(merge_5, cfr_search_1_new, how='outer',
on=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'])
merge_6['source_x'] = merge_6['source_x'].fillna('CFR-1')
merge_6 = merge_6.drop(['source_y'], axis=1)
merge_6 = merge_6.rename(columns={"source_x": "source"})
merge_7 = pd.merge(merge_6, cfr_search_2_new, how='outer',
on=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'])
merge_7['source_x'] = merge_7['source_x'].fillna('CFR-1')
merge_7 = merge_7.drop(['source_y'], axis=1)
merge_7 = merge_7.rename(columns={"source_x": "source"})
merge_8 = pd.merge(merge_7, cfr_search_3_new, how='outer',
on=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'])
merge_8['source_x'] = merge_8['source_x'].fillna('CFR-2')
merge_8 = merge_8.drop(['source_y'], axis=1)
merge_8 = merge_8.rename(columns={"source_x": "source"})
merge_8 = merge_8.drop_duplicates(subset=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'])
# CFR Adjustment-1 (Generic)
generic_wh_all_portfolio_drug = generic_wh_all_portfolio[['drug-id']]
generic_3 = merge_8[merge_8['drug-type'] == 'generic']
generic_drugs_3 = generic_3[['drug-id']]
non_generic_3 = merge_8[merge_8['drug-type'] != 'generic']
generic_drug_in_portfolio_3 = generic_drugs_3[generic_drugs_3['drug-id'].isin(generic_wh_all_portfolio_drug['drug-id'])]
generic_drug_not_in_portfolio_3 = generic_drugs_3[
~generic_drugs_3['drug-id'].isin(generic_wh_all_portfolio_drug['drug-id'])]
df_generic_drugs_in_portfolio = pd.merge(generic_drug_in_portfolio_3, generic_3,
how='left', on='drug-id')
merge_9 = pd.concat([non_generic_3, df_generic_drugs_in_portfolio])
# CFR Adjustment-2 (Goodaid)
goodaid_composition = goodaid_wh_portfolio_new[['composition']]
generic_4 = merge_9[merge_9['drug-type'] == 'generic']
non_generic_4 = merge_9[merge_9['drug-type'] != 'generic']
generic_4_composition = generic_4[['composition']]
generic_composition_goodaid = generic_4_composition[
generic_4_composition['composition'].isin(goodaid_composition['composition'])]
generic_composition_non_goodaid = generic_4_composition[
~generic_4_composition['composition'].isin(goodaid_composition['composition'])]
generic_composition_goodaid = pd.merge(generic_composition_goodaid, generic_4, how='left',
left_index=True, right_index=True, on='composition')
generic_composition_goodaid = generic_composition_goodaid[
generic_composition_goodaid['company'] == 'GOODAID']
non_goodaid = pd.merge(generic_composition_non_goodaid, generic_4, how='left', left_index=True,
right_index=True, on='composition')
non_goodaid = non_goodaid[
['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition', 'source']]
goodaid = generic_composition_goodaid[
['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition', 'source']]
merge_10 = pd.concat([non_generic_4, non_goodaid, goodaid])
# Removing the banned products
merge_10 = merge_10[merge_10['drug-type'] != 'banned']
# removing the discontinued products
merge_10 = merge_10[merge_10['drug-type'] != 'discontinued-products']
merge_10 = merge_10.drop_duplicates(subset=['drug-id', 'drug-name', 'drug-type', 'company-id', 'company',
'drug-category', 'composition'])
merge_10 = pd.merge(merge_10, avg_net_quan, how='left', on='drug-id')
merge_10 = pd.merge(merge_10, avg_ptr, how='left', on='drug-id')
merge_10['avg_per_month'] = merge_10['avg_per_month'].fillna(1)
merge_10['avg_per_month'] = merge_10['avg_per_month'].round(0).astype(int)
merge_10 = merge_10.rename(columns={'avg_per_month': 'monthly_avg_quantity'})
merge_10['active'] = 1
merge_10['proxy_stores'] = str(proxy_store_list)
merge_10['new_store_id'] = new_store_id_list
merge_10['run_date'] = run_date
merge_10.columns = [c.replace('_', '-') for c in merge_10.columns]
nso_assortment_file_name = 'nso_assortment/nso_launch_stock_store_id_{}.csv'.format(new_store_id_list)
# Uploading File to S3
nso_assortment_uri = s3.save_df_to_s3(df=merge_10, file_name=nso_assortment_file_name)
email.send_email_file(subject=f"nso assortment for new_store_id {new_store_id_list}",
mail_body=f"nso assortment for new_store_id {new_store_id_list} proxy store list is {proxy_store_list}",
to_emails=email_to, file_uris=[nso_assortment_uri])
rs_db.close_connection()

# --- end of glue-jobs/src/scripts/nso_launch_stock/nso_launch_stock.py (zeno-etl-libs) ---
import argparse
# this is to include zeno_etl_libs in the python search path on the run time
import sys
import os
sys.path.append('../../../..')
import pandas as pd
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.aws.s3 import S3
source_pg_table = "customer_value_segment"
target_rs_table = "customer-value-segment"
def main(rs_db, pg_db, s3, limit, batch_size):
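    """
    Full refresh of the Redshift "customer-value-segment" table: deletes all rows,
    then copies the Postgres customer_value_segment table across in id-ordered
    batches of `batch_size` rows (optionally capped by `limit`).
    """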
table_info = helper.get_table_info(db=rs_db, table_name=target_rs_table, schema='prod2-generico')
columns = list(table_info['column_name'])
# columns.remove('id')
rs_db.execute(query=f""" delete from "prod2-generico"."{target_rs_table}"; """)
incomplete = True
last_id = None
total_pushed = 0
while incomplete:
limit_str = f" limit {batch_size} " if batch_size else ""
filter_str = f" where id > {last_id} " if last_id else ""
query = f"""
select
id,
patient_id as "patient-id",
segment_calculation_date as "segment-calculation-date",
value_segment as "value-segment"
from
{source_pg_table} cvs
{filter_str}
order by id asc
{limit_str} ;
"""
df = pd.read_sql_query(query, pg_db.connection)
if df.empty:
incomplete = False
else:
last_id = int(df['id'].values[-1])
df = df[columns]
s3.write_df_to_db(df=df, table_name=target_rs_table, db=rs_db, schema='prod2-generico')
total_pushed += batch_size
if limit and limit < total_pushed:
incomplete = False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-l', '--limit', default=None, type=int, required=False, help="Total patients to process")
parser.add_argument('-b', '--batch_size', default=500000, type=int, required=False, help="batch size")
args, unknown = parser.parse_known_args()
env = args.env
limit = args.limit
batch_size = args.batch_size
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
_s3 = S3()
pg_db = PostGre()
pg_db.open_connection()
""" calling the main function """
main(rs_db=rs_db, pg_db=pg_db, s3=_s3, limit=limit, batch_size=batch_size)
# Closing the DB Connection
rs_db.close_connection()
    pg_db.close_connection()

# --- end of glue-jobs/src/scripts/value-segnment/value_segment.py (zeno-etl-libs) ---
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime as dt
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
def main(debug_mode, run_stores, run_date, s3, mysql, rs_db_read, rs_db_write,
read_schema, write_schema, logger, online_email, offline_email):
status = 'Failed'
logger.info("PMF Conversion Code Execution Starts")
logger.info(f"Stores: {run_stores}")
logger.info(f"Run Date: {run_date}")
logger.info(f"Debug Mode: {debug_mode}")
logger.info(f"Online Session Login IDs: {online_email}")
logger.info(f"Offline Session Login IDs: {offline_email}")
online_email = online_email.split(",")
offline_email = offline_email.split(",")
logins = tuple(online_email + offline_email)
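    # For each run store and run date: pull alternate-drug search sessions, attach any
    # PSO / bill raised by the same patient that day, derive assortment, availability and
    # conversion flags, enrich with patient metadata, and write to "pmf-conversion".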
try:
# ======================================================================
# alternative searches
# ======================================================================
q_bb = f"""
select
ad.`unique-id` as `session-id`,
pso.`order-number`,
ad.`patients-store-orders-id`,
ad.`patient-id` as `ad-patient-id`,
pso.`patient-id` as `pso-patient-id`,
ad.`store-id`,
ad.`requested-drug-id`,
ad.`requested-drug-name`,
d.`type` as `drug-type`,
d.category,
ad.`required-drug-quantity`,
ad.`suggested-drug-id`,
ad.`suggested-drug-name`,
ad.`suggested-drug-inventory-quantity`,
sda.`is-active` as `assortment_active`,
date(ad.`created-at`) as `session-date`,
pso.`bill-id`,
pso.`order-type`,
ad.`created-by` as `session-created-by`
from
`alternate-drugs` ad
left join `patients-store-orders` pso on
pso.`id` = ad.`patients-store-orders-id`
left join `bills-1` b on
b.`id` = pso.`bill-id`
left join drugs d on
d.id = ad.`requested-drug-id`
left join `store-drug-assortment` sda on
sda.`store-id` = ad.`store-id` and sda.`drug-id` = ad.`requested-drug-id`
where
date(ad.`created-at`) = '{run_date}'
and ad.`store-id` in {run_stores}
and ad.`created-by` in {logins}
"""
df_bb = pd.read_sql_query(q_bb, mysql.connection)
df_bb.columns = [c.replace('-', '_') for c in df_bb.columns]
# have a patient across every session
df_bb['pid'] = np.where(df_bb['ad_patient_id'].isnull(),
df_bb['pso_patient_id'], df_bb['ad_patient_id'])
df_bb["patient_id"] = df_bb.groupby("session_id")['pid'].transform(
lambda x: x.fillna(x.mean()))
df_bb.drop(['ad_patient_id', 'pso_patient_id', 'pid'], axis=1, inplace=True)
df_bb['assortment_active'].fillna(0, inplace=True)
# add patient name/number.
tempp = df_bb[df_bb['patient_id'].notnull()][['patient_id']]
tempp['patient_id'] = tempp['patient_id'].apply(np.int64)
patients = tuple(map(int, list((tempp['patient_id']).unique())))
pt = """
select
`id` as `patient-id`, `name` as `patient-name`, `phone`
from
`patients` p
where id in %s
"""
pts = pd.read_sql_query(pt, mysql.connection, params=[patients])
pts.columns = [c.replace('-', '_') for c in pts.columns]
df_bb = pd.merge(left=df_bb, right=pts, how='left', on=['patient_id'])
cols_to_move = ['patient_id', 'patient_name', 'phone']
df_bb = df_bb[cols_to_move + [col for col in
df_bb.columns if col not in cols_to_move]]
# assortment flag
conditions = [(
(df_bb.suggested_drug_id.isnull()) &
(df_bb['assortment_active'] == 0)
),
(df_bb.suggested_drug_id.isnull()) &
(df_bb['assortment_active'] == 1),
(df_bb.suggested_drug_id.notnull())
]
choices = ['not-in-assortment', 'in-assortment', 'in-assortment']
df_bb['flag_assort'] = np.select(conditions, choices)
# availability flag
conditions = [
(df_bb['flag_assort'] == 'not-in-assortment'),
(df_bb['required_drug_quantity'] > df_bb[
'suggested_drug_inventory_quantity']),
(df_bb['required_drug_quantity'] <= df_bb[
'suggested_drug_inventory_quantity'])
]
choices = ['full', 'short', 'full']
df_bb['flag_availability'] = np.select(conditions, choices)
        # conversion flag
        # a patient who billed on the same day, even outside this session, is treated as converted
        # flag: whether the patient billed at all during the day
bil = """
select
`patient-id`,
1 AS `billed`
from
`bills-1` b
where
`patient-id` in %s
and date(`created-at`) = %s
group by
`patient-id`;
"""
bills = pd.read_sql_query(bil, mysql.connection, params=[patients, run_date])
bills.columns = [c.replace('-', '_') for c in bills.columns]
df_bb = pd.merge(left=df_bb, right=bills[['patient_id', 'billed']],
how='left', on=['patient_id'])
bt = """
select
`id` as `pso_id`, `order-number` as `pso-order-number`,
`order-type` as `pso-order-type`,
`patient-id`, `drug-id` as `suggested_drug_id`,
`bill-id` as `pso-bill-id`, `created-at` as `pso-date`
from
`patients-store-orders` pso
where
`patient-id` in %s
and date(`created-at`) = %s
group by `id`, `order-number`, `order-type`,
`patient-id`, `drug-id`, `bill-id`, `created-at`;
"""
psos = pd.read_sql_query(bt, mysql.connection, params=[patients, run_date])
psos.columns = [c.replace('-', '_') for c in psos.columns]
psos.drop_duplicates(['patient_id', 'suggested_drug_id'],
keep='last', inplace=True)
df_bb = pd.merge(left=df_bb, right=psos,
how='left', on=['patient_id', 'suggested_drug_id'])
df_bb['patients_store_orders_id'] = np.where(
df_bb['patients_store_orders_id'].isnull(),
df_bb['pso_id'], df_bb['patients_store_orders_id'])
df_bb['order_number'] = np.where(
df_bb['order_number'].isnull(),
df_bb['pso_order_number'], df_bb['order_number'])
df_bb['bill_id'] = np.where(
df_bb['bill_id'].isnull(),
df_bb['pso_bill_id'], df_bb['bill_id'])
df_bb['order_type'] = np.where(
df_bb['order_type'].isnull(),
df_bb['pso_order_type'], df_bb['order_type'])
# conversion logic
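        # a session counts as 'converted' if any of its rows has a patients-store-orders-id
        # or the patient billed that day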
df_temp = df_bb[(~df_bb.patients_store_orders_id.isnull()) |
(df_bb['billed'] == 1)][['session_id']]
df_temp1 = df_temp.drop_duplicates(subset=['session_id'])
df_temp1['flag_conversion'] = 'converted'
df_cc = pd.merge(left=df_bb, right=df_temp1, how='left', on=['session_id'])
conditions = [(df_cc.flag_conversion.isnull()),
(df_cc.flag_conversion.notnull())]
choices = ['not_converted', 'converted']
df_cc['flag_conversion'] = np.select(conditions, choices)
# patient metadata2
q_code = """
select
"id" as "patient_id",
"total-spend",
"average-bill-value",
"number-of-bills",
"system-age-days",
"avg-purchase-interval",
"previous-bill-date",
"is-repeatable",
"is-generic",
"is-chronic",
"is-ethical",
"value-segment-anytime",
"behaviour-segment-anytime",
"primary-disease"
from
"{schema}"."patients-metadata-2" pm
where
id in {ptt};
""".format(ptt=patients, schema=read_schema)
pmeta = rs_db_read.get_df(q_code)
pmeta.columns = [c.replace('-', '_') for c in pmeta.columns]
df_dd = pd.merge(left=df_cc, right=pmeta, how='left', on=['patient_id'])
df_to_upload = df_dd
df_to_upload.drop(['pso_id', 'pso_order_number', 'pso_order_type',
'pso_bill_id', 'pso_date'], axis=1, inplace=True)
# session-channel
df_to_upload['session_channel'] = np.where(df_to_upload['session_created_by'].isin(offline_email), 'offline',
'online')
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing to RS table: pmf-conversion")
df_to_upload['run-date'] = dt.datetime.strptime(run_date, '%Y-%m-%d').date()
df_to_upload['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df_to_upload['created-by'] = 'etl-automation'
df_to_upload['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df_to_upload['updated-by'] = 'etl-automation'
df_to_upload.columns = [c.replace('_', '-') for c in
df_to_upload.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='pmf-conversion',
schema=write_schema)
# To Avoid Duplication
truncate_query = f"""
DELETE
FROM
"{write_schema}"."pmf-conversion"
WHERE
"run-date" = '{run_date}';
"""
logger.info(truncate_query)
rs_db_write.execute(truncate_query)
columns = list(table_info['column_name'])
df_to_upload = df_to_upload[columns] # required column order
s3.write_df_to_db(df=df_to_upload,
table_name='pmf-conversion',
db=rs_db_write, schema=write_schema)
        status = 'Success'
    except Exception as error:
        logger.exception(error)
    logger.info(f"PMF Conversion Code Execution Status: {status}")
    return status
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected],[email protected],[email protected]",
type=str, required=False)
parser.add_argument('-d', '--debug_mode', default="N", type=str,
required=False)
parser.add_argument('-rs', '--run_stores', default="4", type=str,
required=False)
parser.add_argument('-rd', '--run_date', default="YYYY-MM-DD", type=str,
required=False)
parser.add_argument('-offs', '--offline_session', default='[email protected], [email protected]',
type=str, required=False)
parser.add_argument('-ons', '--online_session', default='[email protected]', type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
debug_mode = args.debug_mode
online_email = args.online_session
offline_email = args.offline_session
# JOB EXCLUSIVE PARAMS
run_stores = args.run_stores.replace(" ", "").split(",")
run_date = args.run_date
run_stores = list(map(int, run_stores))
run_stores = str(run_stores).replace('[', '(').replace(']', ')')
run_date_list = run_date.split(",")
logger = get_logger()
# define connections
s3 = S3()
mysql = MySQL(read_only=True)
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open connections
rs_db_read.open_connection()
rs_db_write.open_connection()
mysql.open_connection()
for run_date in run_date_list:
if run_date == 'YYYY-MM-DD': # Take current date
run_date = dt.date.today().strftime("%Y-%m-%d")
""" calling the main function """
status = main(debug_mode, run_stores, run_date, s3, mysql, rs_db_read,
rs_db_write, read_schema, write_schema, logger, online_email, offline_email)
if email_to:
# SEND EMAIL OF EXECUTION STATUS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"PMF Conversion Code (GLUE-{env}) {run_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Run Stores: {run_stores}
Run Date: {run_date}
Job Params: {args}
""",
to_emails=email_to, file_uris=[])
# close connections
rs_db_read.close_connection()
rs_db_write.close_connection()
    mysql.close()

# --- end of glue-jobs/src/scripts/pmf_conversion/pmf_conversion.py (zeno-etl-libs) ---
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
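    # Rebuild "pso-slot-changes": collapse "pso-slot-changes-log" per PSO into the first
    # and last slot (and slot dates) via window functions, plus the total number of slot changes.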
table_name = "pso-slot-changes"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"first-slot",
"last-slot",
"total-slot-changes",
"first-slot-date",
"last-slot-date"
)
select
ps."patient-store-order-id" as "id",
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "updated-at",
max(first_slot) as "first-slot",
max(last_slot) as "last-slot",
max(total_slot_changes) as "total-slot-changes",
max(first_slot_date) as "first-slot-date",
max(last_slot_date) as "last-slot-date"
from
(
select
"pso-id" as "patient-store-order-id",
first_value("old-slot") over (partition by "pso-id"
order by
"created-at" asc rows between unbounded preceding and unbounded following) as first_slot,
first_value("new-slot") over (partition by "pso-id"
order by
"created-at" desc rows between unbounded preceding and unbounded following) as last_slot,
count(id) over (partition by "pso-id") as "total_slot_changes",
first_value("old-slot-date") over (partition by "pso-id"
order by
"created-at" asc rows between unbounded preceding and unbounded following) as first_slot_date,
first_value("new-slot-date") over (partition by "pso-id"
order by
"created-at" desc rows between unbounded preceding and unbounded following) as last_slot_date
from
"prod2-generico"."pso-slot-changes-log" pscl ) ps
group by
"patient-store-order-id";
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."pso-slot-changes";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
    rs_db.close_connection()

# --- end of glue-jobs/src/scripts/pso_slot_change/pso_slot_change.py (zeno-etl-libs) ---
import os
import sys
from memory_profiler import profile
from pyspark.context import SparkContext
from pyspark.sql import SQLContext
sys.path.append('../../../..')
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
os.environ['env'] = 'dev'
sc = SparkContext()
sq = SQLContext(sc)
logger = get_logger()
s3 = S3()
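# Example flow: read a small sample from Redshift into pandas, convert it to a Spark
# DataFrame, aggregate with Spark, convert back to pandas and load the result into
# Redshift through the S3 helper.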
@profile
def read_data():
logger.info("trying to read data from redshift")
query = f"""select * from "prod2-generico".sales limit 10;"""
df = rs_db.get_df(query=query)
for c in df.columns:
df[c] = df[c].astype('str')
sparkDF = sq.createDataFrame(df)
logger.info("spark dataframe created")
return sparkDF
def process_data(df):
new_df = df.groupBy(["bill-id", "patient-id"]).count()
new_df.show()
logger.info("processing of data completed")
return new_df
def write_df_to_redshift_table(df, redshift_table, redshift_schema, load_mode, db_secrets):
    # redshift connection details from secrets (plain strings, not tuples)
    host = db_secrets['REDSHIFT_HOST']
    database = db_secrets['REDSHIFT_DB']
    user = db_secrets['REDSHIFT_USER']
    password = db_secrets['REDSHIFT_PASSWORD']
    port = int(db_secrets['REDSHIFT_PORT'])
    redshift_url = "jdbc:{}://{}:{}/{}".format(
        "redshift",
        host,
        port,
        database,
    )
    temp_bucket = "s3://{}/{}/".format("aws-glue-temporary-921939243643-ap-south-1", "temp")
    schema_qualified_table = f'"{redshift_schema}"."{redshift_table}"'
logger.info("Attempting to write dataframe into:{}".format(schema_qualified_table))
try:
df.write.format("com.databricks.spark.redshift").option(
"url",
redshift_url
+ "?user="
+ user
+ "&password="
+ password,
).option("dbtable", schema_qualified_table).option(
"tempdir", temp_bucket
).save(
mode=load_mode
)
status = True
logger.info("Dataframe written successfully into table:{}".format(schema_qualified_table))
except Exception as exception:
status = False
raise Exception("{}".format(exception))
return status
if __name__ == '__main__':
try:
configObj = Config.get_instance()
secrets = configObj.get_secrets()
rs_db = DB()
rs_db.open_connection()
df = read_data()
processed_df = process_data(df)
processed_df = processed_df.toPandas()
# status = write_df_to_redshift_table(processed_df, "spark-example-temp", "prod2-generico", "append", secrets)
s3.write_df_to_db(df=processed_df, table_name="spark-example-temp", db=rs_db,
schema="prod2-generico")
rs_db.close_connection()
except Exception as error:
        raise Exception(error)

# --- end of glue-jobs/src/scripts/spark_job/spark_example.py (zeno-etl-libs) ---
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
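    # Rebuild the patient-request -> short-book mapping: for every patient request keep
    # exactly one short book (the one with the lowest required quantity, via row_number).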
table_name = "patient-requests-short-books-map"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"patient-request-id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"short-book-id"
)
select
a."patient-request-id" as "patient-request-id",
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "updated-at",
a."short-book-id" as "short-book-id"
from
(
select
"patient-request-id",
"short-book-id",
row_number() over (partition by "patient-request-id"
order by
sb."required-quantity" asc) as row_num
from
"prod2-generico"."patient-requests-short-books" prsb
inner join "prod2-generico"."short-book-1" sb on
prsb."short-book-id" = sb.id) a
where
a.row_num = 1;
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."patient-requests-short-books-map";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
    rs_db.close_connection()

# --- end of glue-jobs/src/scripts/patient-requests-short-book/patinet-requests-short-book.py (zeno-etl-libs) ---
import argparse
import sys
import os
import datetime
from io import StringIO
import dateutil.parser
import dateutil.relativedelta
import dateutil.tz
import numpy as np
from pandas.io.json import json_normalize
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.google.playstore.playstore import Reviews
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument(
'-sd', '--start_datetime', default="NA", type=str, required=False,
help="If start date is 'NA' then latest review for the day and month will be added, for full"
" run make the start date old")
parser.add_argument(
'-et', '--email_to',
default="[email protected],[email protected],[email protected]", type=str,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
start_datetime = args.start_datetime
os.environ['env'] = env
logger = get_logger()
logger.info(f"start_date: {start_datetime}")
# Read using google API
r = Reviews()
rs_db = DB(read_only=False)
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'playstore-reviews'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
logger.info(f"Table:{table_name} exists")
def insert_reviews(df):
"""
Function helps in insert daily and monthly reviews in table, columns which are absent will be
filled with default values
"""
must_present = "RaiseException"
# Insert data
table_columns = {
"review-created-at": must_present,
"review-id": np.nan,
"star-rating": must_present,
"author-name": np.nan,
"user-image": np.nan,
"review": np.nan,
"reviewer-lang": "en",
"thumbsup-count": np.nan,
"review-link": np.nan,
"replied-at": np.nan,
"reply-content": np.nan,
"year": must_present,
"month": must_present,
"created-at": datetime.datetime.now(tz=dateutil.tz.gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S'),
"created-by": "etl-automation",
"updated-by": "etl-automation",
"updated-at": datetime.datetime.now(tz=dateutil.tz.gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')
}
for column in table_columns:
if column not in df.columns:
if table_columns[column] == must_present:
raise Exception(f"{column} column must be present.")
df[column] = table_columns[column]
s3.write_df_to_db(df=df[table_columns.keys()], table_name=table_name, db=rs_db, schema=schema)
def get_month_reviews(year, month):
"""
return the review of given month and year from s3 CSV file
"""
uri = f"s3://aws-glue-temporary-921939243643-ap-south-1/playstore-reviews/" \
f"reviews_reviews_com.zenohealth.android_{year}{str(month).zfill(2)}.csv"
logger.info(f"uri: {uri}")
csv_string = s3.get_file_object(uri=uri, encoding="utf-16")
df = pd.read_csv(StringIO(csv_string))
df['month'] = str(month).zfill(2)
df['year'] = str(year)
return df
def get_last_review_date():
"""
Gets the last review date in the table
:return: last review date
"""
query = f""" select max("review-created-at") last_review_date from "{schema}"."{table_name}"
where "review-id" != '' """
df: pd.DataFrame = rs_db.get_df(query=query)
df['last_review_date'].fillna(np.nan, inplace=True)
last_review_date = df['last_review_date'].to_string(index=False)
logger.info(f"last_review_date: {last_review_date}")
return last_review_date
def get_last_review_year_month():
"""
Get the last review year and month for the CSV reviews without comments
:returns: year, month
"""
query = f""" select "year" last_year, "month" last_month from
"{schema}"."{table_name}" where "review-id" = '' order by year desc, month desc limit 1; """
df = rs_db.get_df(query=query)
if df.empty:
return 0, 0
df['last_year'].fillna(0, inplace=True)
df['last_month'].fillna(0, inplace=True)
last_year = df['last_year'].astype(int)
last_month = df['last_month'].astype(int)
logger.info(f"last_year, last_month: {last_year, last_month}")
return int(last_year), int(last_month)
# set the start and end time
start_year = 2021
start_month = 1
if start_datetime == 'NA':
# """ for daily review """
last_review_date = get_last_review_date()
if last_review_date == 'NaN':
# set very old date
start_datetime = '2017-01-01 00:00:00'
else:
start_datetime = last_review_date
# """ for monthly review """
table_year, table_month = get_last_review_year_month()
if table_year == 0 and table_month == 0:
# """ keeping it same 2021, 01 """
pass
else:
# """ Every day it will refresh the last table month from s3 csv """
start_year = table_year
start_month = table_month
end_date = datetime.date.today() + dateutil.relativedelta.relativedelta(months=-1)
end_year = end_date.year
end_month = end_date.month
start_datetime = dateutil.parser.parse(start_datetime)
logger.info(f"start_datetime: {start_datetime}")
day_diff = datetime.datetime.now() - start_datetime
# So that we do not fetch all the reviews every time
estimated_count = (day_diff.days + 1) * 3
logger.info(f"estimated_count: {estimated_count}")
reviews_list = r.get_all_review(count=estimated_count)
reviews_df = json_normalize(reviews_list)
# Column mapping
columns = {
'reviewId': 'review-id',
'userName': 'author-name',
'userImage': 'user-image',
'content': 'review',
'score': 'star-rating',
'thumbsUpCount': 'thumbsup-count',
'at': 'review-created-at',
'replyContent': 'reply-content',
'repliedAt': 'replied-at'
}
reviews_df = reviews_df[columns.keys()].rename(columns=columns)
# Filter the existing reviews
reviews_df['review-created-at'] = reviews_df['review-created-at'].apply(
pd.to_datetime, errors='coerce')
reviews_df = reviews_df[(reviews_df['review-created-at'] > start_datetime)]
# Review link calculation
reviews_df['review-link'] = reviews_df['review-id'].apply(
lambda
x: f"http://play.google.com/console/developers/7917307073215847519/app/4974372962404296517/"
f"user-feedback/review-details?reviewId={x}&corpus=PUBLIC_REVIEWS")
reviews_df['year'] = reviews_df['review-created-at'].apply(lambda x: x.year)
reviews_df['month'] = reviews_df['review-created-at'].apply(lambda x: x.month)
# Delete the existing reviews if any
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where "review-created-at" >
'{start_datetime}' and "review-id" is not null '''
rs_db.execute(truncate_query)
logger.info(f"reviews_df: {reviews_df.head(2)}")
# Insert
if reviews_df.empty:
logger.info("No data to insert.")
else:
insert_reviews(df=reviews_df)
logger.info("End of daily reviews.")
# """ Monthly Reviews """
print(f"start year, month: {start_year, start_month}")
print(f"end year, month: {end_year, end_month}")
csv_r_df = pd.DataFrame()
for year in range(start_year, end_year + 1):
for month in range(1, 12 + 1):
if year == start_year and month < start_month:
# """ stopping for old month"""
continue
if year == end_year and month > end_month:
# """ stopping for new months"""
continue
print(f"year, month: {year, month}")
df = get_month_reviews(year=year, month=month)
csv_r_df = pd.concat([csv_r_df, df], ignore_index=True)
# Delete the old reviews data from table
query = f"""delete from "{schema}"."{table_name}" where "review-id" = '' and year = {year}
and month = {month}; """
rs_db.execute(query=query)
logger.info(f"csv_r_df.head(1): {csv_r_df.head(1)}")
# Keep only star-rating-only rows (no review link/text) from the monthly CSV
csv_r_df = csv_r_df[csv_r_df['Review Link'].isna()]
columns = {
'Star Rating': 'star-rating',
'Review Submit Date and Time': 'review-created-at',
'year': 'year',
'month': 'month'
}
# # fetching review-id from review link
# csv_r_df['reviewId'] = csv_r_df['Review Link'].apply(
# lambda x: re.search('reviewId=(.*)&', str(x)).group(1) if 'reviewId' in str(x) else x)
csv_r_df = csv_r_df[columns.keys()].rename(columns=columns)
csv_r_df['review-created-at'] = pd.to_datetime(csv_r_df['review-created-at'],
format='%Y-%m-%dT%H:%M:%SZ')
# Insert
if csv_r_df.empty:
logger.info(f"No CSV data to insert: {csv_r_df}")
else:
logger.info(f"Update/Insert CSV data count: {len(csv_r_df)}")
insert_reviews(df=csv_r_df)
logger.info("End of monthly reviews.")
rs_db.close_connection()
def last_day_of_month(any_day):
"""
The day 28 exists in every month. 4 days later, it's always next month
:param any_day: datetime object of date
:returns: last day of the month
"""
next_month = any_day.replace(day=28) + datetime.timedelta(days=4)
# subtracting the number of the current day brings us back one month
return next_month - datetime.timedelta(days=next_month.day)
# """ Send monthly reminder of playstore review file(CSV) """
today = datetime.date.today()
last_day_of_month = last_day_of_month(any_day=today)
if today == last_day_of_month:
subject = '''[Reminder] Playstore reviews monthly CSV file '''
mail_body = f'''Hey Hardik,
Please send the playstore reviews csv file for the
year: {last_day_of_month.year}, month: {last_day_of_month.month}.
@Neha: Please upload this file at this location:
s3://aws-glue-temporary-921939243643-ap-south-1/playstore-reviews/
Please complete this activity by EOD, otherwise our glue/cron job will fail tomorrow.
Thanks,
Team Data
'''
email = Email()
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=[]) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/playstore-reviews/playstore-reviews-store-to-table.py | playstore-reviews-store-to-table.py |
#@owner: [email protected]
#@Purpose: To find the system inventory
import sys
import argparse
import os
from datetime import datetime
import pandas as pd
import numpy as np
sys.path.append('../../../..')
from dateutil.tz import gettz
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.db.db import MSSql
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]",
type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
logger.info('Script Manager Initialized')
rs_db = DB()
s3 = S3()
rs_db.open_connection()
snapshot_date = datetime.now().date()
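# Overall flow: build one snapshot per day by combining
# 1. store-level physical inventory (Redshift),
# 2. DC inventory buckets - to-be-dispatched, returns and in-transit (Redshift),
# 3. warehouse inventory - Bhiwandi, GoodAid and TEPL (MSSQL),
# and load the union into "prod2-generico"."system-inventory".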
# Store Inventory
store ='''
select
i."store-id" as "entity-id",
s.name as "entity-name",
'store' as "entity-type",
d."type" as "drug-type" ,
(case
when d."company-id" = 6984 then 'true'
else 'false'
end) as "goodaid-flag",
s."franchisee-id" as "franchise-id",
'physical' as "inventory-type",
(case
when date(i.expiry) <= current_date then 'expired'
when
(DATEDIFF('days',
current_date,
date(i.expiry))<= 90 and DATEDIFF('days',
current_date,
date(i.expiry)) >0) then 'near-expiry'
else 'others'
end) as "inventory-sub-type-1",
'' as "inventory-sub-type-2",
'' as "inventory-sub-type-3",
SUM(i.quantity + i."locked-for-check" + i."locked-for-audit" + i."locked-for-transfer" + i."locked-for-return") as "quantity",
SUM((i.ptr / (1 + ii.vat / 100)) * (i.quantity + i."locked-for-check" + i."locked-for-audit" + i."locked-for-transfer" + i."locked-for-return")) as "value-without-tax",
SUM(i.ptr * (i.quantity + i."locked-for-check" + i."locked-for-audit" + i."locked-for-transfer" + i."locked-for-return")) as "value-with-tax"
from
"prod2-generico"."inventory-1" i
left join "prod2-generico"."invoice-items-1" ii on
i."invoice-item-id" = ii.id
left join "prod2-generico".stores s on
i."store-id" = s.id
left join "prod2-generico".drugs d on
i."drug-id" = d.id
where
(i.quantity>0
or i."locked-for-check">0
or i."locked-for-audit">0
or i."locked-for-return">0
or i."locked-for-transfer" >0)
and
s.category != 'dc'
group by
i."store-id",
s."name" ,
d."type",
"goodaid-flag" ,
s."franchisee-id",
"inventory-sub-type-1" ;
'''
stores=rs_db.get_df(query=store)
# DC To be dispatched Inventory
to_be_dispatched='''
SELECT
s3.id as "entity-id",
s3.name as "entity-name",
'dc/warehouse' as "entity-type",
d."type" as "drug-type" ,
(case when d."company-id" =6984 then 'true'
else 'false' end) as "goodaid-flag",
s."franchisee-id" as "franchise-id",
'to_be_dispatched' as "inventory-type",
'' as "inventory-sub-type-1",
'' as "inventory-sub-type-2",
'' as "inventory-sub-type-3",
SUM(i."locked-quantity") as "quantity",
SUM(i."locked-quantity" * ((i."purchase-rate")/(1+ii.vat/100))) as "value-without-tax",
SUM(i."locked-quantity" * ((i."purchase-rate"))) as "value-with-tax"
FROM
"prod2-generico".inventory i
LEFT JOIN "prod2-generico".invoices i2 ON
i2.id =i."invoice-id"
LEFT JOIN "prod2-generico".stores s ON
i2."store-id" =s.id
LEFT JOIN "prod2-generico"."invoice-items" ii ON
i."invoice-item-id" =ii.id
LEFT JOIN "prod2-generico".stores s3 ON
s3.id =i2."dc-id"
left join "prod2-generico".drugs d on ii."drug-id" =d.id
WHERE
(i."locked-quantity" >0 and i2."dispatch-status" ='dispatch-status-na')
GROUP BY s3.id ,s3."name" ,d."type","goodaid-flag",s."franchisee-id" ;
'''
to_be_dispatched=rs_db.get_df(query=to_be_dispatched)
# DC/Warehouse Returns
return_query='''
SELECT
s2.id as "entity-id",
s2.name as "entity-name",
'dc/warehouse' as "entity-type",
d."type" as "drug-type",
(case when d."company-id" =6984 then 'true'
else 'false' end) as "goodaid-flag",
s2."franchisee-id" as "franchise-id",
'return' as "inventory-type",
CASE when ri."return-reason" IN ('reason-not-ordered',
'reason-to-be-returned',
'reason-wrong-product',
'reason-product-short',
'reason-softcopy-excess',
'reason-already-returned',
'reason-short-from-dc',
'reason-customer-refused',
'reason-wrongly-ordered',
'reason-excess-supplied',
'reason-non-moving',
'reason-wrong-mrp',
'reason-wrong-expiry',
'reason-excess-or-not-ordered',
'reason-late-supply',
'reason-wrong-pack-size',
'reason-excess-order')
Then 'Saleable'
When ri."return-reason" IN ('reason-product-expired', 'reason-over-expiry', 'reason-npi-non-saleable') Then 'Non saleable'
WHen ri. "return-reason" IN ('reason-product-damaged', 'reason-near-expiry') AND (DATEDIFF('days',rtd2."created-at" , i2."invoice-date")> 30) THEN
'Non saleable'
WHen ri."return-reason" IN ('reason-product-damaged', 'reason-near-expiry') AND (DATEDIFF('days',rtd2."created-at" , i2."invoice-date")<= 30) THEN
'Saleable'
Else 'Saleable'
end
as "inventory-sub-type-1",
ri.status as "inventory-sub-type-2",
dn.status as "inventory-sub-type-3" ,
SUM(ri."returned-quantity") as "quantity" ,
SUM(ri.taxable) as "value-without-tax",
SUM(ri.net) as "value-with-tax"
FROM
"prod2-generico"."return-items" ri
LEFT JOIN "prod2-generico"."debit-note-items" dni
ON
ri.id = dni."item-id"
AND dni."is-active" != 0
LEFT JOIN "prod2-generico"."debit-notes" dn
ON
dni."debit-note-id" = dn.id
LEFT JOIN "prod2-generico"."inventory-1" i ON
ri."inventory-id" =i.id
LEFT JOIN "prod2-generico"."returns-to-dc" rtd3 ON
ri."return-id" =rtd3.id
LEFT JOIN "prod2-generico"."return-items-1" ri2 ON ri."return-item-reference" =ri2.id
LEFT JOIN "prod2-generico"."returns-to-dc-1" rtd2 ON ri2."return-id" =rtd2.id
LEFT JOIN "prod2-generico".invoices i2 On (i2.id =i."invoice-id" )
LEFT JOIN "prod2-generico".stores s2 ON (ri."return-dc-id" =s2.id)
left join "prod2-generico".drugs d on i."drug-id" =d.id
WHERE (ri.status ='saved' OR ri.status ='approved') and ri."returned-quantity" >0
Group By s2.id,s2.name,s2."franchisee-id" ,d."type","goodaid-flag" ,"inventory-sub-type-1", ri.status,dn.status;
'''
return_query=rs_db.get_df(query=return_query)
# Intransit
in_transit='''
select
s3.id as "entity-id" ,
s3.name as "entity-name",
'dc/warehouse' as "entity-type",
d."type" as "drug-type",
(case when d."company-id" =6984 then 'true'
else 'false' end) as "goodaid-flag",
s."franchisee-id" as "franchise-id",
'in-transit' as "inventory-type",
'' as "inventory-sub-type-1",
'' as "inventory-sub-type-2",
'' as "inventory-sub-type-3",
SUM(i."locked-quantity") as "quantity" ,
SUM(i."locked-quantity" * (i."purchase-rate")/(1 + ii.vat / 100)) as "value-without-tax",
SUM(i."locked-quantity" * (i."purchase-rate")) as "value-with-tax"
from
"prod2-generico"."inventory-1" i
left join "prod2-generico".stores s on
s.id=i."store-id"
left join "prod2-generico".invoices i2 on
i."invoice-id" =i2.id
left join "prod2-generico"."invoice-items-1" ii on
i."invoice-item-id" =ii.id
left join "prod2-generico".stores s3 on
i2."dc-id" =s3.id
left join "prod2-generico".drugs d on i."drug-id" =d.id
where
(i."locked-quantity" >0)
group by
s3.id,
s3.name,
d."type",
"goodaid-flag",
s."franchisee-id" ;
'''
in_transit=rs_db.get_df(query=in_transit)
#drug info
drug_info='''
select
d.id as "drug-id",
d."type" as "drug-type" ,
(case when d."company-id" =6984 then 'true'
else 'false' end) as "goodaid-flag"
from
"prod2-generico".drugs d
'''
drug_info=rs_db.get_df(query=drug_info)
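# drug_info is joined later (on the warehouse barcode treated as drug-id) to attach
# drug-type and goodaid-flag to the MSSQL warehouse rows.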
#Warehouse Barcoded Query
mssql = MSSql(connect_via_tunnel=False)
cnxn = mssql.open_connection()
cursor = cnxn.cursor()
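# Bhiwandi warehouse (MSSQL): barcoded stock is read from FIFO rows with positive
# voucher numbers (BQty) and non-barcoded stock from negative voucher rows (TQty),
# as per the case expressions in the two queries below.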
barcoded_bhw = '''
select
199 as "entity-id",
'bhiwandi-warehouse' as "entity-name",
'warehouse' as "entity-type",
'' as "franchise-id",
b.Barcode as "drug-id",
'barcoded' as "inventory-type",
(case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
else 'non-goodaid'
end) as "inventory-sub-type-1",
'' as "inventory-sub-type-2",
'' as "inventory-sub-type-3",
sum(case when a.Vno < 0 then 0 else coalesce(a.bqty, 0) end) as "quantity",
sum(case when a.Vno < 0 then 0 else coalesce(a.bqty * a.Cost , 0) end) as "value-without-tax",
sum(case when a.vno<0 then 0 else coalesce((a.bqty * a.Cost *(1 + coalesce((sp.CGST + sp.SGST + sp.IGST), 0)/ 100)), 0) end) as "value-with-tax"
from
fifo a
left join item b on
a.itemc = b.code
left join SalePurchase2 sp on
(a.Pbillno = sp.Pbillno
and a.Psrlno = sp.Psrlno
and a.Itemc = sp.Itemc
and sp.Vdt = a.Vdt)
where
b.code > 0 and a.Vno!=0
and isnumeric(b.Barcode) = 1
and b.Barcode not like '%[^0-9]%'
and a.BQty >0
and (a.Psrlno in (
select Psrlno from Esdata.dbo.salepurchase2 s2)
or a.Psrlno IN (SELECT sp2.Psrlno from Esdata2122.dbo.SalePurchase2 sp2
))
group by
b.Barcode,
(case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
else 'non-goodaid'
end)
'''
barcoded = pd.read_sql(barcoded_bhw,cnxn)
# Warehouse Non Barcoded
non_barcoded_bhw = '''
select
199 as "entity-id",
'bhiwandi-warehouse' as "entity-name",
'warehouse' as "entity-type",
'' as "franchise-id",
b.Barcode as "drug-id",
'non-barcoded' as "inventory-type",
(Case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
Else 'non-goodaid'
end) as "inventory-sub-type-1",
'' as "inventory-sub-type-2",
'' as "inventory-sub-type-3",
sum(case when a.Vno > 0 then 0 else coalesce(a.TQty , 0) end) as "quantity",
sum(case when a.Vno > 0 then 0 else coalesce(a.TQty * a.Cost , 0) end) as "value-without-tax",
sum(case when a.vno > 0 then 0 else coalesce((a.TQty * a.Cost *(1 +COALESCE((sp.CGST + sp.SGST + sp.IGST),0)/ 100)), 0) end) as "value-with-tax"
from
fifo a
left join item b on
a.itemc = b.code
left join SalePurchase2 sp on
(a.Pbillno = sp.Pbillno
and a.Psrlno = sp.Psrlno
and a.Itemc = sp.Itemc and sp.Vdt = a.Vdt)
where
b.code > 0 and a.Vno!=0
and isnumeric(b.Barcode) = 1
and b.Barcode not like '%[^0-9]%'
and a.TQty >0 and a.vno<0 and
(a.Psrlno in (
select Psrlno from Esdata.dbo.salepurchase2 s2)
or a.Psrlno IN (SELECT sp2.Psrlno from Esdata2122.dbo.SalePurchase2 sp2
))
group by
b.Barcode,
(Case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
Else 'non-goodaid'
end)
'''
non_barcoded = pd.read_sql(non_barcoded_bhw,cnxn)
#Wh to distributor
wh_dist_return ='''
select
199 as "entity-id",
'bhiwandi-warehouse' as "entity-name",
'warehouse' as "entity-type",
'' as "franchise-id",
item.Barcode as "drug-id",
'wh-to-distributor-return' as "inventory-type",
(case
when (item.Compcode = 465
or item.Compcode = 459 or item.Compcode=460) then 'goodaid'
else 'non-goodaid'
end) as "inventory-sub-type-1",
'' as "inventory-sub-type-2",
'' as "inventory-sub-type-3",
SUM(s2.qty) as "quantity",
SUM(s2.ftrate * (s2.qty + s2.fqty)) as "value-without-tax",
SUM((1 + (s2.IGST + s2.CGST + s2.SGST)/ 100) * s2.ftrate * (s2.qty + s2.fqty)) as "value-with-tax"
from
salepurchase1 s1
inner join salepurchase2 s2 on
s2.vtype = s1.vtyp
and s2.vno = s1.vno
and s2.vdt = s1.vdt
and s1.Trntype = s2.Trntype
inner join item on
item.code = s2.itemc
inner join acm on
s1.acno = acm.code
inner join FIFO f on
f.Psrlno = s2.Psrlno
left join (
select
Pbillno,
Vdt,
Acno,
Psrlno
from
salePurchase2 sp
where
Trntype = 'PB') as spx on
spx.Acno = s2.Acno
and spx.Psrlno = s2.Psrlno
left join (
select
vno,
avtype
from
Adjstmnt
where
vtype = 'PR') as sttl on
sttl.vno = s1.Vno
where
(s1.status is null
or s1.status <> 'C')
and s1.trntype in ('PR')
and sttl.avtype is null
and s2.Ftrate>0
group by
item.Barcode,
(case
when (item.Compcode = 465
or item.Compcode = 459 or item.Compcode=460) then 'goodaid'
else 'non-goodaid'
end);
'''
wh_returns= pd.read_sql(wh_dist_return,cnxn)
#GOODAID Warehouse
mssql_ga = MSSql(connect_via_tunnel=False,db='Esdata_WS_2')
cnxn = mssql_ga.open_connection()
cursor = cnxn.cursor()
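# GoodAid warehouse: the same barcoded / non-barcoded queries are run against the
# Esdata_WS_2 database, tagged with entity-id 343.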
barcoded_ga ='''
select
343 as "entity-id",
'goodaid-warehouse' as "entity-name",
'warehouse' as "entity-type",
'' as "franchise-id",
b.Barcode as "drug-id",
'barcoded' as "inventory-type",
(case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
else 'non-goodaid'
end) as "inventory-sub-type-1",
'' as "inventory-sub-type-2",
'' as "inventory-sub-type-3",
sum(case when a.Vno < 0 then 0 else coalesce(a.bqty, 0) end) as "quantity",
sum(case when a.Vno < 0 then 0 else coalesce(a.bqty * a.Cost , 0) end) as "value-without-tax",
sum(case when a.vno<0 then 0 else coalesce((a.bqty * a.Cost *(1 + coalesce((sp.CGST + sp.SGST + sp.IGST), 0)/ 100)), 0) end) as "value-with-tax"
from
fifo a
left join item b on
a.itemc = b.code
left join SalePurchase2 sp on
(a.Pbillno = sp.Pbillno
and a.Psrlno = sp.Psrlno
and a.Itemc = sp.Itemc
and sp.Vdt = a.Vdt)
where
b.code > 0 and a.Vno!=0
and isnumeric(b.Barcode) = 1
and b.Barcode not like '%[^0-9]%'
and a.BQty >0
and a.Psrlno in (
select
Psrlno
from
SalePurchase2 sp)
group by
b.Barcode,
(case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
else 'non-goodaid'
end)
'''
barcoded_ga = pd.read_sql(barcoded_ga,cnxn)
non_barcoded_ga ='''
select
343 as "entity-id",
'goodaid-warehouse' as "entity-name",
'warehouse' as "entity-type",
'' as "franchise-id",
b.Barcode as "drug-id",
'non-barcoded' as "inventory-type",
(Case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
Else 'non-goodaid'
end) as "inventory-sub-type-1",
'' as "inventory-sub-type-2",
'' as "inventory-sub-type-3",
sum(case when a.Vno > 0 then 0 else coalesce(a.TQty , 0) end) as "quantity",
sum(case when a.Vno > 0 then 0 else coalesce(a.TQty * a.Cost , 0) end) as "value-without-tax",
sum(case when a.vno > 0 then 0 else coalesce((a.TQty * a.Cost *(1 + COALESCE((sp.CGST + sp.SGST + sp.IGST), 0)/ 100)), 0) end) as "value-with-tax"
from
fifo a
left join item b on
a.itemc = b.code
left join SalePurchase2 sp on
(a.Pbillno = sp.Pbillno
and a.Psrlno = sp.Psrlno
and a.Itemc = sp.Itemc
and sp.Vdt = a.Vdt)
where
b.code > 0 and a.Vno!=0
and isnumeric(b.Barcode) = 1
and b.Barcode not like '%[^0-9]%'
and a.TQty >0
and a.vno<0
and a.Psrlno in (
select
Psrlno
from
SalePurchase2 sp)
group by
b.Barcode,
(Case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
Else 'non-goodaid'
end)
'''
non_barcoded_ga = pd.read_sql(non_barcoded_ga,cnxn)
# TEPL Warehouse
mssql_tepl = MSSql(connect_via_tunnel=False,db='Esdata_TEPL')
cnxn = mssql_tepl.open_connection()
cursor = cnxn.cursor()
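# TEPL warehouse: same pair of queries against the Esdata_TEPL database, entity-id 342.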
barcoded_tepl ='''
select
342 as "entity-id",
'tepl-warehouse' as "entity-name",
'warehouse' as "entity-type",
'' as "franchise-id",
b.Barcode as "drug-id",
'barcoded' as "inventory-type",
(case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
else 'non-goodaid'
end) as "inventory-sub-type-1",
'' as "inventory-sub-type-2",
'' as "inventory-sub-type-3",
sum(case when a.Vno < 0 then 0 else coalesce(a.bqty, 0) end) as "quantity",
sum(case when a.Vno < 0 then 0 else coalesce(a.bqty * a.Cost , 0) end) as "value-without-tax",
sum(case when a.vno<0 then 0 else coalesce((a.bqty * a.Cost *(1 + coalesce((sp.CGST + sp.SGST + sp.IGST), 0)/ 100)), 0) end) as "value-with-tax"
from
fifo a
left join item b on
a.itemc = b.code
left join SalePurchase2 sp on
(a.Pbillno = sp.Pbillno
and a.Psrlno = sp.Psrlno
and a.Itemc = sp.Itemc
and sp.Vdt = a.Vdt)
where
b.code > 0 and a.Vno!=0
and isnumeric(b.Barcode) = 1
and b.Barcode not like '%[^0-9]%'
and a.BQty >0
and a.Psrlno in (
select
Psrlno
from
SalePurchase2 sp)
group by
b.Barcode,
(case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
else 'non-goodaid'
end)
'''
barcoded_tepl = pd.read_sql(barcoded_tepl,cnxn)
non_barcoded_tepl ='''
select
342 as "entity-id",
'tepl-warehouse' as "entity-name",
'warehouse' as "entity-type",
'' as "franchise-id",
b.Barcode as "drug-id",
'non-barcoded' as "inventory-type",
(Case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
Else 'non-goodaid'
end) as "inventory-sub-type-1",
'' as "inventory-sub-type-2",
'' as "inventory-sub-type-3",
sum(case when a.Vno > 0 then 0 else coalesce(a.TQty , 0) end) as "quantity",
sum(case when a.Vno > 0 then 0 else coalesce(a.TQty * a.Cost , 0) end) as "value-without-tax",
sum(case when a.vno > 0 then 0 else coalesce((a.TQty * a.Cost *(1 + COALESCE((sp.CGST + sp.SGST + sp.IGST), 0)/ 100)), 0) end) as "value-with-tax"
from
fifo a
left join item b on
a.itemc = b.code
left join SalePurchase2 sp on
(a.Pbillno = sp.Pbillno
and a.Psrlno = sp.Psrlno
and a.Itemc = sp.Itemc
and sp.Vdt = a.Vdt)
where
b.code > 0 and a.Vno!=0
and isnumeric(b.Barcode) = 1
and b.Barcode not like '%[^0-9]%'
and a.TQty >0
and a.vno<0
and a.Psrlno in (
select
Psrlno
from
SalePurchase2 sp)
group by
b.Barcode,
(Case
when (b.Compcode = 465
or b.Compcode = 459 or b.Compcode=460)
and a.Acno != 59353 then 'goodaid'
when (a.Acno = 59353) then 'npi'
Else 'non-goodaid'
end)
'''
non_barcoded_tepl = pd.read_sql(non_barcoded_tepl,cnxn)
# Concatenating the barcoded, non barcoded and wh_returns
warehouse_all=pd.concat([barcoded,non_barcoded,wh_returns,barcoded_ga,non_barcoded_ga,barcoded_tepl,
non_barcoded_tepl],
sort=False,ignore_index=False)
warehouse_all[['entity-id', 'franchise-id', 'drug-id']]= \
warehouse_all[['entity-id','franchise-id','drug-id']].\
apply(pd.to_numeric, errors='ignore').astype('Int64')
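# Map warehouse barcodes to drug attributes; rows whose barcode has no match in
# the drugs table fall back to 'NA' for drug-type and goodaid-flag.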
warehouse_merge=pd.merge(warehouse_all,drug_info,how='left',on='drug-id')
warehouse_merge[['drug-type', 'goodaid-flag']]=\
warehouse_merge[['drug-type', 'goodaid-flag']].fillna('NA')
warehouse_merge=warehouse_merge.drop(['drug-id'],axis=1)
warehouse_merge[['quantity']]=warehouse_merge[['quantity']].astype(np.int64)
warehouse_merge=warehouse_merge.\
groupby(['entity-id', 'entity-name', 'entity-type','inventory-type',
'inventory-sub-type-1','inventory-sub-type-2',
'inventory-sub-type-3','drug-type', 'goodaid-flag']).sum()
warehouse_merge=warehouse_merge.reset_index()
warehouse_merge[['entity-id','franchise-id']]=warehouse_merge[['entity-id','franchise-id']].\
replace(0, np.nan)
warehouse_merge=warehouse_merge[['entity-id', 'entity-name', 'entity-type', 'drug-type',
'goodaid-flag',
'franchise-id', 'inventory-type', 'inventory-sub-type-1',
'inventory-sub-type-2', 'inventory-sub-type-3', 'quantity',
'value-without-tax', 'value-with-tax']]
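# Final union of store, DC (to-be-dispatched / in-transit / returns) and warehouse frames
# before type-casting and the daily snapshot load.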
system_inv = pd.concat([stores,to_be_dispatched,in_transit,return_query,warehouse_merge],
sort=False,ignore_index=True)
system_inv[['entity-id', 'franchise-id']]= system_inv[['entity-id','franchise-id']].\
apply(pd.to_numeric, errors='ignore').astype('Int64')
system_inv[['quantity','value-without-tax', 'value-with-tax']]=\
system_inv[['quantity','value-without-tax', 'value-with-tax']].fillna(0)
system_inv[['quantity']]=system_inv[['quantity']].astype(np.int64)
system_inv[['value-without-tax', 'value-with-tax']]=\
system_inv[['value-without-tax', 'value-with-tax']].astype(np.float64)
system_inv['snapshot-date'] = snapshot_date
created_at = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
system_inv['created-at']=datetime.strptime(created_at,"%Y-%m-%d %H:%M:%S")
updated_at = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
system_inv['updated-at']=datetime.strptime(updated_at,"%Y-%m-%d %H:%M:%S")
system_inv['created-by'] = 'etl-automation'
system_inv['updated-by'] = 'etl-automation'
# Delete today's snapshot (if already present) so reruns do not duplicate rows
truncate_query = '''
delete from "prod2-generico"."system-inventory"
where date("snapshot-date") = '{snapshot_date}'
'''.format(snapshot_date=snapshot_date)
rs_db.execute(truncate_query)
system_inv.columns = [c.replace('_', '-') for c in system_inv.columns]
schema = "prod2-generico"
table_name = "system-inventory"
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
s3.write_df_to_db(df=system_inv[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
status = True
script_status = "Success" if status else "Failed"
email = Email()
email.send_email_file(subject=f"system_inventory {snapshot_date} {script_status}",
mail_body=f"system inventory job status: {script_status} ",
to_emails=email_to)
# closing the DB connection in the end
rs_db.close_connection()
mssql.close_connection()
mssql_ga.close_connection()
mssql_tepl.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/system_inventory/system-inventory.py | system-inventory.py |
import os
import sys
sys.path.append('../../../..')
# import json
import argparse
import pandas as pd
import numpy as np
import datetime
import math
# import traceback
# from datetime import date, timedelta
# from dateutil.relativedelta import relativedelta
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-n', '--number_of_stores', default="10", type=str, required=False)
parser.add_argument('-cn', '--city_name', default="Nagpur", type=str, required=False)
parser.add_argument('-cd', '--city_distance_from_mumbai_in_km', default="836", type=str, required=False)
parser.add_argument('-inp', '--increase_in_purchasing_power_compared_to_mumbai', default="86.99", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
number_of_stores = args.number_of_stores
city_name = args.city_name
city_distance_from_mumbai_in_km = args.city_distance_from_mumbai_in_km
increase_in_purchasing_power_compared_to_mumbai = args.increase_in_purchasing_power_compared_to_mumbai
number_of_stores = int(number_of_stores)
city_distance_from_mumbai_in_km = float(city_distance_from_mumbai_in_km)
increase_in_purchasing_power_compared_to_mumbai = float(increase_in_purchasing_power_compared_to_mumbai)
os.environ['env'] = env
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
start_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
today_date = start_time.strftime('%Y-%m-%d')
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info(f"email_to: {email_to}")
logger.info(f"number_of_stores: {number_of_stores}")
logger.info(f"city_name: {city_name}")
logger.info(f"city_distance_from_mumbai_in_km: {city_distance_from_mumbai_in_km}")
logger.info(f"increase_in_purchasing_power_compared_to_mumbai: {increase_in_purchasing_power_compared_to_mumbai}")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')))
city_cost_parity = 1/((increase_in_purchasing_power_compared_to_mumbai+100)/100)
status = False
try:
# assumptions = pd.read_csv(r'D:\Distribution Network Model\assumptions.csv')
assumptions = pd.read_csv(s3.download_file_from_s3(file_name="dnm-cost-input/dnm-assumption-input.csv"))
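    # The assumptions CSV drives the model parameters below (staff throughput and salaries,
    # fixed costs, logistics rates), keyed by its 'type' and 'variable' columns.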
# assumptions.set_index('variable',inplace=True)
# Calculating last 30 days sales figures
store_sales_query = f"""
select
s3."s-type" as "variable",
round(avg(s3.value), 0) as values,
round(avg(s3.quantity), 0) as quantity,
round(avg(s3."purchase-rate"), 0) as "purchase-rate",
-- round(avg(s3.ptr), 0) as ptr,
'type wise last 30 days sales per store avg' as "description"
from
(
select
s2."store-id",
s2."s-type",
sum(s2.value) as "value" ,
sum(s2.quantity) as "quantity",
sum(s2."purchase-rate") as "purchase-rate",
sum(s2.ptr) as ptr
from
(
select
s."store-id" ,
round(sum(s.quantity * s.rate), 0) as "value",
sum(quantity) as quantity,
sum(s."purchase-rate"*s.quantity) as "purchase-rate" ,
sum(s.ptr*s.quantity) as "ptr",
case
when s.company = 'GOODAID' then 'goodaid'
when s."type" = 'ethical' then 'ethical'
when s."type" = 'generic' then 'generic'
else 'others'
end as "s-type"
from
"prod2-generico"."prod2-generico".sales s
where
date(s."created-at") >= current_date -31
and date(s."created-at") <= current_date - 1
and date(s."store-opened-at") <= current_date - 60
and date(s."first-bill-date") <= current_date - 60
group by
s."store-id",
s."type" ,
s.company)s2
group by
s2."store-id",
s2."s-type")s3
group by
s3."s-type"
"""
store_sales = rs_db.get_df(store_sales_query)
logger.info('fetched store_sales')
return_ratio_query = f"""
select
avg(combine.sold_quantity) as "avg_sold_qty_per_store",
avg(combine.returned_quantity) as "avg_return_qty_per_store"
from
((
select
s."store-id" ,
sum(quantity) as sold_quantity
from
"prod2-generico"."prod2-generico".sales s
where
date(s."created-at") >= current_date -31
and date(s."created-at") <= current_date - 1
and date(s."store-opened-at") <= current_date - 60
and date(s."first-bill-date") <= current_date - 60
group by
s."store-id")s2
left join (
select
rtd."store-id" ,
sum(ri."returned-quantity") as returned_quantity
from
"prod2-generico"."prod2-generico"."return-items-1" ri
left join "prod2-generico"."prod2-generico"."returns-to-dc-1" rtd
on
ri."return-id" = rtd.id
left join "prod2-generico"."prod2-generico".stores s2
on
s2.id = rtd."store-id"
where
date(rtd."created-at") >= current_date -31
and date(rtd."created-at") <= current_date - 1
and date(s2."opened-at") <= current_date - 60
group by
rtd."store-id"
)ret
on
ret."store-id" = s2."store-id")combine
"""
return_ratio = rs_db.get_df(return_ratio_query)
logger.info('fetched return_ratio')
avg_store_sale = store_sales['values'].sum()
avg_quantity = store_sales['quantity'].sum()
avg_cogs = store_sales['purchase-rate'].sum()
logger.info(f'average sale of current stores - {avg_store_sale}')
logger.info(f'quantity per store as per current stores - {avg_quantity}')
logger.info(f'cogs per store as per current stores - {avg_cogs}')
# fofo_store_factor = float(assumptions.where(assumptions['variable'] == 'fofo_store_sales_as_percentage_of_total',axis=0).dropna()['value'])
#
# avg_fofo_store_sale = float(avg_store_sale)*float(fofo_store_factor)
total_model = [1,2,3,4]
summary_final = pd.DataFrame()
# total_model = [4]
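    # Four network models are costed per run:
    # 1 - warehouse ships to stores directly, 2 - via DC, 3 - via DC with more local vendors,
    # 4 - via DC where the DC also holds inventory; only the warehouse/DC split and the
    # DC inventory-holding assumption differ between them.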
for model in total_model:
if model== 1:
model_name = 'wh_to_store_direct'
elif model==2:
model_name = 'wh_to_store_via_dc'
elif model==3:
model_name = 'wh_to_store_via_dc_more_local_vendors'
elif model == 4:
model_name = 'wh_to_store_via_dc_plus_dc_storing_items'
result = pd.DataFrame(columns=['variable','values'])
i = 0
result.loc[i,'variable'] = 'Model Name'
result.loc[i,'values'] = model_name
result.loc[i,'description'] = 'Model'
i = i+1
result.loc[i,'variable'] = 'Number of Stores'
result.loc[i,'values'] = number_of_stores
result.loc[i,'description'] = 'input'
i = i+1
result.loc[i,'variable'] = 'city'
result.loc[i,'values'] = city_name
result.loc[i,'description'] = 'input'
i = i+1
result.loc[i,'variable'] = 'distance from Mumbai in KM'
result.loc[i,'values'] = city_distance_from_mumbai_in_km
result.loc[i,'description'] = 'input'
i = i+1
        result.loc[i,'variable'] = 'increase in purchasing power in the city compared to mumbai'
result.loc[i,'values'] = increase_in_purchasing_power_compared_to_mumbai
result.loc[i,'description'] = 'input'
i = i+1
result.loc[i,'variable'] = 'city cost parity'
result.loc[i,'values'] = round(city_cost_parity,2)
result.loc[i,'description'] = 'calculation based on purchasing power'
result.loc[i,'calculation'] = '1/((increase_in_purchasing_power_compared_to_mumbai+100)/100)'
i = i+1
result = pd.concat([result,store_sales[['variable', 'values', 'quantity', 'description']]],sort=True)
i = i + 4
result.reset_index(inplace=True,drop=True)
result.loc[i,'variable'] = 'revenue'
result.loc[i,'values'] = avg_store_sale*number_of_stores
result.loc[i,'description'] = f'monthly revenue for {number_of_stores} stores'
i = i+1
result.loc[i,'variable'] = 'cogs'
result.loc[i,'values'] = avg_cogs*number_of_stores
result.loc[i,'description'] = f'monthly cogs for {number_of_stores} stores'
i = i+1
result.loc[i,'variable'] = 'quantity'
result.loc[i,'values'] = avg_quantity*number_of_stores
result.loc[i,'description'] = f'monthly quantity sold in {number_of_stores} stores'
i = i+1
if model==1:
distribution = {'wh_ethical': 1,
'wh_goodaid':1,
'wh_generic':1,
'wh_others':1}
elif model==2:
distribution = {'wh_ethical': 0.7,
'wh_goodaid':1,
'wh_generic':0.9,
'wh_others':0.6}
elif model==3:
distribution = {'wh_ethical': 0.4,
'wh_goodaid':1,
'wh_generic':0.7,
'wh_others':0.3}
elif model==4:
distribution = {'wh_ethical': 0.4,
'wh_goodaid':1,
'wh_generic':0.7,
'wh_others':0.3}
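        # distribution = share of each drug type routed through the warehouse; the remaining
        # (1 - share) is treated as flowing directly through the DC.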
result.loc[i,'variable'] = 'wh others'
result.loc[i,'values'] = distribution['wh_others']
result.loc[i,'description'] = f'value - % Quantity Transfer through WH for others per day for {number_of_stores} stores'
result.loc[i,'quantity'] =(distribution['wh_others']*float(result.where(result['variable'] == 'others',axis=0)['quantity'].dropna())/30)*number_of_stores
result.loc[i, 'calculation'] = "wh_share_for_wh_others*(others_flow_per_store_per_month/30)*number_of_stores"
i = i+1
result.loc[i,'variable'] = 'wh ethical'
result.loc[i,'values'] = distribution['wh_ethical']
result.loc[i,'description'] = f'value - % Quantity Transfer through WH for Ethical,quantity transfer per day for {number_of_stores} stores'
result.loc[i,'quantity'] =(distribution['wh_ethical']*float(result.where(result['variable'] == 'ethical',axis=0)['quantity'].dropna())/30)*number_of_stores
result.loc[i,'calculation'] = "wh_share_for_wh_ethical*(ethical_flow_per_store_per_month/30)*number_of_stores"
i = i+1
result.loc[i,'variable'] = 'wh goodaid'
result.loc[i,'values'] = distribution['wh_goodaid']
result.loc[i,'description'] = f'value - % Quantity Transfer through WH for goodaid per day for {number_of_stores} stores'
result.loc[i,'quantity'] =(distribution['wh_goodaid']*float(result.where(result['variable'] == 'goodaid',axis=0)['quantity'].dropna())/30)*number_of_stores
result.loc[i, 'calculation'] = "wh_share_for_wh_goodaid*(goodaid_flow_per_store_per_month/30)*number_of_stores"
i = i+1
result.loc[i,'variable'] = 'wh generic'
result.loc[i,'values'] = distribution['wh_generic']
result.loc[i,'description'] = f'value - % Quantity Transfer through WH for generic per day for {number_of_stores} stores'
result.loc[i,'quantity'] =(distribution['wh_generic']*float(result.where(result['variable'] == 'generic',axis=0)['quantity'].dropna())/30)*number_of_stores
result.loc[i, 'calculation'] = "wh_share_for_wh_generic*(generic_flow_per_store_per_month/30)*number_of_stores"
i = i+1
wh_throghput = result.where(result['variable'].isin(['wh ethical', 'wh generic', 'wh goodaid', 'wh others']),axis=0)['quantity'].dropna().sum()
result.loc[i,'variable'] = 'wh throghput'
result.loc[i,'quantity'] = wh_throghput
result.loc[i,'description'] = f'quantity flow through wh on daily basis for {number_of_stores} stores'
result.loc[i, 'calculation'] = "sum of all types flow"
i = i+1
wh_staff = assumptions[assumptions['type']=='wh_staff'][['variable', 'throghput', 'Salary_per_person', 'description']]
conditions = [
wh_staff['description'] == 'throughput-qty_per_person_per_day',
(wh_staff['description'] == 'per city')]
choices = [wh_throghput/wh_staff['throghput'], wh_staff['throghput']]
wh_staff['quantity'] = np.select(conditions, choices)
wh_staff['values'] = wh_staff['quantity']*wh_staff['Salary_per_person']
wh_staff['type'] = 'wh_variable'
result = pd.concat([result,wh_staff],sort=True)
i = i + 10
result.reset_index(inplace=True,drop=True)
wh_variable = assumptions[assumptions['type']=='wh_variable'][['variable', 'throghput', 'Salary_per_person', 'description']]
wh_variable.reset_index(inplace=True,drop=True)
wh_variable.loc[0,'values'] = wh_throghput*float(wh_variable.where(wh_variable['variable'] == 'wh_stationary',axis=0)['throghput'].dropna())
wh_variable.loc[1,'values'] = wh_staff['quantity'].sum()*float(wh_variable.where(wh_variable['variable'] == 'wh_staff_welfare',axis=0)['throghput'].dropna())
wh_variable.loc[2,'values'] = float(avg_cogs)*float(number_of_stores)*float(wh_variable.where(wh_variable['variable'] == 'wh_shrinkages',axis=0)['throghput'].dropna())
wh_variable['type'] = 'wh_variable'
result = pd.concat([result,wh_variable],sort=True)
i = i + 3
result.reset_index(inplace=True,drop=True)
wh_fixed = assumptions[assumptions['type']=='wh_fixed'][['variable', 'value' , 'Salary_per_person', 'description']]
wh_fixed.rename(columns = { 'value': 'throghput'}, inplace=True)
wh_fixed['description'] = 'throghput - total cost per month, value = marginal increase'
wh_fixed['values'] = 0
wh_fixed['type'] = 'wh_fixed'
result = pd.concat([result,wh_fixed],sort=True)
i = i + 5
result.reset_index(inplace=True,drop=True)
result.loc[i,'variable'] = 'dc others'
result.loc[i,'values'] = 1 - distribution['wh_others']
result.loc[i,'description'] = f'value - % Quantity Transfer directly through dc for others'
result.loc[i,'quantity'] =((1- distribution['wh_others'])*float(result.where(result['variable'] == 'others',axis=0)['quantity'].dropna())/30)*number_of_stores
result.loc[i, 'calculation'] = "dc_share_for_dc_others*(others_flow_per_store_per_month/30)*number_of_stores"
i = i+1
result.loc[i,'variable'] = 'dc ethical'
result.loc[i,'values'] = 1-distribution['wh_ethical']
result.loc[i,'description'] = f'value - % Quantity Transfer directly through dc for Ethical'
result.loc[i,'quantity'] =((1 - distribution['wh_ethical'])*float(result.where(result['variable'] == 'ethical',axis=0)['quantity'].dropna())/30)*number_of_stores
result.loc[i, 'calculation'] = "dc_share_for_dc_ethical*(ethical_flow_per_store_per_month/30)*number_of_stores"
i = i+1
result.loc[i,'variable'] = 'dc goodaid'
result.loc[i,'values'] = 1 - distribution['wh_goodaid']
result.loc[i,'description'] = f'value - % Quantity Transfer directly through dc for goodaid'
result.loc[i,'quantity'] =((1 - distribution['wh_goodaid'])*float(result.where(result['variable'] == 'goodaid',axis=0)['quantity'].dropna())/30)*number_of_stores
result.loc[i, 'calculation'] = "dc_share_for_dc_goodaid*(goodaid_flow_per_store_per_month/30)*number_of_stores"
i = i+1
result.loc[i,'variable'] = 'dc generic'
result.loc[i,'values'] = 1 - distribution['wh_generic']
result.loc[i,'description'] = f'value - % Quantity Transfer directly through dc for generic'
result.loc[i,'quantity'] =((1-distribution['wh_generic'])*float(result.where(result['variable'] == 'generic',axis=0)['quantity'].dropna())/30)*number_of_stores
result.loc[i, 'calculation'] = "dc_share_for_dc_generic*(generic_flow_per_store_per_month/30)*number_of_stores"
i = i+1
dc_throghput = result.where(result['variable'].isin(['dc ethical', 'dc generic', 'dc goodaid', 'dc others']),axis=0)['quantity'].dropna().sum()
result.loc[i,'variable'] = 'dc throghput'
result.loc[i,'quantity'] = dc_throghput
result.loc[i,'description'] = f'quantity flow through dc on daily basis'
i = i+1
if model ==4:
result.loc[i,'variable'] = 'dc holding inventory flag'
result.loc[i,'values'] = 1
result.loc[i,'description'] = 'if 1 then yes, if 0 then no'
result.loc[i,'calculation'] = 'model dependent'
i = i+1
else:
result.loc[i,'variable'] = 'dc holding inventory flag'
result.loc[i,'values'] = 0
result.loc[i,'description'] = 'if 1 then yes, if 0 then no'
result.loc[i,'calculation'] = 'model dependent'
i = i+1
if model ==4:
dc_holding_inventory_for_n_days = float(assumptions.where(assumptions['variable'] == 'inventory_holding_for_n_days',axis=0)['value'].dropna())
else:
dc_holding_inventory_for_n_days = 0
result.loc[i, 'variable'] = 'dc holding inventory for n days'
result.loc[i, 'values'] = dc_holding_inventory_for_n_days
result.loc[i, 'description'] = 'value - number of days, Input'
result.loc[i, 'calculation'] = 'input'
i = i + 1
result.loc[i,'variable'] = 'cogs per quantity'
result.loc[i,'values'] = avg_cogs/avg_quantity
result.loc[i,'description'] = f'avg cogs per quantity'
result.loc[i, 'calculation'] = 'avg_cogs/avg_quantity'
i = i+1
result.loc[i, 'variable'] = 'dc inventory holding'
result.loc[i, 'quantity'] = dc_holding_inventory_for_n_days*dc_throghput
dc_holding_value = float(avg_cogs/avg_quantity)*dc_holding_inventory_for_n_days*dc_throghput
result.loc[i, 'values'] = dc_holding_value
result.loc[i, 'description'] = 'inventory holding per day'
result.loc[i, 'calculation'] = 'cogs per quantity*dc_holding_inventory_for_n_days*dc_throghput'
i = i + 1
result.loc[i, 'variable'] = 'inventory carrying cost'
result.loc[i, 'Salary_per_person'] = 12
result.loc[i, 'values'] = dc_holding_value*12/1200
result.loc[i, 'description'] = 'value - rs per month, Salary_per_person- interest per annum'
result.loc[i, 'calculation'] = 'dc_holding_value * interest per annum/1200'
result.loc[i,'type'] = 'dc_variable'
i = i + 1
def percentage_increase_in_dc_fixed_cost_due_to_inv_holding(quantity):
return (quantity/200000)*100
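        # Assumption baked into this helper: every 200,000 units of inventory held at the DC
        # adds 100% to the Mumbai-baseline DC fixed cost (linear scaling).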
result.loc[i, 'variable'] = 'dc_fixed_cost_increase_inv_holding'
result.loc[i, 'values'] = percentage_increase_in_dc_fixed_cost_due_to_inv_holding(dc_holding_inventory_for_n_days*dc_throghput)
result.loc[i, 'description'] = 'percentage increase_in_dc_fixed_cost_due_to_inv_holding'
result.loc[i, 'calculation'] = 'Dc holding quantity/200000'
i = i + 1
dc_staff = assumptions[assumptions['type']=='dc_staff'][['variable', 'Salary_per_person', 'description', 'throghput']]
conditions = [
dc_staff['variable'].isin(['dc_barcoder']),
dc_staff['variable'].isin(['dc_purchaser','dc_inward_team']),
dc_staff['variable'].isin(['dc_returns_team']),
dc_staff['variable'].isin(['dc_manager','dc_inventory_manager'])]
choices = [(dc_throghput/dc_staff['throghput']),
((dc_throghput/4)/dc_staff['throghput']),
((dc_throghput/10)/dc_staff['throghput']),
dc_staff['throghput']]
dc_staff['quantity'] = np.select(conditions, choices)
conditions = [dc_staff['quantity']<=1,dc_staff['quantity']>1]
choices = [1,dc_staff['quantity'].apply(np.round)]
dc_staff['quantity'] = dc_staff['quantity'].apply(np.ceil)
dc_staff['values'] = dc_staff['quantity']*dc_staff['Salary_per_person']
dc_staff['type'] = 'dc_variable'
dc_staff.reset_index(inplace=True,drop = True)
dc_employees = dc_staff['quantity'].sum()
dc_staff.loc[5,'quantity'] = dc_employees
dc_staff.loc[5,'values'] = dc_employees*dc_staff.loc[5,'throghput']*30
if dc_throghput==0:
dc_staff['values']=0
dc_staff['quantity']=0
result = pd.concat([result,dc_staff],sort=True)
i = i + 7
result.reset_index(inplace=True,drop=True)
dc_fixed = assumptions[assumptions['type']=='dc_fixed'][['variable', 'value' , 'Salary_per_person', 'description']]
dc_fixed.rename(columns = { 'value': 'throghput'}, inplace=True)
dc_fixed['description'] = f'value = final cost in {city_name},throghput - total cost per month in mumbai,Salary_per_person - according to cities cost parity,quantity - percentage_impact of inventory holding'
dc_fixed['Salary_per_person'] = dc_fixed['throghput']*city_cost_parity
dc_fixed['quantity'] = percentage_increase_in_dc_fixed_cost_due_to_inv_holding(dc_holding_inventory_for_n_days*dc_throghput)
dc_fixed['values'] = dc_fixed['throghput']*city_cost_parity*(100 +percentage_increase_in_dc_fixed_cost_due_to_inv_holding(dc_holding_inventory_for_n_days*dc_throghput) )/100
dc_fixed['type'] = 'dc_fixed'
if dc_throghput==0:
dc_fixed['values']=0
dc_fixed['quantity']=0
result = pd.concat([result,dc_fixed],sort=True)
i = i + 7
result.reset_index(inplace=True,drop=True)
if dc_throghput <= 0:
number_of_biker = number_of_stores / 5
else:
number_of_biker = number_of_stores / 10
        result.loc[i, 'variable'] = f'delivery associate required for {city_name}'
result.loc[i, 'quantity'] = number_of_biker
result.loc[i, 'description'] = 'for intercity transport'
result.loc[i, 'Salary_per_person'] = 15000
result.loc[i, 'values'] = number_of_biker * 15000
result.loc[i, 'type'] = 'logistics'
result.loc[i, 'calculation'] = 'if dc available 1 biker per 10 stores, if no dc 1 biker per 5 stores'
i = i + 1
result.loc[i,'variable'] = 'kg_per_quantity'
result.loc[i,'values'] = float(assumptions.where(assumptions['variable'] == 'kg_per_quantity',axis=0)['value'].dropna())
result.loc[i,'description'] = 'input'
i = i+1
result.loc[i,'variable'] = 'flow_through_wh_in_kg'
result.loc[i,'values'] = wh_throghput*float(assumptions.where(assumptions['variable'] == 'kg_per_quantity',axis=0)['value'].dropna())
result.loc[i,'description'] = 'on daily basis'
i = i+1
def cost_of_transport(km):
if km<=100:
return 30
elif km<=200:
return 30
elif km<=300:
return 30
elif km <= 400:
return 30
elif km <= 500:
return 35
elif km <= 1000:
return 40
elif km<= 2000:
return 45
else:
return 50
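        # Courier rate card assumed here: a flat Rs 30 per kg up to 400 km, stepping up in
        # slabs to Rs 50 per kg beyond 2000 km.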
result.loc[i,'variable'] = 'cost per kg'
result.loc[i,'values'] = cost_of_transport(city_distance_from_mumbai_in_km)
result.loc[i,'description'] = 'cost assumed based on distance'
i = i+1
        if dc_throghput <= 0:
            result.loc[i,'variable'] = 'extra cost based on delivery convenience'
            result.loc[i,'values'] = 2
            result.loc[i,'description'] = 'If DC available, single location of delivery; otherwise multiple'
            i = i+1
            cost_of_transport_ = cost_of_transport(city_distance_from_mumbai_in_km) + 2
        else:
            result.loc[i,'variable'] = 'extra cost based on delivery convenience'
            result.loc[i,'values'] = 0
            result.loc[i,'description'] = 'If DC available, single location of delivery; otherwise multiple'
            i = i+1
            cost_of_transport_ = cost_of_transport(city_distance_from_mumbai_in_km) + 0
result.loc[i,'variable'] = 'cost of transport till courier destination'
result.loc[i,'values'] = wh_throghput*float(assumptions.where(assumptions['variable'] == 'kg_per_quantity',axis=0)['value'].dropna())*cost_of_transport_*30
result.loc[i,'description'] = 'on monthly basis in Rs'
result.loc[i,'type'] = 'logistics_1'
i = i+1
        result.loc[i,'variable'] = 'sold vs returned quantity'
result.loc[i,'quantity'] = return_ratio.loc[0,'avg_sold_qty_per_store']
result.loc[i,'throghput'] = return_ratio.loc[0,'avg_return_qty_per_store']
return_ratio_value = return_ratio.loc[0,'avg_return_qty_per_store']/return_ratio.loc[0,'avg_sold_qty_per_store']
result.loc[i,'values'] = return_ratio_value
result.loc[i,'description'] = 'quantity - avg_sold_qty_per_store, throughput - avg_return_qty_per_store '
i = i+1
result.loc[i,'variable'] = 'wh return quantity'
result.loc[i,'quantity'] = wh_throghput*return_ratio_value
result.loc[i,'description'] = 'quantity received from wh will be returned to wh'
result.loc[i,'calculation'] = 'wh_throghput*return_ratio_value'
i = i+1
result.loc[i,'variable'] = 'flow_through_store_to_wh_in_kg'
result.loc[i,'values'] = wh_throghput*return_ratio_value*float(assumptions.where(assumptions['variable'] == 'kg_per_quantity',axis=0)['value'].dropna())
result.loc[i,'description'] = 'on daily basis'
result.loc[i,'calculation'] = 'wh_throghput*return_ratio_value*kg_per_quantity'
i = i+1
def additional_cost_while_returning(km):
if km<=10:
return 150
elif km<=20:
return 300
else:
return 450
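        # Flat surcharge on the return leg (store -> warehouse), slabbed by the daily return
        # weight: Rs 150 up to 10 kg, Rs 300 up to 20 kg, Rs 450 beyond that.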
additiona_cost_while_returns = additional_cost_while_returning(float(result.where(result['variable'] == 'flow_through_store_to_wh_in_kg', axis=0)['values'].dropna()))
result.loc[i,'variable'] = 'flat additional cost for wh returns'
result.loc[i,'values'] = additiona_cost_while_returns
result.loc[i,'description'] = 'on daily basis'
result.loc[i,'calculation'] = 'if <10 Kg per day 150, 20 - 300, else - 450'
i = i+1
result.loc[i, 'variable'] = 'cost of transport of wh returns'
result.loc[i, 'values'] = (wh_throghput*return_ratio_value * float(
assumptions.where(assumptions['variable'] == 'kg_per_quantity', axis=0)[
'value'].dropna()) * cost_of_transport_ + additiona_cost_while_returns)* 30
result.loc[i, 'description'] = 'on monthly basis in Rs'
result.loc[i, 'type'] = 'logistics_1'
result.loc[i,'calculation'] = '(flow_through_store_to_wh_in_kg*cost_of_transport + flat_rate)*30'
i = i + 1
tempo_rent = float(assumptions.where(assumptions['variable'] == 'tempo_rent', axis=0)['value'].dropna())
tempo_mileage = float(assumptions.where(assumptions['variable'] == 'tempo_mileage', axis=0)['value'].dropna())
petrol_cost = float(assumptions.where(assumptions['variable'] == 'petrol_cost', axis=0)['value'].dropna())
quantity_per_tempo = float(assumptions.where(assumptions['variable'] == 'quantity_per_tempo', axis=0)['value'].dropna())
result.loc[i,'variable'] = 'tempo travel info'
result.loc[i,'Salary_per_person'] =tempo_rent
result.loc[i, 'throghput'] = tempo_mileage
result.loc[i, 'quantity'] = quantity_per_tempo
result.loc[i, 'values'] = petrol_cost
result.loc[i,'description'] = 'values-petrol_cost, quantity=quantity_per_tempo, throghput=tempo_mileage, Salary_per_person=tempo_rent'
result.loc[i,'calculation'] = 'input'
i = i+1
if model==4:
tempo_trips_per_day = math.ceil(wh_throghput*dc_holding_inventory_for_n_days/quantity_per_tempo)/dc_holding_inventory_for_n_days
else:
tempo_trips_per_day = math.ceil(wh_throghput/ quantity_per_tempo)
result.loc[i,'variable'] = 'tempo trips per day'
result.loc[i,'values'] =tempo_trips_per_day
result.loc[i, 'quantity'] = (city_distance_from_mumbai_in_km/tempo_mileage*2)
result.loc[i,'description'] = 'values - trips per day, quantity - petrol used per trip'
result.loc[i,'calculation'] = 'if no inv holding at dc - ceil(dc_throghput/ quantity_per_tempo), if dc - ceil(dc_throghput*dc_holding_inventory_for_n_days/quantity_per_tempo)/dc_holding_inventory_for_n_days '
i = i+1
result.loc[i,'variable'] = 'tempo trips cost per month'
result.loc[i, 'quantity'] = tempo_trips_per_day*30
result.loc[i,'values'] =(tempo_trips_per_day)*30*(tempo_rent+(city_distance_from_mumbai_in_km/tempo_mileage*2)*petrol_cost)
result.loc[i, 'throghput'] = (tempo_trips_per_day)*30*(tempo_rent)
result.loc[i, 'Salary_per_person'] = (city_distance_from_mumbai_in_km/tempo_mileage*2)*petrol_cost
result.loc[i,'description'] = 'per month basis'
result.loc[i,'type'] = 'logistics_2'
result.loc[i,'calculation'] = 'quantity- trips,throghput = flat charge,Salary_per_person = Petrol charge per trip'
i = i+1
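        # Two logistics options are costed: logistics_1 = per-kg courier (forward + returns)
        # and logistics_2 = dedicated tempo trips; the cheaper of the two is carried into the
        # model summary as 'best-logistic'.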
logistic_comparison = result[result['type'].isin(['logistics_1','logistics_2'])].groupby(['type']).agg({'values': [np.sum]}).reset_index()
logistic_comparison.columns = ["-".join(x) for x in logistic_comparison.columns.ravel()]
if logistic_comparison.loc[0,'values-sum']>=logistic_comparison.loc[1,'values-sum']:
output_logistic = logistic_comparison.loc[1,'type-']
logistic_value = logistic_comparison.loc[1,'values-sum']
else:
output_logistic = logistic_comparison.loc[0,'type-']
logistic_value = logistic_comparison.loc[0,'values-sum']
result.loc[i,'variable'] = 'best-logistic'
result.loc[i, 'quantity'] = output_logistic
result.loc[i,'values'] =logistic_value
result.loc[i, 'throghput'] = f"logistic_1 - {round(logistic_comparison.loc[0,'values-sum'])}"
result.loc[i, 'Salary_per_person'] = f"logistic_2 - {round(logistic_comparison.loc[1,'values-sum'])}"
result.loc[i,'description'] = 'per month basis'
result.loc[i,'type'] = 'logistics'
result.loc[i,'calculation'] = 'min of logistics_1 & 2'
i = i+1
cols_to_move = ['variable', 'values', 'quantity', 'Salary_per_person', 'throghput', 'type', 'description','calculation']
result = result[cols_to_move + [col for col in result.columns
if col not in cols_to_move]]
summary = result[(result['type'].notna())&(~result['type'].isin(['logistics_1','logistics_2']))].groupby(['type']).agg({'values': [np.sum]}).reset_index()
summary.columns = ["-".join(x) for x in summary.columns.ravel()]
summary['model-name'] = model_name
summary['model-number'] = model
        summary['description'] = f'logistic chosen - {output_logistic}'
summary.rename(columns = {'type-':'type',
'values-sum':'values'}, inplace = True)
cols_to_move = ['model-number','model-name', 'type', 'values']
summary = summary[cols_to_move + [col for col in summary.columns
if col not in cols_to_move]]
summary_final = summary_final.append(summary)
if model==1:
model1 = result.copy(deep = True)
elif model ==2:
model2 = result.copy(deep = True)
elif model ==3:
model3 = result.copy(deep = True)
elif model ==4:
model4 = result.copy(deep = True)
model_cost=summary_final.groupby(['model-name','model-number']).agg({'values': [np.sum]}).reset_index()
model_cost.columns = ["-".join(x) for x in model_cost.columns.ravel()]
model_cost.rename(columns={'model-name-': 'model-name',
'model-number-': 'model-number',
'values-sum': 'values'}, inplace=True)
model_cost['values'] = round(model_cost['values'],0)
model_cost['values'] = model_cost['values'].astype(int)
model_cost['revenue'] = int(round(avg_store_sale*number_of_stores,0))
model_cost['revenue-percentage'] = round((model_cost['values'].astype(float)*100)/float(avg_store_sale*number_of_stores),2)
cols_to_move = [ 'model-number', 'model-name', 'values']
model_cost = model_cost[cols_to_move + [col for col in model_cost.columns
if col not in cols_to_move]]
# model_cost.idxmax(axis = 0, skipna = True)
# model1.to_csv('model1.csv')
# model2.to_csv('model2.csv')
# model3.to_csv('model3.csv')
# model4.to_csv('model4.csv')
# summary_final.to_csv('summary.csv')
# model_1 = s3.save_df_to_s3(df=model1, file_name=f'{city_name}_model1.csv')
# model_2 = s3.save_df_to_s3(df=model2, file_name=f'{city_name}_model2.csv')
# model_3 = s3.save_df_to_s3(df=model3, file_name=f'{city_name}_model3.csv')
# model_4 = s3.save_df_to_s3(df=model4, file_name=f'{city_name}_model4.csv')
# summary_1 = s3.save_df_to_s3(df=summary_final, file_name=f'{city_name}_summary.csv')
# model_cost_ = s3.save_df_to_s3(df=model_cost, file_name=f'{city_name}_total.csv')
# Formatting Excel
path = "/".join(os.getcwd().split("/")[:-2]) + "/tmp/"
if not os.path.exists(path):
os.mkdir(path, 0o777)
file_name = f"DNM_{city_name}_{number_of_stores}_stores.xlsx"
local_file_full_path = path + file_name
# writing in a Excel
with pd.ExcelWriter(local_file_full_path) as writer:
model_cost.to_excel(writer, sheet_name='total', index=False)
summary_final.to_excel(writer, sheet_name='model_summay', index=False)
model1.to_excel(writer, sheet_name='model_1', index=False)
model2.to_excel(writer, sheet_name='model_2', index=False)
model3.to_excel(writer, sheet_name='model_3', index=False)
model4.to_excel(writer, sheet_name='model_4', index=False)
assumptions.to_excel(writer, sheet_name='assumptions', index=False)
status = True
except Exception as error:
logger.exception(error)
logger.info(f'code failed in between')
status = False
if status is True:
status = 'Success'
else:
status = 'Failed'
end_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
if status=="Success":
email.send_email_file(subject=f"Distributor Network Model - Costs Analysis",
mail_body=f"Dear User,\n"
f"\n"
f"Distributor Network Model- Costs Analysis\n"
f"city - {city_name}\n"
f"number of stores - {number_of_stores}\n"
f"expected revenue - {int(round(avg_store_sale*number_of_stores,0))}\n"
f"Recommendation - {model_cost.loc[model_cost['values'].idxmin(),'model-name']}\n"
f"\n"
f"Summary\n"
f"{model_cost[['model-number','model-name','values','revenue-percentage']].to_string(col_space=25)}\n"
f"\n"
f"Regards,\n"
f"Data Team\n",
to_emails=email_to, file_paths=[local_file_full_path])
else:
email.send_email_file(subject=f"{env}-{status}-dnm-cost-analysis",
mail_body=f"Dear User,\n"
f"Distributor Network Model- Costs Analysis - job is failed, Please connect with Data team to resolve this issue"
f"\n"
f"Regards,\n"
f"Data Team\n",
to_emails=email_to, file_uris=[])
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/dnm_costs_analysis/dnm-cost-analysis.py | dnm-cost-analysis.py |
import argparse
import os
import sys
from datetime import datetime
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB, MongoDB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-bs', '--batch_size', default=1000, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
batch_size = args.batch_size
os.environ['env'] = env
s3 = S3()
rs_db = DB(read_only=False)
rs_db.open_connection()
mg_db = MongoDB()
mg_client = mg_db.open_connection("generico-crm")
schema = 'prod2-generico' if env == 'prod' else 'test-generico'
table_name = 'exotelincomingcalllogs'
database = mg_client["generico-crm"]
collection = database["exotelIncomingCallLogs"]
logger = get_logger()
def max_last_date():
query = f""" select max(updatedat) as max_date from "{schema}"."{table_name}" """
df = pd.read_sql_query(query, rs_db.connection)
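    # Note: depending on the Redshift driver version, column names may come back as bytes,
    # which is why the b'max_date' key is used below.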
if df[b'max_date'][0] is None:
return "2020-01-01 00:00:00.000000"
else:
return str(df[b'max_date'][0])
max_update_date = max_last_date()
logger.info(f"max update-at date: {max_update_date}")
query = {}
# query["CallTo"] = {u"$eq": u"02248900429"}
# query["RecordingUrl"] = {u"$ne": u"null"}
query["updatedAt"] = {u"$gt": datetime.strptime(f"{max_update_date}", "%Y-%m-%d %H:%M:%S.%f")}
sort = [(u"updatedAt", 1)]
skip = 0
df = pd.DataFrame()
cursor = collection.find(query, sort=sort, skip=0, limit=batch_size)
temp_df = pd.DataFrame(data=list(cursor))
df = temp_df.copy()
while not temp_df.empty:
skip += batch_size
logger.info(f"skip: {skip}")
cursor = collection.find(query, sort=sort, skip=skip, limit=batch_size)
temp_df = pd.DataFrame(data=list(cursor))
df = pd.concat([df, temp_df])
if not df.empty:
df.rename(columns={"_id": "oid__id"}, inplace=True)
df.is_active = df.is_active.replace({True: 1, False: 0})
df.is_exist_in_db = df.is_exist_in_db.replace({True: 1, False: 0}).fillna(0)
df.columns = [c.lower() for c in df.columns]
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
"""
Fill the absent columns with default data in the dataframe
"""
for i, c in table_info.iterrows():
default_value = None
if c['data_type'] == 'character varying':
default_value = ""
if c['data_type'] == 'timestamp without time zone':
default_value = datetime.now(tz=None)
if c['data_type'] == 'integer':
default_value = 0
df[c['column_name']] = df.get(c['column_name'], default_value)
df['is_exist_in_db'] = df['is_exist_in_db'].astype(int)
logger.info(f"Total {len(df)} new records found.")
logger.info(f"df.head: {df.head()}")
s3.write_df_to_db(df=df[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
else:
logger.info("New data NOT found.")
mg_db.close()
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/exotel_call_log_sync/exotel_call_log_sync.py | exotel_call_log_sync.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "zeno-order-logs"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"prescription-needed",
"prescreptions-created",
"completed-at",
"pso-created-by",
"pso-created-at",
"follow-up-by",
"follow-up-at",
"follow-up-doc-by",
"follow-up-doc-at",
"presc-created-by",
"presc-created-at",
"confirm-need-presc-by",
"confirm-need-presc-at",
"out-del-by",
"out-del-at"
)
select
zoal."zeno-order-id" as "id",
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "updated-at",
count(case when zoal."target-state" = 'CONFIRM-BUT-NEED-PRESCRIPTION' then zoal.id end) as "prescription-needed",
count(case when zoal."target-state" = 'PRESCRIPTION-CREATED' then zoal.id end) as "prescreptions-created",
max(case when zoal."target-state" = 'DELIVERED' then zoal."created-at" end) as "completed-at",
max(case when "target-state" = 'CREATE-PATIENT-STORE-ORDER' and r = 1 then "action-executed-by" end) as "pso-created-by",
max(case when "target-state" = 'CREATE-PATIENT-STORE-ORDER' and r = 1 then "created-at" end) as "pso-created-at",
max(case when "target-state" = 'FOLLOW-UP-WITHIN-TAT' and r = 1 then "action-executed-by" end) as "follow-up-by",
max(case when "target-state" = 'FOLLOW-UP-WITHIN-TAT' and r = 1 then "created-at" end) as "follow-up-at",
max(case when "target-state" = 'FOLLOW-UP-DOCTOR-WITHIN-TAT' and r = 1 then "action-executed-by" end) as "follow-up-doc-by",
max(case when "target-state" = 'FOLLOW-UP-DOCTOR-WITHIN-TAT' and r = 1 then "created-at" end) as "follow-up-doc-at",
max(case when "target-state" = 'PRESCRIPTION-CREATED' and r = 1 then "action-executed-by" end) as "presc-created-by",
max(case when "target-state" = 'PRESCRIPTION-CREATED' and r = 1 then "created-at" end) as "presc-created-at",
max(case when "target-state" = 'CONFIRM-BUT-NEED-PRESCRIPTION' and r = 1 then "action-executed-by" end) as "confirm-need-presc-by",
max(case when "target-state" = 'CONFIRM-BUT-NEED-PRESCRIPTION' and r = 1 then "created-at" end) as "confirm-need-presc-at",
max(case when "target-state" = 'OUT-FOR-DELIVERY' and r = 1 then "action-executed-by" end) as "out-del-by",
max(case when "target-state" = 'OUT-FOR-DELIVERY' and r = 1 then "created-at" end) as "out-del-at"
from
(
select
* ,
rank() over (partition by "zeno-order-id",
"target-state"
order by
"created-at" desc ) r
from
"prod2-generico"."zeno-order-action-log" zoal
where
"action-status" = 'SUCCESS') zoal
group by
zoal."zeno-order-id";
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."zeno-order-logs";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection()
# file: /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/zeno-order-log/zeno-order-log.py (package: zeno-etl-libs)
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "home-delivery-metadata"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"patient-id",
"doctor-id",
"store-id",
"store-name",
"store-lat",
"store-lon",
"drug-id",
"drug-name",
"type",
"category",
"composition",
"composition-master-id",
"company",
"company-id",
"requested-quantity",
"inventory-quantity",
"order-number",
"bill-id",
"billed-at",
"order-source",
"order-type",
"status",
"order-mode",
"pso-created-at",
"year-pso-created-at",
"month-pso-created-at",
"payment-type",
"slot-id",
"turnaround-time",
"patient-request-id",
"pr-flag",
"abo",
"line-manager",
"store-manager",
"city",
"store-b2b",
"delivery-status",
"assigned-to",
"assigned-to-id",
"dispatcher",
"receiver",
"delivered-at",
"completed-at",
"no-of-deliveries",
"vendor-bill-number",
"first-slot",
"last-slot",
"total-slot-changes",
"slot-type",
"per-slot-capacity",
"scheduled-at",
"zeno-order-id",
"delivery-type",
"first-slot-date",
"last-slot-date",
"store-patient-distance" ,
"delivery-cost",
"assigned-at"
)
select
pso."id" as "id",
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
pso."patient-id" as "patient-id" ,
pso."doctor-id" as "doctor-id",
pso."store-id" as "store-id",
s."name" as "store-name",
s."lat" as "store-lat",
s."lon" as "store-lon",
pso."drug-id" as "drug-id",
pso."drug-name" as "drug-name",
d."type" ,
d."category" ,
d."composition" ,
d."composition-master-id" as "composition-master-id",
d."company" ,
d."company-id" as "company-id",
pso."requested-quantity" as "requested-quantity",
pso."inventory-quantity" as "inventory-quantity",
pso."order-number" as "order-number",
pso."bill-id" as "bill-id" ,
b."created-at" as "billed-at",
pso."order-source" as "order-source",
pso."order-type" as "order-type" ,
pso."status",
pso."order-mode" as "order-mode",
pso."created-at" as "pso-created-at",
extract(year
from
pso."created-at") as "year-pso-created-at",
extract(month
from
pso."created-at") as "month-pso-created-at",
pso."payment-type" as "payment-type",
pso."slot-id" as "slot-id" ,
pso."turnaround-time" as "turnaround-time" ,
pso."patient-request-id" as "patient-request-id",
(case
when pso."patient-request-id" is null then false
else true
end) as "pr-flag",
sm."abo",
sm."line-manager",
sm."store-manager",
sm."city",
sm."store-b2b",
dt."delivery-status",
dt."assigned-to",
dt."assigned-to-id" ,
dt."dispatcher",
dt."receiver",
dt."delivered-at",
dt."completed-at",
dt."no-of-deliveries",
dt."vendor-bill-number",
sc."first-slot",
sc."last-slot",
sc."total-slot-changes",
ss."slot-type" as "slot-type" ,
ss."per-slot-capacity" as "per-slot-capacity",
dt."scheduled-at",
pso."zeno-order-id",
ss."type" as "delivery-type",
sc."first-slot-date",
sc."last-slot-date",
pso."store-patient-distance" ,
pso."delivery-cost" ,
dt."assigned-at"
from
"prod2-generico"."patients-store-orders" pso
left join "prod2-generico"."bills-1" b on
b."id" = pso."bill-id"
left join "prod2-generico"."stores" s on
s."id" = pso."store-id"
left join "prod2-generico".patients p2 on
p2."id" = pso."patient-id"
left join "prod2-generico"."drugs" d on
d."id" = pso."drug-id"
left join "prod2-generico"."stores-master" sm on
sm."id" = pso."store-id"
left join "prod2-generico"."delivery-tracking-metadata" dt on
pso.id = dt."id"
left join "prod2-generico"."pso-slot-changes" sc on
pso.id = sc."id"
left join "prod2-generico"."store-slots" ss on
pso."slot-id" = ss.id
where
pso."order-type" = 'delivery';
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."home-delivery-metadata";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
print(f"env: {env}")
os.environ['env'] = env
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection()
# file: /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/home-delivery/home-delivery.py (package: zeno-etl-libs)
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "retention-master"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" ( "id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"patient-id",
"store-id",
"doctor-id",
"promo-code-id",
"promo-discount",
"payment-method",
"redeemed-points",
"bill-date",
"bill-year",
"bill-month",
"bill-day",
"bill-month-diff",
"doctor-name",
"total-spend",
"spend-generic",
"spend-goodaid",
"spend-ethical",
"spend-others-type",
"num-drugs",
"quantity-generic",
"quantity-goodaid",
"quantity-ethical",
"quantity-chronic",
"quantity-repeatable",
"quantity-others-type",
"is-generic",
"is-goodaid",
"is-ethical",
"is-chronic",
"is-repeatable",
"is-others-type",
"is-rx",
"total-quantity",
"zippin-serial",
"total-mrp-value",
"pr-flag",
"hd-flag",
"ecom-flag",
"promo-flag",
"digital-payment-flag",
"total-purchase-rate-value",
"total-ptr-value",
"month-bill-rank",
"min-bill-date-in-month",
"store-id-month",
"normalized-date",
"total-cashback",
"zenocare-amount",
"p-first-bill-date",
"p-last-bill-date",
"p-average-bill-value",
"p-total-quantity",
"p-quantity-generic",
"p-quantity-chronic",
"p-quantity-ethical",
"p-quantity-repeatable",
"p-quantity-goodaid",
"p-quantity-rx",
"p-quantity-others-type",
"p-number-of-bills",
"p-hd-bills",
"p-is-repeatable",
"p-is-generic",
"p-is-chronic",
"p-is-goodaid",
"p-is-ethical",
"p-is-rx",
"p-is-others-type",
"p-hd-flag",
"p-ecom-flag",
"p-pr-flag",
"p-total-spend",
"p-spend-generic",
"p-promo-min-bill-date",
"p-hd-min-bill-date",
"p-ecom-min-bill-date",
"p-pr-min-bill-date",
"p-generic-min-bill-date",
"p-goodaid-min-bill-date",
"p-ethical-min-bill-date",
"p-chronic-min-bill-date",
"p-repeatable-min-bill-date",
"p-others-type-min-bill-date",
"p-rx-min-bill-date",
"p-digital-payment-min-bill-date",
"p-first-bill-id",
"p-last-bill-id",
"p-digital-payment-flag",
"p-total-mrp-value",
"is-nps",
"latest-nps-rating",
"latest-nps-rating-comment",
"latest-nps-rating-store-id",
"latest-nps-rating-store-name",
"latest-nps-rating-date",
"referred-count",
"primary-store-id",
"p-num-drugs",
"p-primary-disease",
"recency-customer-days",
"system-age-days",
"avg-purchase-interval",
"std-purchase-interval",
"quantity-generic-pc",
"quantity-chronic-pc",
"quantity-ethical-pc",
"quantity-repeatable-pc",
"quantity-goodaid-pc",
"quantity-others-type-pc",
"spend-generic-pc",
"previous-bill-date",
"previous-store-id",
"value-segment-calculation-date",
"value-segment",
"behaviour-segment-calculation-date",
"behaviour-segment",
"promo-code",
"promo-code-type",
"promo-eligibility",
"promo-discount-type",
"promo-min-purchase",
"campaign-id",
"campaign-name",
"store",
"line-manager",
"abo",
"store-manager",
"store-type",
"store-opened-at",
"date-diff",
"month-diff",
"latitude",
"longitude",
"store-contact-1",
"store-contact-2",
"store-address",
"store-city",
"store-b2b",
"line",
"landmark",
"store-group-id",
"franchisee-id",
"franchisee-name",
"old-new",
"bill-quarter",
"previous-normalized-date",
"cum-spend",
"cum-nob",
"cum-abv",
"prev-cum-spend",
"prev-cum-nob",
"prev-cum-abv",
"acquired" ,
"old-new-static" ,
"day-diff-previous-bill" ,
"resurrected-flag",
"crm-flag",
"p-crm-flag",
"loyal-customer-flag")
select
bm."id",
bm."created-by" ,
bm."created-at" ,
'etl-automation',
bm."updated-at" ,
bm."patient-id",
bm."store-id",
bm."doctor-id",
bm."promo-code-id",
bm."promo-discount",
bm."payment-method",
bm."redeemed-points",
bm."bill-date",
bm."bill-year",
bm."bill-month",
bm."bill-day",
bm."month-diff" as "bill-month-diff",
bm."doctor-name",
bm."total-spend",
bm."spend-generic",
bm."spend-goodaid",
bm."spend-ethical",
bm."spend-others-type",
bm."num-drugs",
bm."quantity-generic",
bm."quantity-goodaid",
bm."quantity-ethical",
bm."quantity-chronic",
bm."quantity-repeatable",
bm."quantity-others-type",
bm."is-generic",
bm."is-goodaid",
bm."is-ethical",
bm."is-chronic",
bm."is-repeatable",
bm."is-others-type",
bm."is-rx",
bm."total-quantity",
bm."zippin-serial",
bm."total-mrp-value",
bm."pr-flag",
bm."hd-flag",
bm."ecom-flag",
bm."promo-flag",
bm."digital-payment-flag",
bm."total-purchase-rate-value",
bm."total-ptr-value",
bm."month-bill-rank",
bm."min-bill-date-in-month",
bm."store-id-month",
bm."normalized-date",
bm."total-cashback",
bm."zenocare-amount",
pm."first-bill-date" as "p-first-bill-date",
pm."last-bill-date" as "p-last-bill-date",
pm."average-bill-value" as "p-average-bill-value",
pm."total-quantity" as "p-total-quantity",
pm."quantity-generic" as "p-quantity-generic",
pm."quantity-chronic" as "p-quantity-chronic",
pm."quantity-ethical" as "p-quantity-ethical",
pm."quantity-repeatable" as "p-quantity-repeatable",
pm."quantity-goodaid" as "p-quantity-goodaid",
pm."quantity-rx" as "p-quantity-rx",
pm."quantity-others-type" as "p-quantity-others-type",
pm."number-of-bills" as "p-number-of-bills",
pm."hd-bills" as "p-hd-bills",
pm."is-repeatable" as "p-is-repeatable",
pm."is-generic" as "p-is-generic",
pm."is-chronic" as "p-is-chronic",
pm."is-goodaid" as "p-is-goodaid",
pm."is-ethical" as "p-is-ethical",
pm."is-rx" as "p-is-rx",
pm."is-others-type" as "p-is-others-type",
pm."hd-flag" as "p-hd-flag",
pm."ecom-flag" as "p-ecom-flag",
pm."pr-flag" as "p-pr-flag",
pm."total-spend" as "p-total-spend",
pm."spend-generic" as "p-spend-generic",
pm."promo-min-bill-date" as "p-promo-min-bill-date",
pm."hd-min-bill-date" as "p-hd-min-bill-date",
pm."ecom-min-bill-date" as "p-ecom-min-bill-date",
pm."pr-min-bill-date" as "p-pr-min-bill-date",
pm."generic-min-bill-date" as "p-generic-min-bill-date",
pm."goodaid-min-bill-date" as "p-goodaid-min-bill-date",
pm."ethical-min-bill-date" as "p-ethical-min-bill-date",
pm."chronic-min-bill-date" as "p-chronic-min-bill-date",
pm."repeatable-min-bill-date" as "p-repeatable-min-bill-date",
pm."others-type-min-bill-date" as "p-others-type-min-bill-date",
pm."rx-min-bill-date" as "p-rx-min-bill-date",
pm."digital-payment-min-bill-date" as "p-digital-payment-min-bill-date",
pm."first-bill-id" as "p-first-bill-id",
pm."last-bill-id" as "p-last-bill-id",
pm."digital-payment-flag" as "p-digital-payment-flag",
pm."total-mrp-value" as "p-total-mrp-value",
pm."is-nps",
pm."latest-nps-rating",
pm."latest-nps-rating-comment",
pm."latest-nps-rating-store-id",
pm."latest-nps-rating-store-name",
pm."latest-nps-rating-date",
pm."referred-count",
pm."primary-store-id",
pm."num-drugs" as "p-num-drugs",
pm."primary-disease" as "p-primary-disease",
pm."recency-customer-days",
pm."system-age-days",
pm."avg-purchase-interval",
pm."std-purchase-interval",
pm."quantity-generic-pc",
pm."quantity-chronic-pc",
pm."quantity-ethical-pc",
pm."quantity-repeatable-pc",
pm."quantity-goodaid-pc",
pm."quantity-others-type-pc",
pm."spend-generic-pc",
lead(bm."bill-date" , 1)
OVER(
PARTITION BY bm."patient-id"
ORDER BY bm."created-at" desc) AS "previous-bill-date",
lead(bm."store-id" , 1)
OVER(
PARTITION BY bm."patient-id"
ORDER BY bm."created-at" desc) AS "previous-store-id",
cvs."segment-calculation-date" as "value-segment-calculation-date",
cvs."value-segment",
cbs."segment-calculation-date" as "behaviour-segment-calculation-date",
cbs."behaviour-segment",
p."promo-code",
p."promo-code-type",
p."promo-eligibility",
p."promo-discount-type",
p."promo-min-purchase",
p."campaign-id",
p."campaign-name",
sm."store",
sm."line-manager",
sm."abo",
sm."store-manager",
sm."store-type",
sm."opened-at" as "store-opened-at",
sm."date-diff",
sm."month-diff",
sm."latitude",
sm."longitude",
sm."store-contact-1",
sm."store-contact-2",
sm."store-address",
sm."city" as "store-city",
sm."store-b2b",
sm."line",
sm."landmark",
sm."store-group-id",
sm."franchisee-id",
sm."franchisee-name",
case
when (12 * (extract (year from bm."created-at") - extract (year from pm."first-bill-date")) + (extract (month from bm."created-at") - extract (month from pm."first-bill-date")))>= 1 then 'old'
else 'new'
end as "old-new",
extract('year' from bm."bill-date")||'Q'||extract('quarter' from bm."bill-date") as "bill-quarter",
ppnd."previous-normalized-date",
sum(bm."total-spend") over( partition by bm."patient-id"
order by
bm."created-at" asc rows unbounded preceding) as "cum-spend",
count(bm."id") over( partition by bm."patient-id"
order by
bm."created-at" asc rows unbounded preceding) as "cum-nob",
sum(bm."total-spend") over( partition by bm."patient-id"
order by
bm."created-at" asc rows unbounded preceding)/ count(bm."id") over( partition by bm."patient-id"
order by
bm."created-at" asc rows unbounded preceding) as "cum-abv",
(sum(bm."total-spend") over( partition by bm."patient-id"
order by
bm."created-at" asc rows unbounded preceding))-bm."total-spend" as "prev-cum-spend",
(count(bm."id") over( partition by bm."patient-id"
order by
bm."created-at" asc rows unbounded preceding))-1 as "prev-cum-nob",
sum(bm."total-spend") over( partition by bm."patient-id"
order by
bm."created-at" asc rows between unbounded preceding and 1 preceding)/ count(bm."id") over( partition by bm."patient-id"
order by
bm."created-at" asc rows between unbounded preceding and 1 preceding) as "prev-cum-abv",
sm."acquired" ,
sm."old-new-static",
datediff('day',
lead(bm."bill-date" , 1)
OVER(
PARTITION BY bm."patient-id"
ORDER BY bm."created-at" desc),
bm."bill-date" ) as "day-diff-previous-bill",
(case
when datediff('day',
lead(bm."bill-date" , 1)
OVER(
PARTITION BY bm."patient-id"
ORDER BY bm."created-at" desc),
bm."bill-date" )>90 then 1
else 0
end) as "resurrected-flag",
bm."crm-flag",
pm."crm-flag" as "p-crm-flag",
(case when (12 *(bm."bill-year"-extract('year'
from
ppnd."previous-2-normalized-date"))+(bm."bill-month" - extract('month'
from
ppnd."previous-2-normalized-date")) ) in (90, 2) then 1
when (ppq."bill-quarter"-ppq."previous-quarter") in (97, 1) then 1
else 0
end) as "loyal-customer-flag"
from
"prod2-generico"."bills-1-metadata" bm
left join "prod2-generico"."patients-metadata-2" pm on
pm.id = bm."patient-id"
left join "prod2-generico"."customer-value-segment" cvs on
bm."patient-id" = cvs."patient-id"
and bm."normalized-date" = cvs."segment-calculation-date"
left join "prod2-generico"."customer-behaviour-segment" cbs on
bm."patient-id" = cbs."patient-id"
and bm."normalized-date" = cbs."segment-calculation-date"
left join "prod2-generico".promo p on
bm."promo-code-id" = p.id
inner join "prod2-generico"."stores-master" sm on
bm."store-id" = sm."id"
inner join "prod2-generico"."patient-previous-normalized-date" ppnd on
bm."patient-id" = ppnd."patient-id"
and bm."normalized-date" = ppnd."normalized-date"
inner join "prod2-generico"."patient-previous-quarter" ppq on
ppq."patient-id" =bm."patient-id"
and extract('year' from bm."bill-date")||0||extract('quarter' from bm."bill-date") = ppq."bill-quarter"
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
print(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection()
# file: /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/retention-master/retention_master.py (package: zeno-etl-libs)
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime as dt
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.new_stores.new_stores_ipc import new_stores_ss_calc
from zeno_etl_libs.utils.new_stores.helper_functions import get_drug_info, order_value_report
from zeno_etl_libs.utils.warehouse.wh_intervention.store_portfolio_consolidation import stores_ss_consolidation
from zeno_etl_libs.utils.ipc.goodaid_substitution import update_ga_ss
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
def main(debug_mode, reset_stores, goodaid_ss_flag,
ga_inv_weight, rest_inv_weight, top_inv_weight, wh_gen_consolidation,
type_list, rs_db_read, rs_db_write, read_schema, write_schema, s3, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
reset_date = dt.date.today().strftime("%Y-%m-%d")
# define empty DF if required in case of fail
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
try:
for store_id in reset_stores:
logger.info("New store SS calculation started for store id: " +
str(store_id))
# NEW STORES SS CALCULATION
ss_stores = new_stores_ss_calc(store_id, reset_date, rs_db_read,
read_schema, logger)
# EXTRA INFO FETCH
data_inv, data_ptr, data_drug_info, data_drug_grade,\
data_stores = get_drug_info(store_id, rs_db_read, read_schema)
# MERGE DATA
ss_stores_merge = ss_stores.merge(
data_inv[['drug_id', 'current_inventory']],
how='left', on='drug_id')
ss_stores_merge = ss_stores_merge.merge(data_ptr, how='left',
on='drug_id')
ss_stores_merge = ss_stores_merge.merge(data_drug_info, how='left',
on='drug_id')
ss_stores_merge = ss_stores_merge.merge(data_drug_grade, how='left',
on='drug_id')
ss_stores_merge = ss_stores_merge.merge(data_stores, how='left',
on='store_id')
logger.info("Null values in dataframes, count is {}".format(
ss_stores_merge.isnull().sum()))
# fill Null values
ss_stores_merge['current_inventory'].fillna(0, inplace=True)
ss_stores_merge['ptr'].fillna(67, inplace=True)
ss_stores_merge['type'].fillna('', inplace=True)
ss_stores_merge['category'].fillna('', inplace=True)
ss_stores_merge['drug_grade'].fillna('NA', inplace=True)
# final data-frame name for update
new_stores_ss = ss_stores_merge.copy()
logger.info("SS list base algo+triggers length is {}".format(
len(new_stores_ss)))
logger.info(
"Types in list are - {}".format(new_stores_ss['type'].unique()))
# remove banned and discontinued drugs
new_stores_ss = new_stores_ss[~new_stores_ss['type'].isin(
['banned', 'discontinued-products'])]
logger.info(
"Types in list are - {}".format(new_stores_ss['type'].unique()))
logger.info(
"SS list after removing banned and discontinued - length is {}".format(
len(new_stores_ss)))
# order value report
order_value = order_value_report(new_stores_ss)
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
new_stores_ss, consolidation_log = stores_ss_consolidation(
new_stores_ss, rs_db_read, read_schema,
min_column='min', ss_column='safety_stock',
max_column='max')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
new_stores_ss, good_aid_ss_log = update_ga_ss(
new_stores_ss, store_id, rs_db_read, read_schema,
ga_inv_weight, rest_inv_weight,
top_inv_weight, substition_type=['generic'],
min_column='min', ss_column='safety_stock',
max_column='max', logger=logger)
# few more columns
new_stores_ss['inventory_quantity'] = new_stores_ss['current_inventory']
new_stores_ss['fptr'] = new_stores_ss['ptr']
new_stores_ss['store_id'] = store_id
new_stores_ss['daily_sales_1'] = -1
new_stores_ss['daily_sales_2'] = -1
new_stores_ss['daily_sales_3'] = -1
new_stores_ss['ads'] = -1
new_stores_ss['ads_min'] = -1
new_stores_ss['ads_ss'] = -1
new_stores_ss['ads_max'] = -1
new_stores_ss['algo_max_days'] = 30
# adjustment for ethical
# same logic as in new_store_ipc_funcs.ss_calc
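# max cover of 30 days is cut to 15 days (30 * 1/2) for ethical / high-value-ethical
# and to 20 days (30 * 2/3) for all other types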
new_stores_ss['algo_max_days'] = np.round(
np.where(new_stores_ss['type'].isin(
['ethical', 'high-value-ethical']),
new_stores_ss['algo_max_days'] * (1 / 2),
new_stores_ss['algo_max_days'] * (2 / 3)))
# for min
new_stores_ss['algo_min_days'] = np.where(new_stores_ss['max'] > 0,
(new_stores_ss['min'] /
new_stores_ss['max']
) * new_stores_ss[
'algo_max_days'], 0)
# for ss
new_stores_ss['algo_ss_days'] = np.where(new_stores_ss['max'] > 0,
(new_stores_ss[
'safety_stock'] /
new_stores_ss['max']
) * new_stores_ss[
'algo_max_days'], 0)
new_stores_ss['corr_min'] = new_stores_ss['min']
new_stores_ss['corr_ss'] = new_stores_ss['safety_stock']
new_stores_ss['corr_max'] = new_stores_ss['max']
new_stores_ss['to_order_quantity'] = np.where(
new_stores_ss['inventory_quantity']
<= new_stores_ss['corr_ss'],
new_stores_ss['corr_max'] -
new_stores_ss['inventory_quantity'],
0)
new_stores_ss['to_order_value'] = new_stores_ss['fptr'] * \
new_stores_ss['to_order_quantity']
# required columns
new_store_ss = new_stores_ss[[
'store_id', 'store_name', 'drug_id', 'drug_name', 'type',
'category', 'drug_grade', 'inventory_quantity',
'min', 'safety_stock', 'max',
'daily_sales_1', 'daily_sales_2', 'daily_sales_3',
'ads', 'ads_min', 'ads_ss', 'ads_max',
'algo_min_days', 'algo_ss_days', 'algo_max_days',
'corr_min', 'corr_ss', 'corr_max',
'to_order_quantity', 'fptr', 'to_order_value', 'algo_type']]
# overall order value
order_value_all = order_value_all.append(order_value,
ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table non-ipc-safety-stock
# new_store_ss['store_id'] = new_store_ss['store_id'].astype(int)
new_store_ss['reset-date'] = dt.datetime.strptime(reset_date,
'%Y-%m-%d').date()
new_store_ss['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
new_store_ss['created-by'] = 'etl-automation'
new_store_ss['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
new_store_ss['updated-by'] = 'etl-automation'
new_store_ss.columns = [c.replace('_', '-') for c in
new_store_ss.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='new-store-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
new_store_ss = new_store_ss[columns] # required column order
logger.info("Writing to table: new-store-safety-stock")
s3.write_df_to_db(df=new_store_ss,
table_name='new-store-safety-stock',
db=rs_db_write, schema=write_schema)
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
new_store_ss.columns = [c.replace('-', '_') for c in
new_store_ss.columns]
ss_data_upload = new_store_ss.query('corr_max > 0')[
['store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']]
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema,
logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"New-Stores-SS code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"New-Stores-SS code execution status: {status}")
return status, reset_date, new_drug_entries, missed_entries, order_value_all
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str,
required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
parser.add_argument('-d', '--debug_mode', default="N", type=str,
required=False)
parser.add_argument('-exsto', '--exclude_stores',
default=[52, 60, 92, 243, 281], nargs='+', type=int,
required=False)
parser.add_argument('-gad', '--gaid_flag', default="Y", type=str,
required=False)
parser.add_argument('-giw', '--gaid_inv_wt', default=0.5, type=float,
required=False)
parser.add_argument('-riw', '--rest_inv_wt', default=0.0, type=float,
required=False)
parser.add_argument('-tiw', '--top_inv_wt', default=1, type=float,
required=False)
parser.add_argument('-wgc', '--wh_gen_consld', default="Y", type=str,
required=False)
parser.add_argument('-rs', '--reset_stores',
default=[0], nargs='+', type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
debug_mode = args.debug_mode
# JOB EXCLUSIVE PARAMS
exclude_stores = args.exclude_stores
goodaid_ss_flag = args.gaid_flag
ga_inv_weight = args.gaid_inv_wt
rest_inv_weight = args.rest_inv_wt
top_inv_weight = args.top_inv_wt
wh_gen_consolidation = args.wh_gen_consld
reset_stores = args.reset_stores
logger = get_logger()
s3 = S3()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
store_query = """
select "id", name, "opened-at" as opened_at
from "{read_schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {0}
""".format(str(exclude_stores).replace('[', '(').replace(']', ')'),
read_schema=read_schema)
stores = rs_db_read.get_df(store_query)
# new stores: opened between 30 and 90 days ago
new_stores = stores.loc[
(dt.datetime.now() - stores['opened_at'] <= dt.timedelta(days=90)) &
(dt.datetime.now() - stores['opened_at'] >= dt.timedelta(
days=30)), 'id'].values
if reset_stores == [0]: # Fetch all new stores
reset_stores = new_stores
logger.info(f"Algo to run for all new stores: {reset_stores}")
else:
reset_stores = list(set(reset_stores).intersection(new_stores))
logger.info(f"Algo to run for specified new stores: {reset_stores}")
if not reset_stores:
logger.info(f"ALERT: None of specified stores is a new store")
reset_stores = new_stores
logger.info(f"REVERT: Algo to run for all new stores: {reset_stores}")
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
""" calling the main function """
status, reset_date, new_drug_entries, missed_entries, \
order_value_all = main(
debug_mode, reset_stores, goodaid_ss_flag, ga_inv_weight,
rest_inv_weight, top_inv_weight, wh_gen_consolidation,
type_list, rs_db_read, rs_db_write, read_schema, write_schema, s3,
logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# save email attachements to s3
order_value_all_uri = s3.save_df_to_s3(order_value_all,
file_name=f"order_value_all_{reset_date}.csv")
new_drug_entries_uri = s3.save_df_to_s3(new_drug_entries,
file_name=f"new_drug_entries_{reset_date}.csv")
missed_entries_uri = s3.save_df_to_s3(missed_entries,
file_name=f"missed_entries_{reset_date}.csv")
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"New Stores SS Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Reset Stores: {reset_stores}
Job Params: {args}
""",
to_emails=email_to, file_uris=[order_value_all_uri,
new_drug_entries_uri,
missed_entries_uri])
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/new_stores/new_stores_ss_main.py | new_stores_ss_main.py |
import json
import requests
tableau_admin_user_password = ''
tableau_baseurl = 'https://tableau.generico.in'
site_id = 'd68c13b5-ae9e-4cb2-8824-25c6d4daf7a2'
db_to_update = 'postgres'
old_read_only_username = ''
new_read_only_username = ''
new_read_only_user_password = ''
# sign in
login_data = {
"credentials": {
"name": 'admin',
"password": tableau_admin_user_password,
"site": {
"contentUrl": ""
}
}
}
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
response = requests.post(f"{tableau_baseurl}/api/3.7/auth/signin", headers=headers, data=json.dumps(login_data))
login_resp = response.json()
if response.status_code == 200 and 'credentials' in login_resp:
login_resp = login_resp["credentials"]
headers['X-Tableau-Auth'] = login_resp["token"]
# workbook list (single request, up to pageSize results)
workbook_response = requests.get(f'{tableau_baseurl}/api/3.7/sites/{site_id}/workbooks?pageSize=1000',
headers=headers)
# print(workbook_response.json())
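# note: one call returns at most pageSize (capped at 1000) workbooks; larger sites
# would need to iterate the pageNumber query parameter (see pagination notes below)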
if workbook_response.status_code != 200:
raise Exception(f"workbook_response: {workbook_response.text}")
workbook_data = workbook_response.json()
for workbook in workbook_data['workbooks']['workbook']:
if "" in workbook['name'].lower():
print(workbook['name'])
connection_response = requests.get(
f"{tableau_baseurl}/api/3.7/sites/{site_id}/workbooks/{workbook['id']}/connections?pageSize=1000",
headers=headers)
if connection_response.status_code != 200:
raise Exception(f"workbook_response: {connection_response.text}")
connection_data = connection_response.json()
for connection in connection_data['connections']['connection']:
# print(connection)
print(f"connection type: {connection['type']}")
if connection['type'] == db_to_update:
if connection['userName'] == old_read_only_username:
connection_id = connection['id']
request_body = {
'connection': {
'userName': new_read_only_username,
'password': new_read_only_user_password
}
}
print(f"connection to update: {connection}")
# update connection
data_source_id = connection['datasource']['id']
conn_update_response = requests.put(
f'{tableau_baseurl}/api/3.7/sites/{site_id}/datasources/{data_source_id}/connections/{connection_id}',
data=json.dumps(request_body), headers=headers)
update_status = 'failed'
if conn_update_response.status_code == 200:
update_status = 'successful'
print(f"Connection ID: {connection_id} Data Source Name: {connection['datasource']['name']} "
f"update {update_status}")
# project_response = requests.get(f'{tableau_baseurl}/api/3.7/sites/{site_id}/projects?pageSize=1000',
# headers=headers)
# # print(project_response.json())
# project_data = project_response.json()
# for project in project_data['projects']['project']:
# if project['name'] == 'Marketing':
# print(f"project: {project}")
# if data_sources_response.status_code == 200:
# counter = 0
# data_sources_data = data_sources_response.json()
# for data_source in data_sources_data['datasources']['datasource']:
# data_source_id = data_source['id']
#
# if data_source['project']['name'].lower() != 'test':
# continue
# print(f"data_source: {data_source}")
# # per data source connections list
# connections_response = requests.get(
# f'{tableau_baseurl}/api/3.7/sites/{site_id}/datasources/{data_source_id}/connections',
# headers=headers)
#
# if connections_response.status_code != 200:
# connections_data = connections_response.json()
# for connection in connections_data['connections']['connection']:
# # print(f"connection type: {connection['type']}")
# if connection['type'] == db_to_update:
# if connection['userName'] == old_read_only_username:
# connection_id = connection['id']
# request_body = {
# 'connection': {
# 'userName': new_read_only_username,
# 'password': new_read_only_user_password
# }
# }
# # print(f"connection to update: {connection}")
# # # update connection
# # conn_update_response = requests.put(
# # f'{tableau_baseurl}/api/3.7/sites/{site_id}/datasources/{data_source_id}/connections/{connection_id}',
# # data=json.dumps(request_body), headers=headers)
# #
# # update_status = 'failed'
# # if conn_update_response.status_code == 200:
# # update_status = 'successful'
# # print(
# # f'Connection ID: {connection_id} Data Source Name: {data_source["name"]} update {update_status}')
# counter += 1
# print(f"Total connections: {counter}")
#
# # sign out
# sign_out_headers = {'X-Tableau-Auth': headers['X-Tableau-Auth']}
# sign_out_response = requests.post(f'{tableau_baseurl}/api/3.7/auth/signout', headers=sign_out_headers)
# if sign_out_response.status_code == 204:
# print('Successfully Signed Out')
# else:
# print('Sign Out Failed')
"""
NOTES:
> api_version: 3.7, server_version: v2020.1
> rest-api fundamental concepts
https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_concepts_fundamentals.htm
> rest-api-samples
https://github.com/tableau/rest-api-samples/tree/master/python
https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_concepts_example_requests.htm (rq_bdy xml to json)
> Pagination
https://help.tableau.com/v2020.1/api/rest_api/en-us/REST/rest_api_concepts_paging.htm
> List Data Sources
https://help.tableau.com/v2020.1/api/rest_api/en-us/REST/rest_api_ref.htm#query_data_sources
> List Data Source Connections
https://help.tableau.com/v2020.1/api/rest_api/en-us/REST/rest_api_ref.htm#query_data_source_connections
> Update a Data Source
https://help.tableau.com/v2020.1/api/rest_api/en-us/REST/rest_api_ref.htm#update_data_source_connection
> Sign Out
https://help.tableau.com/v2020.1/api/rest_api/en-us/REST/rest_api_ref.htm#sign_out
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/tableau/list-workbooks-and-connections.py | list-workbooks-and-connections.py |
import json
import requests
tableau_admin_user_password = ''
tableau_baseurl = 'https://tableau.generico.in'
site_id = 'd68c13b5-ae9e-4cb2-8824-25c6d4daf7a2'
db_to_update = 'postgres'
old_read_only_username = ''
new_read_only_username = ''
new_read_only_user_password = ''
# sign in
login_data = {
"credentials": {
"name": 'admin',
"password": tableau_admin_user_password,
"site": {
"contentUrl": ""
}
}
}
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
response = requests.post(f"{tableau_baseurl}/api/3.7/auth/signin", headers=headers, data=json.dumps(login_data))
login_resp = response.json()
if response.status_code == 200 and 'credentials' in login_resp:
login_resp = login_resp["credentials"]
headers['X-Tableau-Auth'] = login_resp["token"]
# data source list
data_sources_response = requests.get(f'{tableau_baseurl}/api/3.7/sites/{site_id}/datasources?pageSize=1000',
headers=headers)
if data_sources_response.status_code == 200:
counter = 0
data_sources_data = data_sources_response.json()
for data_source in data_sources_data['datasources']['datasource']:
data_source_id = data_source['id']
# print(f"data_source: {data_source}")
# per data source connections list
connections_response = requests.get(
f'{tableau_baseurl}/api/3.7/sites/{site_id}/datasources/{data_source_id}/connections',
headers=headers)
if connections_response.status_code == 200:
connections_data = connections_response.json()
for connection in connections_data['connections']['connection']:
# print(f"connection type: {connection['type']}")
if connection['type'] == db_to_update:
if connection['userName'] == old_read_only_username:
connection_id = connection['id']
request_body = {
'connection': {
'userName': new_read_only_username,
'password': new_read_only_user_password
}
}
print(f"connection to update: {connection}")
# update connection
conn_update_response = requests.put(
f'{tableau_baseurl}/api/3.7/sites/{site_id}/datasources/{data_source_id}/connections/{connection_id}',
data=json.dumps(request_body), headers=headers)
update_status = 'failed'
if conn_update_response.status_code == 200:
update_status = 'successful'
counter += 1
print(
f'Connection ID: {connection_id} Data Source Name: {data_source["name"]} update {update_status}')
print(f"Total connections updated: {counter}")
# sign out
sign_out_headers = {'X-Tableau-Auth': headers['X-Tableau-Auth']}
sign_out_response = requests.post(f'{tableau_baseurl}/api/3.7/auth/signout', headers=sign_out_headers)
if sign_out_response.status_code == 204:
print('Successfully Signed Out')
else:
print('Sign Out Failed')
"""
NOTES:
> api_version: 3.7, server_version: v2020.1
> rest-api fundamental concepts
https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_concepts_fundamentals.htm
> rest-api-samples
https://github.com/tableau/rest-api-samples/tree/master/python
https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_concepts_example_requests.htm (rq_bdy xml to json)
> Pagination
https://help.tableau.com/v2020.1/api/rest_api/en-us/REST/rest_api_concepts_paging.htm
> List Data Sources
https://help.tableau.com/v2020.1/api/rest_api/en-us/REST/rest_api_ref.htm#query_data_sources
> List Data Source Connections
https://help.tableau.com/v2020.1/api/rest_api/en-us/REST/rest_api_ref.htm#query_data_source_connections
> Update a Data Source
https://help.tableau.com/v2020.1/api/rest_api/en-us/REST/rest_api_ref.htm#update_data_source_connection
> Sign Out
https://help.tableau.com/v2020.1/api/rest_api/en-us/REST/rest_api_ref.htm#sign_out
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/tableau/data-source-password-change.py | data-source-password-change.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "sales-agg"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"created-by",
"created-at",
"updated-by",
"updated-at",
"bill-id",
"drug-id",
"patient-id",
"store-id",
"year-created-at",
"month-created-at",
"net-quantity",
"net-revenue-value",
"gross-quantity",
"gross-revenue-value",
"returned-quantity",
"returned-revenue-value",
"created-date"
)
select
'etl-automation' as "created-by",
b."created-at" as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
ms."bill-id" ,
ms."drug-id" ,
ms."patient-id" ,
ms."store-id" ,
extract(year
from
b."created-at") as "year-created-at",
extract(month
from
b."created-at") as "month-created-at",
sum(ms."net-quantity") as "net-quantity",
sum(ms."revenue-value") as "net-revenue-value",
sum(case when ms."bill-flag" = 'gross' then ms."quantity" else 0 end ) as "gross_quantity",
sum(case when ms."bill-flag" = 'gross' then ms."revenue-value" else 0 end ) as "gross_revenue_value",
sum(case when ms."bill-flag" = 'return' then ms."quantity" else 0 end ) as "returned-quantity",
sum(case when ms."bill-flag" = 'return' then ms."revenue-value" else 0 end ) as "returned-revenue-value",
date(b."created-at") as "created-date"
from
"prod2-generico".sales ms
inner join "prod2-generico"."bills-1" b on
ms."bill-id" = b.id
group by
ms."bill-id",
ms."drug-id" ,
ms."patient-id" ,
ms."store-id",
b."created-at"
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection()
# file: /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/sales-agg/sales_agg.py (package: zeno-etl-libs)
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "ecomm-das"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"created-at",
"created-by",
"updated-at",
"updated-by",
"patient-id",
"promo-code-id",
"preferred-store-id",
"order-type",
"zeno-created-at",
"zeno-created-by",
"is-prescription-required",
"order-number",
"status",
"comments",
"zeno-drug-id",
"patient-store-order-id",
"zeno-qty",
"overall-min-bill-date",
"type",
"category",
"composition",
"company-id",
"composition-master-id",
"zeno-drug-name",
"zeno-drug-type",
"source-pincode",
"order-cancellation-reason-id",
"cancel-comment",
"cancelled-at",
"cancelled-by",
"cancel-reason",
"cancel-type",
"pso-requested-quantity",
"patient-request-id",
"pso-created-at",
"pso-created-by",
"pso-inventory-quantity",
"pso-status",
"store-id",
"bill-id",
"slot-id",
"turnaround-time",
"delivered-at",
"assigned-to",
"slot-type",
"per-slot-capacity",
"vendor-bill-number",
"prescription-needed",
"prescreptions-created",
"completed-at",
"mrp",
"selling-rate",
"gross-quantity",
"sale-flag",
"gross-revenue-value",
"returned-quantity",
"returned-revenue-value",
"promo-code",
"promo-code-type",
"promo-eligibility",
"campaign-name",
"store-name",
"store-city",
"store-b2b",
"abo",
"line-manager",
"order-city",
"min",
"max",
"safe-stock",
"grade-updated-at",
"zeno-drug-created-by"
)
select
me."id",
convert_timezone('Asia/Calcutta',GETDATE()) as "created-at",
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "updated-at",
'etl-automation' as "updated-by",
me."patient-id",
me."promo-code-id",
me."preferred-store-id",
me."order-type",
me."zeno-created-at",
me."zeno-created-by",
me."is-prescription-required",
me."order-number",
me."status",
me."comments",
me."zeno-drug-id",
me."patient-store-order-id",
me."zeno-qty",
me."overall-min-bill-date",
me."type",
me."category",
me."composition",
me."company-id",
me."composition-master-id",
me."zeno-drug-name",
me."zeno-drug-type",
me."source-pincode",
me."order-cancellation-reason-id",
me."cancel-comment",
me."cancelled-at",
me."cancelled-by",
me."cancel-reason",
me."cancel-type",
me."pso-requested-quantity",
me."patient-request-id",
me."pso-created-at",
me."pso-created-by",
me."pso-inventory-quantity",
me."pso-status",
me."store-id",
me."bill-id",
me."slot-id",
me."turnaround-time",
me."delivered-at",
me."assigned-to",
me."slot-type",
me."per-slot-capacity",
me."vendor-bill-number",
me."prescription-needed",
me."prescreptions-created",
me."completed-at",
me."mrp",
me."selling-rate",
me."gross-quantity",
me."sale-flag",
me."gross-revenue-value",
me."returned-quantity",
me."returned-revenue-value",
me."promo-code",
me."promo-code-type",
me."promo-eligibility",
me."campaign-name",
me."store-name",
me."store-city",
me."store-b2b",
me."abo",
me."line-manager",
me."order-origin",
doi."min",
doi."max",
doi."safe-stock" as "safe-stock",
doi."grade-updated-at" as "grade-updated-at",
me."zeno-drug-created-by" as "zeno-drug-created-by"
from
"prod2-generico".ecomm me
left join "prod2-generico"."drug-order-info" doi on
me."store-id" = doi."store-id"
and me."zeno-drug-id" = doi."drug-id";
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
print(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection()
# file: /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ecomm-das/ecomm-das.py (package: zeno-etl-libs)
import os
import sys
import argparse
import pandas as pd
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.django.api import Django
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.ipc.forecast_reset import ipc_forecast_reset
from zeno_etl_libs.utils.warehouse.wh_intervention.store_portfolio_consolidation import stores_ss_consolidation
from zeno_etl_libs.utils.ipc.goodaid_substitution import update_ga_ss
from zeno_etl_libs.utils.ipc.npi_exclusion import omit_npi_drugs
from zeno_etl_libs.utils.ipc.post_processing import post_processing
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.utils.ipc.store_portfolio_additions import generic_portfolio
def main(debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag, v6_active_flag,
v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
keep_all_generic_comp, rs_db_read, rs_db_write, read_schema,
write_schema, s3, django, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
if v3_active_flag == 'Y':
corrections_flag = True
else:
corrections_flag = False
# Define empty DF if required in case of fail
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
logger.info("Forecast pipeline starts...")
try:
for store_id in reset_stores:
logger.info("IPC SS calculation started for store id: " + str(store_id))
if not type_list:
type_list = str(
list(reset_store_ops.loc[reset_store_ops['store_id'] ==
store_id, 'type'].unique()))
type_list = type_list.replace('[', '(').replace(']', ')')
# RUNNING FORECAST PIPELINE AND SAFETY STOCK CALC
drug_class, weekly_fcst, safety_stock_df, df_corrections, \
df_corrections_111, drugs_max_to_lock_ipcv6, \
drug_rejects_ipcv6 = ipc_forecast_reset(
store_id, type_list, reset_date, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
rs_db_read, read_schema,
drug_type_list_v4=drug_type_list_v4,
v5_active_flag=v5_active_flag,
v6_active_flag=v6_active_flag,
v6_type_list=v6_type_list,
v6_ptr_cut_off=v6_ptr_cut_off,
chronic_max_flag=chronic_max_flag,
logger=logger)
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
safety_stock_df, consolidation_log = stores_ss_consolidation(
safety_stock_df, rs_db_read, read_schema,
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
safety_stock_df, good_aid_ss_log = update_ga_ss(
safety_stock_df, store_id, rs_db_read, read_schema,
ga_inv_weight, rest_inv_weight,
top_inv_weight, substition_type=['generic'],
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point', logger=logger)
# KEEP ALL GENERIC COMPOSITIONS IN STORE
if keep_all_generic_comp == 'Y':
safety_stock_df = generic_portfolio(safety_stock_df,
rs_db_read, read_schema,
logger)
# OMIT NPI DRUGS
if omit_npi == 'Y':
safety_stock_df = omit_npi_drugs(safety_stock_df, store_id,
reset_date, rs_db_read,
read_schema, logger)
# POST PROCESSING AND ORDER VALUE CALCULATION
drug_class, weekly_fcst, safety_stock_df, \
order_value = post_processing(store_id, drug_class, weekly_fcst,
safety_stock_df, rs_db_read,
read_schema, logger)
order_value_all = order_value_all.append(order_value, ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc-forecast
weekly_fcst.rename(
columns={'date': 'week_begin_dt', 'fcst': 'point_forecast',
'std': 'forecast_deviation'}, inplace=True)
weekly_fcst['store_id'] = weekly_fcst['store_id'].astype(int)
weekly_fcst['drug_id'] = weekly_fcst['drug_id'].astype(int)
weekly_fcst['forecast_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
weekly_fcst['week_begin_dt'] = weekly_fcst['week_begin_dt']
weekly_fcst['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['created-by'] = 'etl-automation'
weekly_fcst['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['updated-by'] = 'etl-automation'
weekly_fcst.columns = [c.replace('_', '-') for c in weekly_fcst.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-forecast',
schema=write_schema)
columns = list(table_info['column_name'])
weekly_fcst = weekly_fcst[columns] # required column order
logger.info("Writing to table: ipc-forecast")
s3.write_df_to_db(df=weekly_fcst,
table_name='ipc-forecast',
db=rs_db_write, schema=write_schema)
# writing table ipc-safety-stock
safety_stock_df['store_id'] = safety_stock_df['store_id'].astype(int)
safety_stock_df['drug_id'] = safety_stock_df['drug_id'].astype(int)
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: ipc-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='ipc-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table ipc-abc-xyz-class
drug_class['store_id'] = drug_class['store_id'].astype(int)
drug_class['drug_id'] = drug_class['drug_id'].astype(int)
drug_class['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
drug_class['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['created-by'] = 'etl-automation'
drug_class['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['updated-by'] = 'etl-automation'
drug_class.columns = [c.replace('_', '-') for c in drug_class.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-abc-xyz-class',
schema=write_schema)
columns = list(table_info['column_name'])
drug_class = drug_class[columns] # required column order
logger.info("Writing to table: ipc-abc-xyz-class")
s3.write_df_to_db(df=drug_class,
table_name='ipc-abc-xyz-class',
db=rs_db_write, schema=write_schema)
# to write ipc v6 tables ...
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in safety_stock_df.columns]
ss_data_upload = safety_stock_df.query('order_upto_point > 0')[
['store_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema, logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
logger.info("All writes to RS-DB completed!")
# INTERNAL TABLE SCHEDULE UPDATE - OPS ORACLE
logger.info(f"Rescheduling SID:{store_id} in OPS ORACLE")
if isinstance(reset_store_ops, pd.DataFrame):
content_type = 74
object_id = reset_store_ops.loc[
reset_store_ops['store_id'] == store_id, 'object_id'].unique()
for obj in object_id:
request_body = {"object_id": int(obj), "content_type": content_type}
api_response, _ = django.django_model_execution_log_create_api(
request_body)
reset_store_ops.loc[
reset_store_ops['object_id'] == obj,
'api_call_response'] = api_response
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"IPC code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"IPC code execution status: {status}")
return status, order_value_all, new_drug_entries, missed_entries
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
parser.add_argument('-d', '--debug_mode', default="N", type=str,
required=False)
parser.add_argument('-exsto', '--exclude_stores',
default=[52, 60, 92, 243, 281], nargs='+', type=int,
required=False)
parser.add_argument('-gad', '--gaid_flag', default="Y", type=str,
required=False)
parser.add_argument('-giw', '--gaid_inv_wt', default=0.5, type=float,
required=False)
parser.add_argument('-riw', '--rest_inv_wt', default=0.0, type=float,
required=False)
parser.add_argument('-tiw', '--top_inv_wt', default=1, type=float,
required=False)
parser.add_argument('-cmf', '--chronic_max_flag', default="N", type=str,
required=False)
parser.add_argument('-wgc', '--wh_gen_consld', default="Y", type=str,
required=False)
parser.add_argument('-v5', '--v5_active_flag', default="N", type=str,
required=False)
parser.add_argument('-v6', '--v6_active_flag', default="N", type=str,
required=False)
parser.add_argument('-v6lst', '--v6_type_list',
default=['ethical', 'generic', 'others'], nargs='+',
type=str, required=False)
parser.add_argument('-v6ptr', '--v6_ptr_cut_off', default=400, type=int,
required=False)
parser.add_argument('-rd', '--reset_date', default="YYYY-MM-DD", type=str,
required=False)
parser.add_argument('-rs', '--reset_stores',
default=[0], nargs='+', type=int,
required=False)
parser.add_argument('-v3', '--v3_active_flag', default="N", type=str,
required=False)
parser.add_argument('-v3sp', '--corr_selling_prob_cutoff',
default="{'ma_less_than_2': 0.40, 'ma_more_than_2' : 0.40}",
type=str, required=False)
parser.add_argument('-v3cp', '--corr_cumm_prob_cutoff',
default="{'ma_less_than_2':0.50,'ma_more_than_2':0.63}",
type=str, required=False)
parser.add_argument('-v4tl', '--v4_drug_type_list',
default="{'generic':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',"
"'ethical':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',"
"'others':'{0:[0,0,0], 1:[0,1,2], 2:[0,1,2],3:[1,2,3]}'}",
type=str, required=False)
parser.add_argument('-npi', '--omit_npi', default='N', type=str, required=False)
parser.add_argument('-kagc', '--keep_all_generic_comp', default='N', type=str,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
debug_mode = args.debug_mode
# JOB EXCLUSIVE PARAMS
exclude_stores = args.exclude_stores
goodaid_ss_flag = args.gaid_flag
ga_inv_weight = args.gaid_inv_wt
rest_inv_weight = args.rest_inv_wt
top_inv_weight = args.top_inv_wt
chronic_max_flag = args.chronic_max_flag
wh_gen_consolidation = args.wh_gen_consld
v5_active_flag = args.v5_active_flag
v6_active_flag = args.v6_active_flag
v6_type_list = args.v6_type_list
v6_ptr_cut_off = args.v6_ptr_cut_off
reset_date = args.reset_date
reset_stores = args.reset_stores
v3_active_flag = args.v3_active_flag
corrections_selling_probability_cutoff = args.corr_selling_prob_cutoff
corrections_cumulative_probability_cutoff = args.corr_cumm_prob_cutoff
drug_type_list_v4 = args.v4_drug_type_list
omit_npi = args.omit_npi
keep_all_generic_comp = args.keep_all_generic_comp
# EVALUATE REQUIRED JSON PARAMS
corrections_selling_probability_cutoff = literal_eval(
corrections_selling_probability_cutoff)
corrections_cumulative_probability_cutoff = literal_eval(
corrections_cumulative_probability_cutoff)
drug_type_list_v4 = literal_eval(drug_type_list_v4)
logger = get_logger()
s3 = S3()
django = Django()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
if reset_stores == [0]: # Fetch scheduled IPC stores from OPS ORACLE
store_query = """
select "id", name, "opened-at" as opened_at
from "{read_schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {0}
""".format(str(exclude_stores).replace('[', '(').replace(']', ')'),
read_schema=read_schema)
stores = rs_db_read.get_df(store_query)
# considering reset of old stores only (age > 1 year)
store_id = stores.loc[dt.datetime.now() -
stores['opened_at'] >
dt.timedelta(days=365), 'id'].values
# QUERY TO GET SCHEDULED STORES AND TYPE FROM OPS ORACLE
pg_internal = PostGre(is_internal=True)
pg_internal.open_connection()
reset_store_query = """
SELECT
"ssr"."id" as object_id,
"s"."bpos_store_id" as store_id,
"dc"."slug" as type,
"ssr"."drug_grade"
FROM
"safety_stock_reset_drug_category_mapping" ssr
INNER JOIN "ops_store_manifest" osm
ON ( "ssr"."ops_store_manifest_id" = "osm"."id" )
INNER JOIN "retail_store" s
ON ( "osm"."store_id" = "s"."id" )
INNER JOIN "drug_category" dc
ON ( "ssr"."drug_category_id" = "dc"."id")
WHERE
(
( "ssr"."should_run_daily" = TRUE OR
"ssr"."trigger_dates" && ARRAY[ date('{reset_date}')] )
AND "ssr"."is_auto_generate" = TRUE
AND "osm"."is_active" = TRUE
AND "osm"."is_generate_safety_stock_reset" = TRUE
AND "dc"."is_safety_stock_reset_enabled" = TRUE
AND "dc"."is_active" = TRUE
AND s.bpos_store_id in {store_list}
)
""".format(
store_list=str(list(store_id)).replace('[', '(').replace(']', ')'),
reset_date=reset_date)
reset_store_ops = pd.read_sql_query(reset_store_query,
pg_internal.connection)
pg_internal.close_connection()
reset_store_ops['api_call_response'] = False
reset_stores = reset_store_ops['store_id'].unique()
type_list = None
else:
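# manual run: reset stores were passed explicitly, so use the full drug-type list
# and skip the OPS Oracle schedule lookup / rescheduling (reset_store_ops stays None)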
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
reset_store_ops = None
""" calling the main function """
status, order_value_all, new_drug_entries, \
missed_entries = main(
debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag,
v6_active_flag, v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
keep_all_generic_comp, rs_db_read, rs_db_write, read_schema,
write_schema, s3, django, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# save email attachments to s3
order_value_all_uri = s3.save_df_to_s3(order_value_all,
file_name=f"order_value_all_{reset_date}.csv")
new_drug_entries_uri = s3.save_df_to_s3(new_drug_entries,
file_name=f"new_drug_entries_{reset_date}.csv")
missed_entries_uri = s3.save_df_to_s3(missed_entries,
file_name=f"missed_entries_{reset_date}.csv")
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"IPC SS Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Reset Stores: {reset_stores}
Job Params: {args}
""",
to_emails=email_to, file_uris=[order_value_all_uri,
new_drug_entries_uri,
missed_entries_uri])
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ipc/ipc_ss_main.py | ipc_ss_main.py |
# =============================================================================
# Purpose: Excess removal code
# Author: Saurav Maskar
# =============================================================================
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import datetime
import argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-sku', '--sku_to_add_daily', default=18, type=int, required=False)
parser.add_argument('-fsku', '--fofo_sku_to_add_daily', default=50, type=int, required=False)
parser.add_argument('-ccf', '--cold_chain_flag', default=1, type=str, required=False)
parser.add_argument('-si', '--stores_to_include_if_blank_all', default="NULL", type=str, required=False)
parser.add_argument('-se', '--stores_to_exclude_if_blank_none', default="NULL", type=str, required=False)
parser.add_argument('-ci', '--city_id_to_include_if_blank_all', default="NULL", type=str, required=False)
parser.add_argument('-ce', '--city_id_to_exclude_if_blank_none', default="NULL", type=str, required=False)
parser.add_argument('-dieibn', '--drug_id_exclude_if_blank_none', default="NULL", type=str, required=False)
parser.add_argument('-ff', '--fofo_inclusion_flag', default="0", type=str, required=False)
parser.add_argument('-gif', '--goodaid_inclusion_flag', default=1, type=int, required=False)
parser.add_argument('-qc', '--quantity_cap', default=100, type=int, required=False)
parser.add_argument('-fqc', '--fofo_quantity_cap', default=70, type=int, required=False)
parser.add_argument('-rfm', '--read_from_mysql', default=1, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
sku_to_add_daily = args.sku_to_add_daily
fofo_sku_to_add_daily = args.fofo_sku_to_add_daily
# Cold chain parameter logic:
#   0 - do not add cold chain products, 2 - add only cold chain products,
#   1 - indifferent (cold chain products may or may not be added)
cold_chain_flag = args.cold_chain_flag
stores_to_include_if_blank_all = args.stores_to_include_if_blank_all
stores_to_exclude_if_blank_none = args.stores_to_exclude_if_blank_none
city_id_to_include_if_blank_all = args.city_id_to_include_if_blank_all
city_id_to_exclude_if_blank_none = args.city_id_to_exclude_if_blank_none
drug_id_exclude_if_blank_none = args.drug_id_exclude_if_blank_none
fofo_inclusion_flag = args.fofo_inclusion_flag
goodaid_inclusion_flag = args.goodaid_inclusion_flag
quantity_cap = args.quantity_cap
fofo_quantity_cap = args.fofo_quantity_cap
read_from_mysql= args.read_from_mysql
os.environ['env'] = env
logger = get_logger(level='INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
mysql_read = MySQL()
mysql_read.open_connection()
s3 = S3()
start_time = datetime.datetime.now()
logger.info('Script Manager Initialized')
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("sku_to_add_daily - " + str(sku_to_add_daily))
logger.info("fofo_sku_to_add_daily - " + str(sku_to_add_daily))
logger.info("cold_chain_flag - " + str(cold_chain_flag))
logger.info("stores_to_include_if_blank_all - " + str(stores_to_include_if_blank_all))
logger.info("stores_to_exclude_if_blank_none - " + str(stores_to_exclude_if_blank_none))
logger.info("city_id_to_include_if_blank_all - " + str(city_id_to_include_if_blank_all))
logger.info("city_id_to_exclude_if_blank_none - " + str(city_id_to_exclude_if_blank_none))
logger.info("drug_id_exclude_if_blank_none - " + str(city_id_to_exclude_if_blank_none))
logger.info("fofo_inclusion_flag - " + str(fofo_inclusion_flag))
logger.info("goodaid_inclusion_flag - " + str(goodaid_inclusion_flag))
logger.info("quantity_cap - " + str(quantity_cap))
logger.info("fofo_quantity_cap - " + str(fofo_quantity_cap))
# date parameter
logger.info("code started at {}".format(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
time_period_to_look_back = cur_date.day + 2
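# look-back window: from roughly 2 days before the start of the current month up to today,
# used below to skip store-drug entries that were completed recently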
# =============================================================================
# set parameters to honour ad hoc requests for including/excluding specific stores or cities
# =============================================================================
parameter_input1 = False
parameter_input2 = False
parameter_input3 = False
parameter_input4 = False
# Parses a comma-separated parameter into a list of ints, ignoring anything non-numeric,
# so the store/city/drug lists work irrespective of the input format
def fetch_number(values):
    numbers = []
    for i in values:
        try:
            numbers.append(int(i))
        except (ValueError, TypeError):
            pass
    return numbers
if stores_to_include_if_blank_all == 'NULL' and stores_to_exclude_if_blank_none == 'NULL':
parameter_input1 = False
parameter_input2 = False
logger.info('Missing parameters, Taking all stores')
else:
if stores_to_include_if_blank_all != 'NULL':
parameter_input1 = True
stores_to_include_if_blank_all = fetch_number(stores_to_include_if_blank_all.split(','))
logger.info('read parameters to include stores, taking included stores only - {}'.format(
stores_to_include_if_blank_all))
if stores_to_exclude_if_blank_none != 'NULL':
parameter_input2 = True
stores_to_exclude_if_blank_none = fetch_number(stores_to_exclude_if_blank_none.split(','))
logger.info('read parameters to exclude stores, not taking excluded stores - {}'.format(
stores_to_exclude_if_blank_none))
if city_id_to_include_if_blank_all == 'NULL' and city_id_to_exclude_if_blank_none == 'NULL':
parameter_input3 = False
parameter_input4 = False
logger.info('Missing parameters, Taking all cities')
else:
if city_id_to_include_if_blank_all != 'NULL':
parameter_input3 = True
city_id_to_include_if_blank_all = fetch_number(city_id_to_include_if_blank_all.split(','))
logger.info('read parameters to include city, taking included cities only - {}'.format(
city_id_to_include_if_blank_all))
if city_id_to_exclude_if_blank_none != 'NULL':
parameter_input4 = True
city_id_to_exclude_if_blank_none = fetch_number(city_id_to_exclude_if_blank_none.split(','))
logger.info('read parameters to exclude city, not taking excluded cities - {}'.format(
city_id_to_exclude_if_blank_none))
if drug_id_exclude_if_blank_none == 'NULL':
drug_id_exclude_if_blank_none = [0,0]
logger.info('Missing parameters, drug_id_exclude_if_blank_none')
else:
drug_id_exclude_if_blank_none = fetch_number(drug_id_exclude_if_blank_none.split(','))
logger.info('read parameters to exclude drugs, not taking excluded drugs - {}'.format(
drug_id_exclude_if_blank_none))
# =============================================================================
# Excess Removal Script
# =============================================================================
# Getting prod drug detail
prod_drugs_query = '''
select
id as "drug-id",
"drug-name",
type,
"pack-form",
"cold-chain"
from
"prod2-generico"."drugs" d
WHERE
d."type" not in ('banned','discontinued-products')
and d."drug-name" not like '%BAILLEY%'
and d."drug-name" not like '%Lozenges%'
and d."drug-name" not like '%LOZENGE%'
'''
prod_drugs = rs_db.get_df(prod_drugs_query)
# Getting drugs under Promo codes
promo_prod_drugs_query = '''
SELECT
DISTINCT i."drug-id" as "drug-id"
FROM
"prod2-generico"."bill-promo-codes" bpc
left join "prod2-generico"."bill-items-1" bi
on
bpc."bill-item-id" = bi.id
left join "prod2-generico"."inventory-1" i
on
bi."inventory-id" = i.id
left join "prod2-generico"."promo-codes" pc
on
bpc."promo-code-id" = pc.id
left join "prod2-generico".drugs d on
d.id = i."drug-id"
WHERE
pc."promo-code" in ('PPI', 'PPI50', 'PPI20', 'FPPI', 'BOGO')
and bi."created-at" >= CURRENT_DATE - 45
'''
promo_prod_drugs = rs_db.get_df(promo_prod_drugs_query)
promo_drugs = list(map(int,promo_prod_drugs['drug-id'].unique()))
final_drugs_to_exclude = promo_drugs + drug_id_exclude_if_blank_none
logger.info('Final drugs to exclude - {}'.format(final_drugs_to_exclude))
prod_drugs = prod_drugs[~prod_drugs['drug-id'].isin(final_drugs_to_exclude)]
# getting my sql store_drug list
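# store-drug pairs already saved/in-progress, or completed within the look-back window,
# are flagged (dummy = 1) so they can be excluded later and duplicate entries are avoided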
if int(read_from_mysql) == 1:
store_drug_prod_query = '''
select
`store-id` ,
`drug-id`,
1 as `dummy`
from
`prod2-generico`.`excess-drugs` nd
where
status in ('saved', 'in-progress')
or (status = 'completed'
and date(nd.`created-at`) > date(DATE_ADD(date(now()) , INTERVAL -{time_period_to_look_back} Day)))
'''.format(time_period_to_look_back=time_period_to_look_back)
store_drug_prod = pd.read_sql_query(store_drug_prod_query, mysql_read.connection)
logger.info('Read store_drug_prod - from Mysql')
else:
store_drug_prod_query = '''
select
"store-id" ,
"drug-id",
1 as "dummy"
from
"prod2-generico"."excess-drugs" nd
where
status in ('saved', 'in-progress')
or (status = 'completed'
and date(nd."created-at") > date(dateadd(d,-{time_period_to_look_back},current_date)))
'''.format(time_period_to_look_back=time_period_to_look_back)
store_drug_prod = rs_db.get_df(store_drug_prod_query)
logger.info('Read store_drug_prod - from RS')
# Getting list of drugs in audit at the moment
if int(read_from_mysql) == 1:
audit_drug_prod_query = '''
SELECT
a.`store-id` ,
a.`drug-id` ,
1 as dummy_audit
from
(
select
b.`store-id` ,
a.`drug-id` ,
1 as dummy,
ROW_NUMBER() OVER(PARTITION BY b.`store-id` ,
a.`drug-id`
ORDER BY
a.id DESC) as 'row'
from
`inventory-check-items-1` as a
join `inventory-check-1` as b on
a.`check-id` = b.id
where
b.`complete` = 0)a
WHERE
a.`row` = 1
'''
audit_drug_prod = pd.read_sql_query(audit_drug_prod_query, mysql_read.connection)
logger.info('Read audit_drug_prod - from Mysql')
else:
audit_drug_prod_query = '''
SELECT
a."store-id" ,
a."drug-id" ,
1 as dummy_audit
from
(
select
b."store-id" ,
a."drug-id" ,
1 as dummy,
ROW_NUMBER() OVER(PARTITION BY b."store-id" ,
a."drug-id"
ORDER BY
a.id DESC) as "row"
from
"prod2-generico"."inventory-check-items-1" as a
join "prod2-generico"."inventory-check-1" as b on
a."check-id" = b.id
where
b."complete" = 0)a
WHERE
a."row" = 1
'''
audit_drug_prod = rs_db.get_df(audit_drug_prod_query)
logger.info('Read audit_drug_prod - from RS')
# getting store_id list
# connection = current_config.data_science_postgresql_conn()
# store_list_query = '''
# select distinct store_id
# from dead_stock_inventory dsi
# where inventory_type = 'Rotate'
# '''
# store_list = pd.read_sql_query(store_list_query, connection)
# connection.close()
store_list_query = '''
select
distinct "store-id"
from
"prod2-generico"."excess-inventory-categorisation" eic
where
"inventory-type" = 'Rotate'
'''
store_list = rs_db.get_df(store_list_query)
if int(fofo_inclusion_flag) == 1:
store_list_query_fofo = '''
select
distinct "store-id"
from
"prod2-generico"."excess-inventory-categorisation" eic
where
"inventory-type" = 'Rotate_fofo_launch_doh'
'''
store_list_fofo = rs_db.get_df(store_list_query_fofo)
store_list = pd.concat([store_list,store_list_fofo],sort=True)
# getting last day store status
store_completed = pd.DataFrame()
if int(read_from_mysql)==1:
store_last_status_query = """
select
*
from
(
select
row_number() over (partition by nd.`store-id`
order by
nd.`created-at` desc
) as `row`,
nd.`store-id`,
nd.status ,
nd.`created-at`
from
`prod2-generico`.`excess-drugs` nd) nd
where
nd.`row` = 1
"""
store_last_status = pd.read_sql_query(store_last_status_query, mysql_read.connection)
logger.info('Read store_last_status - from Mysql')
else:
store_last_status_query = """
select
*
from
(
select
row_number() over (partition by nd."store-id"
order by
nd."created-at" desc
) as "row",
nd."store-id",
nd.status ,
nd."created-at"
from
"prod2-generico"."excess-drugs" nd) nd
where
nd."row" = 1
"""
store_last_status = rs_db.get_df(store_last_status_query)
logger.info('Read store_last_status - from RS')
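# a store is eligible for a fresh batch only when its most recent excess-drugs entry
# is already 'completed'; brand-new stores (no entry yet) are added below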
store_completed = store_last_status[store_last_status['status']=='completed']['store-id']
store_completed = pd.DataFrame(store_completed,columns=['store-id'])
# Checking If any new store is added
nd_stores = store_last_status['store-id'].unique()
new_stores = pd.DataFrame()
for store in store_list['store-id']:
if store not in nd_stores:
#print(store)
store_new = pd.DataFrame([store], columns=['store-id'])
new_stores = new_stores.append(store_new)
store_completed = pd.concat([store_completed,new_stores])
# Adding city ids and franchise flag to stores
store_info_query = '''
select
s.id as "store-id",
s."franchisee-id" ,
s."city-id"
from
"prod2-generico".stores s
where s."is-active" =1
'''
store_info = rs_db.get_df(store_info_query )
# Inner Join - Only selecting active stores
store_completed = store_completed.merge(store_info,on='store-id',how='inner')
if parameter_input1:
store_completed = store_completed[store_completed ['store-id'].isin(stores_to_include_if_blank_all)]
if parameter_input2:
store_completed = store_completed[~store_completed ['store-id'].isin(stores_to_exclude_if_blank_none)]
if parameter_input3:
store_completed = store_completed[store_completed['city-id'].isin(city_id_to_include_if_blank_all)]
if parameter_input4:
store_completed = store_completed[~store_completed['city-id'].isin(city_id_to_exclude_if_blank_none)]
if int(fofo_inclusion_flag) == 0:
store_completed = store_completed[store_completed['franchisee-id']==1]
elif int(fofo_inclusion_flag) == 2:
store_completed = store_completed[store_completed['franchisee-id'] != 1]
elif int(fofo_inclusion_flag) == 1:
store_completed = store_completed
del store_completed['city-id']
# for store in store_list['store-id']:
# store_completed_query = '''
# select
# distinct "store-id"
# from
# "prod2-generico"."npi-drugs"
# where
# date("created-at") =
# (
# select
# Max(date("created-at"))
# from
# "prod2-generico"."npi-drugs"
# where
# "store-id"= {store})
# and status = 'completed'
# and "store-id"= {store}
# '''.format(store=store)
# store_completed_1 = rs_db.get_df(store_completed_query)
#
# if len(store_completed_1)== 0:
# new_store = """
# SELECT
# DISTINCT nd."store-id"
# FROM
# "prod2-generico"."npi-drugs" nd
# WHERE
# nd."store-id" = {store}
# """.format(store=store)
# new_store = rs_db.get_df(new_store)
#
# if len(new_store)== 0:
# store_completed_1 = pd.DataFrame([store],columns=['store-id'])
#
# store_completed = store_completed_1.append(store_completed)
# getting PG drug list
# connection = current_config.data_science_postgresql_conn()
# npi_drug_list = """
# select store_id, drug_id,
# sum(locked_quantity + quantity) as "total_quantity",
# sum(locked_value + value) as "total_value"
# from dead_stock_inventory dsi
# where inventory_type = 'Rotate'
# group by store_id, drug_id
# """
# npi_drug_list = pd.read_sql_query(npi_drug_list, connection)
# connection.close()
npi_drug_list = """
select
"store-id",
"drug-id",
sum("excess-quantity") as "total-quantity",
sum("excess-value") as "total-value"
from
"prod2-generico"."excess-inventory-categorisation" eic
where
"inventory-type" = 'Rotate'
group by
"store-id",
"drug-id"
"""
npi_drug_list = rs_db.get_df(npi_drug_list)
if int(fofo_inclusion_flag) == 1:
logger.info('adding FOFO Launch Stock Excess Inventory as well')
npi_drug_list_fofo = """
select
"store-id",
"drug-id",
sum("excess-quantity") as "total-quantity",
sum("excess-value") as "total-value"
from
"prod2-generico"."excess-inventory-categorisation" eic
where
"inventory-type" = 'Rotate_fofo_launch_doh'
group by
"store-id",
"drug-id"
"""
npi_drug_list_fofo = rs_db.get_df(npi_drug_list_fofo)
npi_drug_list = pd.concat([npi_drug_list,npi_drug_list_fofo],sort = True)
# merging npi list with drugs table for packform, also removing any drugs if provided as input in prod_drugs query
npi_drug_list = npi_drug_list.merge(prod_drugs, how='inner', on='drug-id')
# =============================================================================
# Adding Quantity Sold at System level
# =============================================================================
drgs = tuple(map(int,npi_drug_list['drug-id'].unique()))
s1 = """
select
"drug-id",
sum("net-quantity") as "system-sales-qty-last-90-days"
from
"prod2-generico"."sales" sh
where
date("created-at") >= date(current_date - 90)
and date("created-at") <= date(current_date)
and "drug-id" in {drgs}
group by
"drug-id"
""".format( drgs=drgs)
quantity_sold = rs_db.get_df(s1)
npi_drug_list = npi_drug_list.merge(quantity_sold,on = 'drug-id', how ='left')
npi_drug_list['system-sales-qty-last-90-days'] = npi_drug_list['system-sales-qty-last-90-days'].fillna(0)
# =============================================================================
# System Searched quantity last 90 days
# =============================================================================
s2 = """
select
"drug-id",
sum("search-count-clean") as "system-searched-qty-last-90-days"
from
"prod2-generico"."cfr-searches-v2" csv2
where
date("search-date") >= date(current_date - 90)
and date("search-date") <= date(current_date)
and "drug-id" in {drgs}
group by
"drug-id"
""".format( drgs=drgs)
drugs_searched = rs_db.get_df(s2)
npi_drug_list = npi_drug_list.merge(drugs_searched,on = 'drug-id', how ='left')
npi_drug_list['system-searched-qty-last-90-days'] = npi_drug_list['system-searched-qty-last-90-days'].fillna(0)
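# liquidation index: weighted demand signal (80% system sales, 20% searches),
# min-max scaled to [0, 1] and then multiplied by the drug's total inventory value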
npi_drug_list['liquidation-index'] = npi_drug_list['system-sales-qty-last-90-days']*0.8+npi_drug_list['system-searched-qty-last-90-days']*0.2
npi_drug_list['liquidation-index'] = npi_drug_list['liquidation-index'].fillna(0)
npi_drug_list['liquidation-index'] = npi_drug_list['liquidation-index'] - npi_drug_list['liquidation-index'].min()
npi_drug_list['liquidation-index'] = npi_drug_list['liquidation-index']/npi_drug_list['liquidation-index'].max()
npi_drug_list['liquidation-index'] = npi_drug_list['liquidation-index'].astype(float)*npi_drug_list['total-value'].astype(float)
# GA drugs inclusion flag
if int(goodaid_inclusion_flag) == 0:
logger.info('removing GA drugs')
goodaid_drug_query = '''
select
d.id as "drug-id"
from
"prod2-generico".drugs d
where
d."company-id" = 6984
'''
goodaid_drugs = rs_db.get_df(goodaid_drug_query)
goodaid_drug_id = tuple(map(int, goodaid_drugs['drug-id'].unique()))
npi_drug_list = npi_drug_list[~npi_drug_list['drug-id'].isin(goodaid_drug_id)]
logger.info('removed GA drugs')
else:
logger.info('not removing GA drugs')
if int(cold_chain_flag) == 0:
npi_drug_list = npi_drug_list[npi_drug_list['cold-chain']==0]
logger.info('removing cold chain products')
elif int(cold_chain_flag) == 2:
npi_drug_list = npi_drug_list[npi_drug_list['cold-chain'] == 1]
logger.info('considering only cold chain products')
else:
logger.info('Not caring whether cold chain items are added or not')
# merging prod and DSS to avoid duplicate entries
npi_drug_list = npi_drug_list.merge(store_drug_prod, how='left', on=['store-id', 'drug-id'])
# merging with completed stores
npi_drug_list = npi_drug_list.merge(store_completed, how='inner', on=['store-id'])
# replacing nulls with 0 and dropping store-drug pairs already present in excess-drugs
npi_drug_list = npi_drug_list.replace(np.nan, 0)
npi_drug_list = npi_drug_list[npi_drug_list.dummy == 0]
# merging with audit drugs to avoid audit drugs entry
npi_drug_list = npi_drug_list.merge(audit_drug_prod, how='left', on=['store-id', 'drug-id'])
# replacing nulls with 0 and dropping store-drug pairs currently under audit
npi_drug_list = npi_drug_list.replace(np.nan, 0)
npi_drug_list = npi_drug_list[npi_drug_list.dummy_audit == 0]
npi_drug_list=npi_drug_list[~npi_drug_list['type'].isin(['discontinued-products','banned'])]
choice = [npi_drug_list['type'] == 'high-value-ethical',
npi_drug_list['type'] == 'ethical',
npi_drug_list['type'] == 'generic',
npi_drug_list['type'] == 'ayurvedic',
npi_drug_list['type'] == 'surgical',
npi_drug_list['type'] == 'category-4',
npi_drug_list['type'] == 'otc',
npi_drug_list['type'] == 'general',
npi_drug_list['type'] == 'baby-food',
npi_drug_list['type'] == 'baby-product',
npi_drug_list['type'] == 'glucose-test-kit',
npi_drug_list['type'] == 'discontinued-products',
npi_drug_list['type'] == 'banned']
select = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
npi_drug_list['sort-type'] = np.select(choice, select, default=999)
npi_drug_list.sort_values(['store-id', 'liquidation-index', 'sort-type', 'pack-form', 'drug-name'],
ascending=[True, False, True, True, True], inplace=True)
# Adding decided SKU (18 - parameter - sku_to_add_daily) per day
npi_drug_list_franchisee = npi_drug_list[npi_drug_list['franchisee-id']!=1]
npi_drug_list_coco = npi_drug_list[npi_drug_list['franchisee-id']==1]
final_list_franchisee = npi_drug_list_franchisee.groupby('store-id').head(fofo_sku_to_add_daily).reset_index(drop=True)
final_list_coco = npi_drug_list_coco.groupby('store-id').head(sku_to_add_daily).reset_index(drop=True)
final_list = pd.concat([final_list_franchisee,final_list_coco],sort = True).reset_index(drop=True)
# Capping added quantity per store outside Mumbai (parameters: quantity_cap for COCO, fofo_quantity_cap for FOFO)
final_list['total-quantity'] = final_list['total-quantity'].astype(float)
final_list['cum_sum_quantity_per_store'] = final_list.groupby(['store-id'])['total-quantity'].cumsum()
# At least one SKU should be added per store
final_list['sku_rank'] = final_list.groupby(['store-id']).cumcount()+1
# Adding city ids
# Mumbai city ids - 1 - Mumbai, 3 - Thane, 2 - Navi Mumbai
store_ids = tuple(map(int,final_list['store-id'].unique()))
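# pad the tuple with zeros so the SQL IN clause stays valid even with zero or one store id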
additng_city_id_query = """
select
s.id as "store-id",
s."city-id",
zc."name" as "city-name"
from
"prod2-generico".stores s
left join "prod2-generico"."zeno-city" zc
on
s."city-id" = zc.id
where s.id in {store_ids}
""".format(store_ids=store_ids + (0,0))
additng_city_id = rs_db.get_df(additng_city_id_query)
final_list = final_list.merge(additng_city_id,how = 'left', on = 'store-id')
final_list['city-id'] = final_list['city-id'].astype(int)
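# quantity cap bypass: Mumbai-region stores (city ids 1, 2, 3), the first-ranked SKU of each
# store and FOFO stores get index 1; all other rows carry the store's cumulative quantity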
conditions = [final_list['city-id'].isin([1,2,3]),final_list['sku_rank']==1,final_list['franchisee-id']!=1,final_list['sku_rank']!=1]
choices = [1,1,1,final_list['cum_sum_quantity_per_store']]
final_list['quantity_cap_index'] = np.select(conditions, choices, default = 0)
final_list = final_list[((final_list['franchisee-id']==1) & (final_list['quantity_cap_index']<quantity_cap))|((final_list['franchisee-id']!=1) & (final_list['quantity_cap_index']<fofo_quantity_cap))]
logger.info(f'for stores outside Mumbai, added quantity is capped to {quantity_cap} (COCO) / {fofo_quantity_cap} (FOFO)')
final_list['created-date'] = cur_date
final_list['created-by'] = '[email protected]'
final_list_npi = final_list[['store-id', 'drug-id','total-quantity']]
final_list_npi.rename(columns = {'total-quantity':'quantity'},inplace =True)
expected_data_length_insert = len(final_list_npi)
logger.info("mySQL - Resulted data length after insert should be is {}".format(expected_data_length_insert))
schema = 'prod2-generico'
table_name = 'excess-removal'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
status1 = False
status2 = False
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
s3.write_df_to_db(df=final_list[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
status1 = True
if status1:
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
# inserting data into prod
logger.info("mySQL - Insert starting")
final_list_npi.to_sql(name='excess-drugs', con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=500)
logger.info("mySQL - Insert ended")
status2 = True
mysql_write.close()
npi_added_uri = s3.save_df_to_s3(df=final_list, file_name='excess_removal_details_{}.csv'.format(cur_date))
if status2 is True:
status = 'Success'
else:
status = 'Failed'
end_time = datetime.datetime.now()
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject=f"{env}-{status} : {table_name} table updated",
mail_body=f"{table_name} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[npi_added_uri])
rs_db.close_connection()
mysql_read.close() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/excess_inventory/excess_removal.py | excess_removal.py |
""
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import datetime
import argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-dl', '--doh_limit', default=45, type=int, required=False,
help="Excess Parameter - DOH limit to qualify as excess")
parser.add_argument('-wvl', '--workcell_value_limit', default=100, type=int, required=False,
help="Excess Parameter - workcell_value_limit to qualify as excess")
parser.add_argument('-qcei', '--quantity_cap_to_qualify_as_excess_inventory', default=2, type=int, required=False,
help = "Excess Parameter - Store must have atleast this much quantity to qualify as excess")
parser.add_argument('-meqc', '--minimum_excess_quantity_cap', default=2, type=int, required=False,
help = "Excess Parameter - excess quantity should be >= minimum_excess_quantity_cap ")
parser.add_argument('-mx', '--max_times_x', default=1.2, type=float, required=False,
help = "Excess Parameter - Either/or - Cushion over Max to qualify as excess")
parser.add_argument('-mpxd', '--max_plux_x_doh', default=10, type=int, required=False,
help = "Excess Parameter - Either/or - Cushion over Max to qualify as excess")
# parser.add_argument('-md', '--minimum_doh', default=30, type=int, required=False,
# help = "less Parameter - DOH below which inventory will be qualified as less")
parser.add_argument('-ccof', '--categorise_coco_only_flag', default=1, type=int, required=False,
help = "1 = Only COCO store, 0 = All")
parser.add_argument('-msa', '--minimum_store_age', default=180, type=int, required=False,
help = "Minimum Store age to categorise excess inventory")
parser.add_argument('-msda', '--minimum_store_drug_age', default=45, type=int, required=False,
help = "Minimum Store_drug_age to categorise excess inventory")
parser.add_argument('-nec', '--near_expiry_cap', default=90, type=int, required=False,
help = "Inventory above near expiry will only be considered for categorisation")
parser.add_argument('-flsa', '--fofo_launch_store_age', default=90, type=int, required=False,
help = "fofo store age for excess type launch_stock>fofo_launch_doh_limit")
parser.add_argument('-fldl', '--fofo_launch_doh_limit', default=180, type=int, required=False,
help = "fofo store reove launch_stock where quantity>fofo_launch_doh_limit")
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
doh_limit = args.doh_limit
workcell_value_limit = args.workcell_value_limit
quantity_cap_to_qualify_as_excess_inventory = args.quantity_cap_to_qualify_as_excess_inventory
minimum_excess_quantity_cap = args.minimum_excess_quantity_cap
max_times_x = args.max_times_x
max_plux_x_doh = args.max_plux_x_doh
# minimum_doh = args.minimum_doh
categorise_coco_only_flag = args.categorise_coco_only_flag
minimum_store_age = args.minimum_store_age
minimum_store_drug_age = args.minimum_store_drug_age
near_expiry_cap = args.near_expiry_cap
fofo_launch_store_age = args.fofo_launch_store_age
fofo_launch_doh_limit = args.fofo_launch_doh_limit
os.environ['env'] = env
logger = get_logger(level='INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
start_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
logger.info('Script Manager Initialized')
logger.info("")
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("doh_limit - " + str(doh_limit))
logger.info("workcell_value_limit - " + str(workcell_value_limit))
logger.info("quantity_cap_to_qualify_as_excess_inventory - " + str(quantity_cap_to_qualify_as_excess_inventory))
logger.info("minimum_excess_quantity_cap - " + str(minimum_excess_quantity_cap))
logger.info("max_times_x - " + str(max_times_x))
logger.info("max_plux_x_doh - " + str(max_plux_x_doh))
# logger.info("minimum_doh - " + str(minimum_doh))
logger.info("categorise_coco_only_flag - " + str(categorise_coco_only_flag))
logger.info("minimum_store_age - " + str(minimum_store_age))
logger.info("minimum_store_drug_age - " + str(minimum_store_drug_age))
logger.info("near_expiry_cap - " + str(near_expiry_cap))
logger.info("fofo_launch_store_age - " + str(fofo_launch_store_age))
logger.info("fofo_launch_doh_limit - " + str(fofo_launch_doh_limit))
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
# Fetching Inventory data
inventory_query = f"""
select
i."store-id" ,
i."drug-id",
sum(i.quantity) as "quantity",
sum(i.quantity * i."purchase-rate") as "workcell-value"
from "prod2-generico"."prod2-generico"."inventory-1" i
where i."quantity" > 0
and i."franchisee-inventory" = 0
and i."expiry" > dateadd(day,{near_expiry_cap},getdate())
group by
i."store-id" ,
i."drug-id"
"""
inventory = rs_db.get_df(inventory_query)
logger.info('fetched inventory data')
# Fetching Stores data
stores_query = """
select
s.id as "store-id",
s."name" as "store-name",
case
when s."franchisee-id" = 1 then 'COCO'
else 'FOFO'
end as "franchise-tag",
case
when s."opened-at" is null
or s."opened-at" = '0101-01-01 00:00:00.000' then 0
else datediff(day,
date(s."opened-at"),
current_date)
end as "store-age",
zc."name" as "city-name"
from
"prod2-generico"."prod2-generico".stores s
left join "prod2-generico"."prod2-generico".franchisees f
on
s."franchisee-id" = f.id
left join "prod2-generico"."prod2-generico"."zeno-city" zc
on
s."city-id" = zc.id
left join "prod2-generico"."prod2-generico"."zeno-indian-states" zis
on
zc."indian-state-id" = zis.id
"""
stores = rs_db.get_df(stores_query)
inventory = inventory.merge(stores, how = 'left', on = 'store-id')
logger.info('fetched stores data')
# Fetching drugs data
drugs_query = """
select
d.id as "drug-id",
d."drug-name" ,
d."type" as "drug-type",
case when d."company-id" = 6984 then 'Goodaid' else 'non-Goodaid' end as "goodaid-flag"
from
"prod2-generico"."prod2-generico".drugs d
"""
drugs = rs_db.get_df(drugs_query)
inventory = inventory.merge(drugs, how = 'left', on = 'drug-id')
logger.info('fetched drugs data')
# Fetching store-drug-age
store_drug_age_query = """
select
s."store-id" ,
s."drug-id" ,
datediff(day,min(s."created-at"),current_date) as "store-drug-age"
from
"prod2-generico".sales s
where
s."bill-flag" = 'gross'
group by
s."store-id" ,
s."drug-id"
"""
store_drug_age = rs_db.get_df(store_drug_age_query)
inventory = inventory.merge(store_drug_age, how = 'left', on = ['store-id','drug-id'])
inventory['store-drug-age'] = inventory['store-drug-age'].fillna(1)
logger.info('fetched store_drug_age data')
#Fetching sales data
sales_query = """
select
s."store-id" ,
s."drug-id" ,
sum(s.quantity) as "last-90-days-sales-quantity"
from
"prod2-generico"."prod2-generico".sales s
where
date(s."created-at") > current_date - 90
group by
s."store-id" ,
s."drug-id"
"""
sales = rs_db.get_df(sales_query)
inventory = inventory.merge(sales, how = 'left', on = ['store-id','drug-id'])
inventory['last-90-days-sales-quantity-orignal'] = inventory['last-90-days-sales-quantity']
logger.info('fetched sales data')
# Fetching store-drug availibility percentage
store_drug_availibility_query = """
select
oosdl."store-id" ,
oosdl."drug-id" ,
-- sum("oos-count") as "oos-count",
-- sum("drug-count") as "drug-count",
1 - (sum("oos-count")/ sum("drug-count")) as "availibility_percentage"
from
"prod2-generico"."out-of-shelf-drug-level" oosdl
where
"closing-date" >= current_date - 90
and "max-set" = 'Y'
group by
oosdl."store-id" ,
oosdl."drug-id"
"""
store_drug_availibility = rs_db.get_df(store_drug_availibility_query)
inventory = inventory.merge(store_drug_availibility, how = 'left', on = ['store-id','drug-id'])
inventory['availibility_percentage'] = inventory['availibility_percentage'].fillna(1)
logger.info('fetched store_drug_availibility data')
# Calculating sales based doh
inventory['last-90-days-sales-quantity'] = inventory['last-90-days-sales-quantity'].fillna(0)
inventory['last-90-days-sales-quantity'] = inventory['last-90-days-sales-quantity'] /inventory['availibility_percentage']
inventory['last-90-days-sales-quantity'] = inventory['last-90-days-sales-quantity'].astype(float)
inventory['sales-demand-daily'] = inventory['last-90-days-sales-quantity']*1.0/90.0
inventory['doh-based-on-sales'] = inventory['quantity']/inventory['sales-demand-daily']
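# sales-based DOH uses availability-adjusted 90-day sales; drugs with no sales end up
# with an infinite DOH (such rows are dropped for COCO and clamped for FOFO downstream)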
inventory['store-drug'] = inventory['store-id'].astype(str) + '-' + inventory['drug-id'].astype(str)
store_drug_list = tuple(map(str,inventory['store-drug'].unique()))
logger.info('calculated sales based doh')
# Fetching IPC forecast data
ipc_forecast_query = f"""
select
iss."store-id" ,
iss."drug-id" ,
iss.fcst ,
iss.std
from
"prod2-generico"."ipc2-safety-stock" iss
inner join (
select
"store-id" ,
max("reset-date") as latest_reset
from
"prod2-generico"."ipc2-safety-stock" iss
group by
"store-id"
) as sq
on
iss."store-id" = sq."store-id"
and iss."reset-date" = sq.latest_reset
and concat(iss."store-id", CONCAT('-', iss."drug-id")) in {store_drug_list}
"""
ipc_forecast = rs_db.get_df(ipc_forecast_query)
inventory = inventory.merge(ipc_forecast, how = 'left', on = ['store-id','drug-id'])
logger.info('fetched ipc_forecast data')
# calculating fcst based doh
inventory['fcst'] = inventory['fcst'].fillna(0)
inventory['fcst-demand-daily'] = inventory['fcst']/28
inventory['doh-based-on-fcst'] = inventory['quantity']/inventory['fcst-demand-daily']
logger.info('calculated fcst based doh')
# deciding fcst vs sales based doh
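# pick whichever estimate (forecast-based or sales-history-based) gives the lower DOH,
# i.e. assume the higher daily demand - the more conservative basis for flagging excess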
conditions = [inventory['doh-based-on-fcst']<inventory['doh-based-on-sales'],inventory['doh-based-on-fcst']>=inventory['doh-based-on-sales']]
choice = [inventory['doh-based-on-fcst'],inventory['doh-based-on-sales']]
choice2 = [inventory['fcst-demand-daily'],inventory['sales-demand-daily']]
choice3 = ['fcst-based','sales-history-based']
inventory['doh'] = np.select(conditions,choice, default=0)
inventory['demand-daily'] = np.select(conditions,choice2, default=0)
inventory['doh-type'] = np.select(conditions,choice3, default=0)
logger.info('decided fcst vs sales based doh')
# Fetching DOI data
doi_query = f"""
select
doi."store-id" ,
doi."drug-id" ,
doi.min,
doi.max ,
doi."safe-stock"
from
"prod2-generico"."drug-order-info" doi
where
concat(doi."store-id", CONCAT('-', doi."drug-id")) in {store_drug_list}
"""
doi = rs_db.get_df(doi_query)
inventory = inventory.merge(doi,how='left',on = ['store-id','drug-id'])
inventory['max-time-x-original'] = (inventory['max']*max_times_x).apply(np.ceil)
inventory['max-plus-x-doh'] = inventory['max']+ (inventory['fcst-demand-daily']*1).apply(np.ceil)
# Cushion over max: either x% or x DOH, whichever is higher
conditions = [inventory['max-time-x-original']>=inventory['max-plus-x-doh'],inventory['max-time-x-original']<inventory['max-plus-x-doh']]
choice = [inventory['max-time-x-original'],inventory['max-plus-x-doh']]
choice2 = ['percentage over max','DOH over max']
inventory['max-cushion-type'] = np.select(conditions,choice2)
inventory['max-time-x'] = np.select(conditions,choice)
logger.info('fetched doi data')
# Fetching NPI data
npi_query = """
select
nias."store-id" ,
nias."drug-id" ,
'1' as "in-npi"
from "prod2-generico"."prod2-generico"."npi-inventory-at-store" nias
where
nias."inventory-type" = 'Rotate'
group by
nias."store-id" ,
nias."drug-id"
"""
npi = rs_db.get_df(npi_query)
inventory = inventory.merge(npi , how = 'left', on = ['store-id','drug-id'])
logger.info('fetched npi data')
# Categorising inventory based on DOH and MAX
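# a store-drug is flagged 'excess' only when DOH >= doh_limit, quantity exceeds the
# cushioned max, and quantity is above the qualifying cap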
conditions = [(inventory['doh']>=doh_limit) & (inventory['quantity']>inventory['max-time-x']) &(inventory['quantity']>quantity_cap_to_qualify_as_excess_inventory)]
choice = ['excess']
inventory['excess-flag'] = np.select(conditions,choice, default='ok')
# Identifying excess inventory
inventory['excess-def1'] = (inventory['doh']-doh_limit)*inventory['demand-daily']
inventory['excess-def2'] = (inventory['quantity']-inventory['max-time-x'])
inventory['excess-defmin'] = inventory[['excess-def1','excess-def2']].min(axis=1)
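# excess quantity = the smaller of (quantity above the DOH limit) and (quantity above the cushioned max)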
# Calculating immediate stock transfer opportunity
# inventory['less-def1'] = (minimum_doh - inventory['doh'])*inventory['demand-daily']
# inventory['less-defmin'] = inventory['less-def1']
# Defining excess quantity and value
conditions = [(inventory['excess-flag']=='excess')]
choice = [inventory['excess-defmin']]
inventory['excess-quantity'] = np.select(conditions,choice, default=0)
inventory['excess-quantity'] = inventory['excess-quantity'].apply(np.floor)
inventory['excess-value'] = (inventory['workcell-value'].astype(float)/inventory['quantity'].astype(float))*inventory['excess-quantity'].astype(float)
# Excess value should be greater than workcell value limit
conditions = [(inventory['excess-flag']=='excess') & (inventory['excess-value']>= workcell_value_limit),
(inventory['excess-flag']=='excess') & (inventory['excess-value']< workcell_value_limit),
(inventory['excess-flag']!='excess')]
choice = ['excess','ok',inventory['excess-flag']]
choice1 =[inventory['excess-quantity'],0,inventory['excess-quantity']]
choice2 = [inventory['excess-value'],0,inventory['excess-value']]
inventory['excess-flag'] = np.select(conditions,choice, default='ok')
inventory['excess-quantity'] = np.select(conditions,choice1, default=0)
inventory['excess-value'] = np.select(conditions,choice2, default=0)
# Excess quantity should be greater than minimum_excess_quantity_cap
conditions = [(inventory['excess-flag']=='excess') & (inventory['excess-quantity']>= minimum_excess_quantity_cap),
(inventory['excess-flag']=='excess') & (inventory['excess-quantity']< minimum_excess_quantity_cap),
(inventory['excess-flag']!='excess')]
choice = ['excess','ok',inventory['excess-flag']]
choice1 =[inventory['excess-quantity'],0,inventory['excess-quantity']]
choice2 = [inventory['excess-value'],0,inventory['excess-value']]
inventory['excess-flag'] = np.select(conditions,choice, default='ok')
inventory['excess-quantity'] = np.select(conditions,choice1, default=0)
inventory['excess-value'] = np.select(conditions,choice2, default=0)
logger.info('categorised inventory with flags - excess/less/ok')
# Immediate Stock transfer opportunity
# Immediate implies -Void in network stores (Void = (minimum_doh - inventory['doh'])*inventory['demand-daily'])
# df4 = pd.pivot_table(inventory,
# values=['excess-quantity','excess-value'],
# index=['drug-id'],
# columns=['excess-flag'],
# aggfunc=np.sum).reset_index()
# df4.columns = ["-".join(x) for x in df4.columns.ravel()]
#
# df4 = df4.reset_index(drop = True)
# del df4['excess-quantity-ok']
# del df4['excess-value-ok']
#
# df4 = df4[(df4['excess-quantity-excess']>0) | (df4['excess-quantity-less']>0)]
# df4.reset_index(drop = True, inplace = True)
#
# df4.loc[df4['excess-quantity-excess']>=df4['excess-quantity-less'] , 'qty-stock-transfer-opportunity-immediate'] = df4['excess-quantity-less']
# df4.loc[df4['excess-quantity-excess']<df4['excess-quantity-less'] , 'qty-stock-transfer-opportunity-immediate'] = df4['excess-quantity-excess']
#
# df4.loc[df4['excess-quantity-excess']>=df4['excess-quantity-less'] , 'value-stock-transfer-opportunity-immediate'] = df4['excess-value-less']
# df4.loc[df4['excess-quantity-excess']<df4['excess-quantity-less'] , 'value-stock-transfer-opportunity-immediate'] = df4['excess-value-excess']
#
# df4.rename(columns={'drug-id-': 'drug-id'},inplace=True)
# df4.columns
# inventory = inventory.merge(df4[['drug-id','qty-stock-transfer-opportunity-immediate',
# 'value-stock-transfer-opportunity-immediate' ]] , how = 'left', on = ['drug-id'])
#
# logger.info('calculated immediate stock transfer opportunity')
# logger.info("")
# logger.info('whole network level data')
# logger.info(f"Excess Inventory - {sum(inventory[inventory['excess-flag']=='excess']['excess-value'])}")
# logger.info(f"Excess Inventory with NPI - {sum(inventory[(inventory['excess-flag']=='excess') & (inventory['in-npi']=='1')]['excess-value'])}")
# logger.info(f"Excess Inventory without NPI - {sum(inventory[(inventory['excess-flag']=='excess') & (inventory['in-npi']!='1')]['excess-value'])}")
# logger.info(f"stock-transfer-opportunity - {sum(df4['value-stock-transfer-opportunity-immediate'].fillna(0))}")
# logger.info("")
# Network level sales
network_sales = sales.groupby(['drug-id']).sum().reset_index()[['drug-id','last-90-days-sales-quantity']]
network_sales.rename(columns={'last-90-days-sales-quantity': 'network-level-last-90-days-sales-quantity'},inplace=True)
network_sales[['drug-id','network-level-last-90-days-sales-quantity']] = network_sales[['drug-id','network-level-last-90-days-sales-quantity']].astype(int)
inventory = inventory.merge(network_sales , how = 'left', on = ['drug-id'])
logger.info('added network level sales')
inventory = inventory[inventory['excess-flag'] == 'excess']
inventory = inventory[inventory['store-age']>minimum_store_age]
inventory = inventory[inventory['store-drug-age']>minimum_store_drug_age]
if int(categorise_coco_only_flag) == 1:
inventory = inventory[inventory['franchise-tag']=='COCO']
inventory = inventory[inventory['in-npi'].isna()]
inventory = inventory[inventory['last-90-days-sales-quantity']!= 0] # Potential NPI, so not adding in excess
# inventory['qty-stock-transfer-opportunity-immediate'] = inventory['qty-stock-transfer-opportunity-immediate'].fillna(0)
# inventory['value-stock-transfer-opportunity-immediate'] = inventory['value-stock-transfer-opportunity-immediate'].fillna(0)
inventory['network-level-last-90-days-sales-quantity'] = inventory['network-level-last-90-days-sales-quantity'].fillna(0)
# conditions = [inventory['qty-stock-transfer-opportunity-immediate']>0]
# choice = [1]
# inventory['immediate-stock-transfer-opportunity-flag'] = np.select(conditions,choice, default=0)
conditions = [inventory['network-level-last-90-days-sales-quantity']>0,inventory['network-level-last-90-days-sales-quantity']<=0]
choice = ['Rotate','Return']
inventory['inventory-type'] = np.select(conditions,choice, default=0)
logger.info('End : COCO Excess categorisation')
# Adding Launch stock DOH > 180 In FOFO
# Fetching fofo Inventory data
logger.info('start : Adding Launch stock DOH > 180 In FOFO')
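# launch stock = workcell inventory invoiced before the store opened; FOFO stores older than
# fofo_launch_store_age are flagged when the sales-based DOH exceeds fofo_launch_doh_limit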
fofo_launch_inventory_query = f"""
select
i."store-id" ,
i."drug-id",
sum(i.quantity) as "quantity",
sum(i.quantity * i."purchase-rate") as "workcell-value"
from
"prod2-generico"."inventory-1" i
left join "prod2-generico".invoices inv on
i."invoice-id" = inv.id
left join "prod2-generico"."invoices-1" i2
on i."franchisee-invoice-id" = i2.id
left join "prod2-generico"."stores" s on
s."id" = i."store-id"
where
i."quantity" > 0
and s."franchisee-id" != 1 -- fofo
and i."franchisee-inventory" = 0 -- workcell inventory
and (inv."invoice-date") < (s."opened-at") -- launch stock
and i2."franchisee-invoice" = 0 -- workcell invoice
group by
i."store-id" ,
i."drug-id"
"""
fofo_launch_inventory = rs_db.get_df(fofo_launch_inventory_query)
logger.info('fetched fofo_launch_inventory data')
fofo_launch_inventory = fofo_launch_inventory.merge(stores, how = 'left', on = 'store-id')
fofo_launch_inventory = fofo_launch_inventory.merge(drugs, how = 'left', on = 'drug-id')
fofo_launch_inventory = fofo_launch_inventory.merge(store_drug_age, how = 'left', on = ['store-id','drug-id'])
fofo_launch_inventory = fofo_launch_inventory.merge(sales, how = 'left', on = ['store-id','drug-id'])
fofo_launch_inventory['last-90-days-sales-quantity-orignal'] = fofo_launch_inventory['last-90-days-sales-quantity']
fofo_launch_inventory = fofo_launch_inventory.merge(store_drug_availibility, how = 'left', on = ['store-id','drug-id'])
fofo_launch_inventory['availibility_percentage'] = fofo_launch_inventory['availibility_percentage'].fillna(1)
fofo_launch_inventory['last-90-days-sales-quantity'] = fofo_launch_inventory['last-90-days-sales-quantity'].fillna(0)
fofo_launch_inventory['sales-demand-daily'] = fofo_launch_inventory['last-90-days-sales-quantity']*1.0/90.0
fofo_launch_inventory['doh-based-on-sales'] = fofo_launch_inventory['quantity']/fofo_launch_inventory['sales-demand-daily']
fofo_launch_inventory = fofo_launch_inventory.merge(ipc_forecast, how = 'left', on = ['store-id','drug-id'])
fofo_launch_inventory = fofo_launch_inventory.merge(doi,how='left',on = ['store-id','drug-id'])
fofo_launch_inventory['max-time-x'] = (fofo_launch_inventory['max']*max_times_x).apply(np.ceil)
# fofo_launch_inventory['max-plus-x-doh'] = fofo_launch_inventory['max'] + (fofo_launch_inventory['fcst-demand-daily']*1).apply(np.ceil)
# Cushion over max: either x% or x DOH, whichever is higher
# conditions = [fofo_launch_inventory['max-time-x-original']>=fofo_launch_inventory['max-plus-x-doh'],fofo_launch_inventory['max-time-x-original']<fofo_launch_inventory['max-plus-x-doh']]
# choice = [fofo_launch_inventory['max-time-x-original'],fofo_launch_inventory['max-plus-x-doh']]
# choice2 = ['percentage over max','DOH over max']
# fofo_launch_inventory['max-cushion-type'] = np.select(conditions,choice2)
# fofo_launch_inventory['max-time-x'] = np.select(conditions,choice)
fofo_launch_inventory = fofo_launch_inventory.merge(npi , how = 'left', on = ['store-id','drug-id'])
fofo_launch_inventory = fofo_launch_inventory.merge(network_sales , how = 'left', on = ['drug-id'])
# Changing doh infinity cases to doh 2000
conditions = [fofo_launch_inventory['doh-based-on-sales']==np.inf,fofo_launch_inventory['doh-based-on-sales']!=np.inf]
choice = [2000,fofo_launch_inventory['doh-based-on-sales']]
fofo_launch_inventory['doh-based-on-sales'] = np.select(conditions,choice)
fofo_launch_inventory['excess-def1'] = (fofo_launch_inventory['doh-based-on-sales']-fofo_launch_doh_limit)*fofo_launch_inventory['sales-demand-daily']
fofo_launch_inventory['excess-def2'] = (fofo_launch_inventory['quantity']-fofo_launch_inventory['max'])
fofo_launch_inventory['excess-defmin'] = fofo_launch_inventory[['excess-def1','excess-def2']].min(axis=1)
fofo_launch_inventory = fofo_launch_inventory[fofo_launch_inventory['store-age']>fofo_launch_store_age]
fofo_launch_inventory = fofo_launch_inventory[fofo_launch_inventory['doh-based-on-sales']>fofo_launch_doh_limit]
fofo_launch_inventory = fofo_launch_inventory[fofo_launch_inventory['in-npi'].isna()]
fofo_launch_inventory = fofo_launch_inventory[fofo_launch_inventory['last-90-days-sales-quantity']!= 0]
fofo_launch_inventory['excess-flag'] = 'fofo_launch_doh'
# Defining excess quantity and value
conditions = [(fofo_launch_inventory['excess-flag']=='fofo_launch_doh')]
choice = [fofo_launch_inventory['excess-defmin']]
fofo_launch_inventory['excess-quantity'] = np.select(conditions,choice, default=0)
fofo_launch_inventory['excess-quantity'] = fofo_launch_inventory['excess-quantity'].apply(np.floor)
fofo_launch_inventory['excess-value'] = (fofo_launch_inventory['workcell-value'].astype(float)/fofo_launch_inventory['quantity'].astype(float))*fofo_launch_inventory['excess-quantity'].astype(float)
fofo_launch_inventory['inventory-type'] = 'Rotate_fofo_launch_doh'
logger.info('end : Adding Launch stock DOH > 180 In FOFO')
inventory['max-with-cushion'] = inventory['max-time-x']
fofo_launch_inventory['max-with-cushion'] = fofo_launch_inventory['max-time-x']
categorisation = pd.concat([inventory,fofo_launch_inventory])
categorisation['created-at'] = cur_date
categorisation['created-by'] = '[email protected]'
categorisation['updated-at'] = cur_date
categorisation['updated-by'] = '[email protected]'
# =============================================================================
# Snapshot Queries
# =============================================================================
truncate_sns_query = '''
delete from "prod2-generico"."excess-inventory-categorisation-sns"
where "snapshot-date" = CURRENT_DATE +1
'''
insert_sns_query = '''
insert
into
"prod2-generico"."excess-inventory-categorisation-sns"
select
CURRENT_DATE + 1 as "snapshot-date",
"inventory-type",
"store-id",
"store-name",
"drug-type",
"goodaid-flag" ,
sum("excess-quantity") as "excess-quantity" ,
sum("excess-value") as "excess-value"
from
"prod2-generico"."excess-inventory-categorisation"
group by
"inventory-type",
"store-id",
"store-name",
"drug-type",
"goodaid-flag"
'''
# =============================================================================
# Writing table to RS
# =============================================================================
try:
schema = 'prod2-generico'
table_name = 'excess-inventory-categorisation'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' delete
from "{schema}"."{table_name}"
'''
rs_db.execute(truncate_query)
logger.info(str(table_name) + ' table old data deleted')
s3.write_df_to_db(df=categorisation[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
rs_db.execute(truncate_sns_query)
rs_db.execute(insert_sns_query)
logger.info(str(table_name) + ' snapshot table uploaded')
status = True
except Exception as error:
status = False
raise Exception(error)
if status is True:
mssg = 'Success'
else:
mssg = 'Failed'
# =============================================================================
# Sending Email
# =============================================================================
end_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject=f"{env}-{mssg} : {table_name} table updated",
mail_body=f"{table_name} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/excess_inventory/excess_inventory_categorisation.py | excess_inventory_categorisation.py |
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.aws.s3 import S3
import pandas as pd
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-st', '--source_table', default="", type=str, required=False)
parser.add_argument('-tt', '--target_table', default="", type=str, required=False)
parser.add_argument('-ss', '--source_schema', default="", type=str, required=False)
parser.add_argument('-ts', '--target_schema', default="", type=str, required=False)
parser.add_argument('-b', '--batch_size', default=1000, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
source_table = args.source_table
target_table = args.target_table
source_schema = args.source_schema
target_schema = args.target_schema
batch_size = args.batch_size
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
tr_query = f""" truncate table "{target_schema}"."{target_table}" """
rs_db.execute(tr_query)
pg_obj = PostGre()
pg_obj.open_connection()
s3 = S3()
table_info = helper.get_table_info(db=rs_db, table_name=target_table, schema=target_schema)
columns = list(table_info['column_name'])
incomplete = True
last_id = None
total_pushed = 0
total_count = f"""select count(id) from "{source_schema}"."{source_table}" ;"""
df_count = pd.read_sql_query(total_count, pg_obj.connection)
count = int(df_count.values[0][0])
counter = 1
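# Batched copy using keyset pagination on "id": each iteration fetches the next
# batch_size rows ordered by id and remembers the last id seen, avoiding OFFSET scans.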
while incomplete:
logger.info("iteration no: {}".format(counter))
limit_str = f" limit {batch_size} " if batch_size else ""
filter_str = f" where id > {last_id} " if last_id else ""
query = f"""
select *
from
"{source_schema}"."{source_table}"
{filter_str}
order by id asc
{limit_str} ;
"""
df = pd.read_sql_query(query, pg_obj.connection)
if df.empty:
incomplete = False
else:
last_id = int(df['id'].values[-1])
df.drop(columns=['id'], inplace=True)
df.columns = [c.replace('_', '-') for c in df.columns]
logger.info("writing batch to target table")
s3.write_df_to_db(df=df, table_name=target_table, db=rs_db, schema=target_schema)
logger.info("batch successfully written to target table")
total_pushed += batch_size
if total_pushed >= count:
incomplete = False
counter = counter + 1
rs_db.close_connection()
pg_obj.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/postgres_to_rs_table_migration/postgres-to-rs-table-migration.py | postgres-to-rs-table-migration.py |
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB, PostGreWrite
from zeno_etl_libs.helper.aws.s3 import S3
import pandas as pd
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-st', '--source_table', default="", type=str, required=False)
parser.add_argument('-tt', '--target_table', default="", type=str, required=False)
parser.add_argument('-ss', '--source_schema', default="", type=str, required=False)
parser.add_argument('-ts', '--target_schema', default="", type=str, required=False)
parser.add_argument('-b', '--batch_size', default=1000, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
source_table = args.source_table
target_table = args.target_table
source_schema = args.source_schema
target_schema = args.target_schema
batch_size = args.batch_size
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
pg_obj = PostGreWrite()
pg_obj.open_connection()
print(f"{target_schema}.{target_table}")
tr_query = f"""delete from {target_table};"""
pg_obj.engine.execute(tr_query)
s3 = S3()
incomplete = True
last_id = None
total_pushed = 0
total_count = f"""select count(id) from "{source_schema}"."{source_table}" ;"""
df_count = pd.read_sql_query(total_count, rs_db.connection)
count = int(df_count.values[0][0])
counter = 1
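# Same keyset-pagination pattern, in reverse: batches are read from Redshift and
# appended to the Postgres table via pandas.DataFrame.to_sql in chunks of 500 rows.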
while incomplete:
logger.info("iteration no: {}".format(counter))
limit_str = f" limit {batch_size} " if batch_size else ""
filter_str = f" where id > {last_id} " if last_id else ""
query = f"""
select
id,
"store-id" as store_id,
"drug-type" as "type",
NULL as store_name,
NULL as dc,
"forward-dc-id" as dc_id,
"created-at" as uploaded_at
from
"{source_schema}"."{source_table}"
{filter_str}
order by id asc
{limit_str} ;
"""
df = rs_db.get_df(query=query)
if df.empty:
incomplete = False
else:
last_id = int(df['id'].values[-1])
df.drop(columns=['id'], inplace=True)
df.columns = [c.replace('_', '-') for c in df.columns]
logger.info("writing batch to target table")
df.to_sql(
name='store_dc_mapping', con=pg_obj.engine, if_exists='append',
chunksize=500, method='multi', index=False)
logger.info("batch successfully written to target table")
total_pushed += batch_size
if total_pushed >= count:
incomplete = False
counter = counter + 1
rs_db.close_connection()
pg_obj.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/postgres_to_rs_table_migration/rs_to_postgre_migration.py | rs_to_postgre_migration.py |
import argparse
import datetime
import json
import os
import sys
from datetime import datetime as dt
from datetime import timedelta
import dateutil
import pandas as pd
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
# connections
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# table info
schema = 'prod2-generico'
table_name = 'retention-day-quarter'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
########################################################
# BILLING data (lengthy data-set)
########################################################
# order id to patient id
q1 = """
SELECT
"patient-id",
"store-id",
"id" AS bill_id,
"created-at" AS bill_created_at
FROM "prod2-generico"."bills-1"
"""
rs_db.execute(q1, params=None)
data_bill: pd.DataFrame = rs_db.cursor.fetch_dataframe()
data_bill.columns = [c.replace('-', '_') for c in data_bill.columns]
data_bill['bill_created_at'] = pd.to_datetime(data_bill['bill_created_at'])
# data_bill['last_bill_created_at'] = pd.to_datetime(data_bill['bill_created_at'])
data_bill['bill_date'] = pd.to_datetime(data_bill['bill_created_at'].dt.date)
data_bill['year_bill'] = data_bill['bill_date'].dt.year
data_bill['month_bill'] = data_bill['bill_date'].dt.month
logger.info("Data for bills fetched, with length {}".format(len(data_bill)))
# First take only first bill in a month, others don't matter for retention
data_a = data_bill.groupby(['patient_id', 'year_bill', 'month_bill', 'store_id'])['bill_date'].min().reset_index()
# data_a = data_bill.groupby(['patient_id', 'year_bill', 'month_bill', 'store_id']).agg(
# last_bill_created_at=('last_bill_created_at', 'max'), bill_date=('bill_date', 'min')).reset_index()
logger.info("Data for bills - after taking first in month, with length {}".format(len(data_a)))
# Calculate quarters
data_a['cohort_quarter'] = data_a['bill_date'].dt.to_period("Q")
data_a['cohort_quarter_number'] = data_a['cohort_quarter'].dt.strftime('%q').astype(int)
# Min bill date in quarter
data_a = data_a.sort_values(by=['patient_id', 'year_bill', 'month_bill', 'bill_date'])
data_a['rank'] = data_a.groupby(['patient_id', 'cohort_quarter']).cumcount() + 1
data_a = data_a[data_a['rank'] == 1].copy()
# Self join is needed, to find any retention
# drop duplicates should not be needed ideally
data_a_left = data_a[
['patient_id', 'cohort_quarter', 'cohort_quarter_number', 'year_bill', 'month_bill', 'store_id']]
data_a_left = data_a_left.rename(columns={'year_bill': 'year_cohort'})
data_a_left = data_a_left.rename(columns={'month_bill': 'month_cohort'})
# drop duplicates should not be needed ideally
data_a_right = data_a[
['patient_id', 'cohort_quarter', 'cohort_quarter_number', 'year_bill', 'bill_date']]
data_a_right = data_a_right.rename(columns={'cohort_quarter': 'bill_quarter'})
data_a_right = data_a_right.rename(columns={'cohort_quarter_number': 'bill_quarter_number'})
# Self join
data = data_a_left.merge(data_a_right, how='left', on=['patient_id'])
# First day in quarter
data['day_zero_in_cohort_quarter'] = data['cohort_quarter'].dt.to_timestamp()
data['day_zero_in_bill_quarter'] = data['bill_quarter'].dt.to_timestamp()
# Convert cohort quarter to string
data['cohort_quarter'] = data['cohort_quarter'].astype(str)
data['bill_quarter'] = data['bill_quarter'].astype(str)
# Day number in quarter
data['day_index'] = (data['bill_date'] - data['day_zero_in_bill_quarter']).dt.days + 1
# Remove negative date mappings
data = data[data['bill_date'] >= data['day_zero_in_cohort_quarter']].copy()
# Official quarter diff
data['quarter_diff'] = (data['year_bill'] - data['year_cohort']) * 4 + (
data['bill_quarter_number'] - data['cohort_quarter_number'])
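# Worked example: a cohort bill in 2022-Q4 followed by a bill in 2023-Q1 gives
# quarter_diff = (2023 - 2022) * 4 + (1 - 4) = 1, i.e. retention in the very next quarter.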
# We don't need history of 3+ quarters retention
data = data[data['quarter_diff'] <= 2].copy()
###############################
# Resurrection
###############################
# Now for resurrection specific
# Those who came in next quarter, are not resurrection candidates
# drop duplicates should not be needed
data_r1 = data[data['quarter_diff'] == 1][['patient_id', 'cohort_quarter']]
data_r1['resurrection_candidate'] = 0
data = data.merge(data_r1, how='left', on=['patient_id', 'cohort_quarter'])
# Rest are resurrection candidates, i.e. they didn't come in the next quarter,
# so we will see their resurrection later on
data['resurrection_candidate'] = data['resurrection_candidate'].fillna(1)
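# e.g. a 2022-Q1 cohort patient with no 2022-Q2 bill keeps resurrection_candidate = 1;
# a later bill in 2022-Q3 (quarter_diff = 2) for that patient then counts as a resurrection.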
logger.info("Data length is {}".format(len(data)))
# Summary numbers
data_grp = data.groupby(['cohort_quarter'])['patient_id'].apply(pd.Series.nunique).reset_index()
data_grp = data_grp.rename(columns={'patient_id': 'cohort_quarter_patients'})
# Merge with main data
data = data.merge(data_grp, how='left', on=['cohort_quarter'])
logger.info("Data length after merging with cohort base {}".format(len(data)))
# Resurrection base too
data_grp_res = data[data['resurrection_candidate'] == 1].groupby(
['cohort_quarter'])['patient_id'].apply(pd.Series.nunique).reset_index()
data_grp_res = data_grp_res.rename(columns={'patient_id': 'cohort_resurrection_candidates'})
# Merge with main data
data = data.merge(data_grp_res, how='left', on=['cohort_quarter'])
logger.info("Data length after merging with cohort base {}".format(len(data)))
# Final columns
final_cols = ['patient_id', 'cohort_quarter', 'cohort_quarter_number', 'year_cohort',
'store_id', 'bill_quarter', 'bill_quarter_number', 'year_bill',
'bill_date', 'day_zero_in_cohort_quarter',
'day_zero_in_bill_quarter', 'day_index', 'quarter_diff',
'resurrection_candidate', 'cohort_quarter_patients',
'cohort_resurrection_candidates']
data.drop_duplicates(keep="first", inplace=True)
data = data[final_cols]
data['resurrection_candidate'] = data['resurrection_candidate'].astype('Int64')
data.columns = [c.replace('_', '-') for c in data.columns]
data['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data['created-by'] = 'etl-automation'
data['updated-by'] = 'etl-automation'
data['bill-date'] = data['bill-date'].dt.date
#logger.info("Existing data fetched with length {}".format(len(data_dss)))
print(data.head(1))
if table_info is None:
    logger.info(f"table: {table_name} does not exist")
else:
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
logger.info(truncate_query)
rs_db.execute(truncate_query)
""" seek the data """
print(data.head(1))
print(table_info)
file_s3_uri_save = s3.save_df_to_s3(df=data[table_info['column_name']],
file_name="retention-day-quarter.csv")
s3.write_to_db_from_s3_csv(table_name=table_name,
file_s3_uri=file_s3_uri_save,
db=rs_db, schema=schema)
s3.write_df_to_db(df=data[table_info['column_name']], table_name=table_name, db=rs_db, schema=schema)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/retention-day-quarter/retention-day-quarter.py | retention-day-quarter.py |
import os
import sys
import argparse
import pandas as pd
sys.path.append('../../../../../../../..')
from zeno_etl_libs.db.db import DB, MongoDB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
s3 = S3()
rs_db = DB(read_only=False)
rs_db.open_connection()
mg_db = MongoDB()
mg_client = mg_db.open_connection("generico-crm")
db = mg_client['generico-crm']
schema = 'prod2-generico'
table_name = 'zeno-app-delivery'
def max_last_date():
"""
This function helps in getting the maximum updated-at date from the
Redshift table for incremental load
"""
query = f""" select max("updated-at") as max_date from "{schema}"."{table_name}" """
df = pd.read_sql_query(query, rs_db.connection)
if df[b'max_date'][0] is None:
return "2020-01-01 00:00:00.000000"
return str(df[b'max_date'][0])
try:
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
logger.info(table_info)
collection = db['deliveryTaskGroupLog'].find({})
data_raw = pd.DataFrame(list(collection))
data_raw['tasks'] = data_raw['tasks'].apply(pd.Series)
    app_data = pd.json_normalize(data_raw['tasks'])
logger.info(app_data.columns)
collection = db['deliveryAgent'].find({})
del_agent_data_raw = pd.DataFrame(list(collection))
del_agent_data_raw.rename(columns={"id": "agent_id", "name": "agent_name"}, inplace=True)
    del_agent_data = del_agent_data_raw[['agent_id', 'agent_name', 'username']].drop_duplicates()
# merging agent data
app_data = app_data.merge(del_agent_data, how='left', on=['agent_id'])
app_data.columns = [c.replace('_', '-') for c in app_data.columns]
app_data.rename(columns={"createdAt": "created-at", "updated-time": "updated-at"}, inplace=True)
app_data['agent-id'] = app_data['agent-id'].astype('Int64')
max_update_date = max_last_date()
logger.info(f"max update-at date: {max_update_date}")
app_data = app_data.loc[app_data['updated-at'] >= max_update_date]
s3.write_df_to_db(df=app_data[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
except Exception as error:
raise Exception(error)
finally:
rs_db.close_connection()
mg_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/zeno-app-delivery/zeno-app-delivery.py | zeno-app-delivery.py |
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger, send_logs_via_email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
def seek(rs_db, limit=None):
cur_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
start_mtd = (datetime.date.today() + relativedelta(months=-6)).replace(day=1).strftime('%Y-%m-%d')
end_mtd = (datetime.date.today() + relativedelta(days=-1)).strftime('%Y-%m-%d')
# =============================================================================
# Store master
# =============================================================================
query = f"""
select
id as "store-id",
store as "store-name",
"store-manager" ,
"line-manager" ,
abo
from
"prod2-generico"."stores-master" sm
"""
rs_db.execute(query, params=None)
stores_master_df: pd.DataFrame = rs_db.cursor.fetch_dataframe()
logger.info("Data: stores-master fetched successfully")
# =============================================================================
# local purchase instances base data
# =============================================================================
limit_str = f" limit {limit} ; " if limit else ""
query = f"""
select
ii.id as "invoice-item-id",
date(i."received-at") as "received-date",
ii."drug-id" ,
i."store-id" ,
ii."invoice-item-reference" ,
ii."net-value" as "lp-value",
ii."actual-quantity" as "lp-qty"
from
"prod2-generico"."invoice-items-1" ii
inner join "prod2-generico"."invoices-1" i on
ii."franchisee-invoice-id" = i.id
inner join "prod2-generico".stores s on
i."store-id" = s.id
where
date(i."received-at") >= '%s'
and date(i."received-at") <= '%s'
and i."invoice-reference" is null
and s."franchisee-id" = 1
union
select
ii.id as "invoice-item-id",
date(i."received-at") as "received-date",
ii."drug-id" ,
i."store-id" ,
ii."invoice-item-reference" ,
ii."net-value" as "lp-value",
ii."actual-quantity" as "lp-qty"
from
"prod2-generico"."invoice-items-1" ii
inner join "prod2-generico"."invoices-1" i on
ii."franchisee-invoice-id" = i.id
inner join "prod2-generico".invoices i2
on
ii."invoice-id" = i2.id
inner join "prod2-generico".stores s on
i."store-id" = s.id
where
date(i."received-at") >= '%s'
and date(i."received-at") <= '%s'
and s."franchisee-id" != 1
and i."franchisee-invoice" != 0
and i2."distributor-id" != 10000
{limit_str}
""" % (start_mtd, end_mtd,start_mtd,end_mtd)
rs_db.execute(query=query, params=None)
lp_df: pd.DataFrame = rs_db.cursor.fetch_dataframe()
logger.info("Data: lp invoice fetched successfully")
lp_2_df = lp_df.groupby(['received-date', 'invoice-item-id', 'store-id', 'drug-id'],
as_index=False).agg({'lp-value': ['sum'], 'lp-qty': ['sum']}).reset_index(drop=True)
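    # Flatten the MultiIndex produced by .agg(): ('lp-value', 'sum') -> 'lp-value-sum',
    # while plain group keys such as ('store-id', '') just lose the trailing '-'.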
lp_2_df.columns = ["-".join(x).rstrip('-') for x in lp_2_df.columns.ravel()]
# =============================================================================
# local purchase liquidation
# =============================================================================
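    # Two branches are unioned: company stores ("franchisee-id" = 1, local purchases have
    # "invoice-reference" is null) and franchisee stores ("franchisee-id" != 1 with
    # "distributor-id" != 10000); in each branch customer returns are netted off against sales.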
query = f"""
( SELECT
c."invoice-item-id",
b."store-id",
c."drug-id",
SUM(c."ptr" * a."quantity") AS "lp-sales",
SUM(a."quantity") AS "lp-qty-sales"
FROM
"prod2-generico"."bill-items-1" a
LEFT JOIN
"prod2-generico"."bills-1" b ON b."id" = a."bill-id"
LEFT JOIN
"prod2-generico"."inventory-1" c ON c."id" = a."inventory-id"
LEFT JOIN
"prod2-generico"."invoice-items-1" ii ON ii."id" = c."invoice-item-id"
JOIN
"prod2-generico"."invoices-1" i1 ON i1."id" = ii."franchisee-invoice-id"
join "prod2-generico".stores s on b."store-id" =s.id
WHERE
DATE(a."created-at") >= '{start_mtd}'
AND DATE(a."created-at") <= '{end_mtd}'
and s."franchisee-id" =1
and
i1."invoice-reference" IS null
GROUP BY c."invoice-item-id" , b."store-id" , c."drug-id"
UNION ALL SELECT
c."invoice-item-id",
b."store-id",
c."drug-id",
(SUM(c."ptr" * a."returned-quantity") * - 1) AS "lp-sales",
(SUM(a."returned-quantity") * - 1) AS "lp-qty-sales"
FROM
"prod2-generico"."customer-return-items-1" a
LEFT JOIN
"prod2-generico"."customer-returns-1" b ON b."id" = a."return-id"
LEFT JOIN
"prod2-generico"."inventory-1" c ON c."id" = a."inventory-id"
LEFT JOIN
"prod2-generico"."invoice-items-1" ii ON ii."id" = c."invoice-item-id"
JOIN
"prod2-generico"."invoices-1" i1 ON i1."id" = ii."franchisee-invoice-id"
JOIN "prod2-generico".stores s on b."store-id" =s.id
WHERE
DATE(a."returned-at") >= '{start_mtd}'
AND DATE(a."returned-at") <= '{end_mtd}'
and s."franchisee-id" =1 and
i1."invoice-reference" IS null
GROUP BY c."invoice-item-id" , b."store-id" , c."drug-id" )
union
(
SELECT
c."invoice-item-id",
b."store-id",
c."drug-id",
SUM(c."ptr" * a."quantity") AS "lp-sales",
SUM(a."quantity") AS "lp-qty-sales"
FROM
"prod2-generico"."bill-items-1" a
LEFT JOIN
"prod2-generico"."bills-1" b ON b."id" = a."bill-id"
LEFT JOIN
"prod2-generico"."inventory-1" c ON c."id" = a."inventory-id"
LEFT JOIN
"prod2-generico"."invoice-items-1" ii ON ii."id" = c."invoice-item-id"
JOIN
"prod2-generico"."invoices-1" i1 ON i1."id" = ii."franchisee-invoice-id"
join "prod2-generico".invoices i on ii."invoice-id" =i.id
join "prod2-generico".stores s on b."store-id" =s.id
WHERE
DATE(a."created-at") >= '{start_mtd}'
AND DATE(a."created-at") <='{end_mtd}'
and s."franchisee-id" !=1 and i1."franchisee-invoice" !=0
and i."distributor-id" !=10000
GROUP BY c."invoice-item-id" , b."store-id" , c."drug-id"
UNION ALL
SELECT
c."invoice-item-id",
b."store-id",
c."drug-id",
(SUM(c."ptr" * a."returned-quantity") * - 1) AS "lp-sales",
(SUM(a."returned-quantity") * - 1) AS "lp-qty-sales"
FROM
"prod2-generico"."customer-return-items-1" a
LEFT JOIN
"prod2-generico"."customer-returns-1" b ON b."id" = a."return-id"
LEFT JOIN
"prod2-generico"."inventory-1" c ON c."id" = a."inventory-id"
LEFT JOIN
"prod2-generico"."invoice-items-1" ii ON ii."id" = c."invoice-item-id"
JOIN
"prod2-generico"."invoices-1" i1 ON i1."id" = ii."franchisee-invoice-id"
join "prod2-generico".invoices i on ii."invoice-id" =i.id
JOIN "prod2-generico".stores s on b."store-id" =s.id
WHERE
DATE(a."returned-at") >= '{start_mtd}'
AND DATE(a."returned-at") <= '{end_mtd}'
and s."franchisee-id" !=1 and i."distributor-id" !=10000 and
i1."franchisee-invoice" !=0
GROUP BY c."invoice-item-id" , b."store-id" , c."drug-id")
{limit_str}
"""
rs_db.execute(query=query, params=None)
sales_df: pd.DataFrame = rs_db.cursor.fetch_dataframe()
logger.info("Data: sales fetched successfully")
sales_2_df = sales_df.groupby(['invoice-item-id', 'drug-id'], as_index=False).agg(
{'lp-sales': ['sum'], 'lp-qty-sales': ['sum']}).reset_index(drop=True)
sales_2_df.columns = ["-".join(x).rstrip('-') for x in sales_2_df.columns.ravel()]
# =============================================================================
# Drug details extraction
# =============================================================================
query = """
select
id as "drug-id",
"drug-name",
"type",
"category",
"company",
"composition"
from
"prod2-generico".drugs d
"""
rs_db.execute(query=query, params=None)
drug_df: pd.DataFrame = rs_db.cursor.fetch_dataframe()
logger.info("Data: drug fetched successfully")
# =============================================================================
# Drug disease extraction
# =============================================================================
query = """
select
"drug-id",
"drug-primary-disease" as "drug-disease"
from
"prod2-generico"."drug-primary-disease"
"""
rs_db.execute(query=query, params=None)
drug_disease_df: pd.DataFrame = rs_db.cursor.fetch_dataframe()
logger.info("Data: drug-disease fetched successfully")
# Merge all data points
local_purchase_df = pd.merge(left=stores_master_df, right=lp_2_df, on=['store-id'], how='right')
local_purchase_df = pd.merge(left=local_purchase_df, right=sales_2_df, on=['invoice-item-id', 'drug-id'],
how='left')
local_purchase_df = pd.merge(left=local_purchase_df, right=drug_df, on=['drug-id'], how='left')
lp_liquidation_df = pd.merge(left=local_purchase_df, right=drug_disease_df, how='left', on=['drug-id'])
logger.info("Merging of all data points successful.")
lp_liquidation_df['refreshed-at'] = datetime.datetime.now()
return lp_liquidation_df
def main(rs_db, s3, limit):
schema = 'prod2-generico'
table_name = 'lp-liquidation'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
    if table_info is None:
        raise Exception(f"table: {table_name} does not exist, create the table first")
    else:
        logger.info(f"Table: {table_name} exists")
""" seek the data """
lp_liquidation_df = seek(rs_db=rs_db, limit=limit)
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=lp_liquidation_df[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-l', '--limit', default=None, type=int, required=False)
parser.add_argument('-jn', '--job_name', default=None, type=str, required=False)
parser.add_argument('-lem', '--log_email_to', default=None, type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
job_name = args.job_name
    log_email_to = args.log_email_to.split(",") if args.log_email_to else []
limit = args.limit
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
logger.info(f"logger.info the env again: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
""" calling the main function """
main(rs_db=rs_db, s3=s3, limit=limit)
# Closing the DB Connection
rs_db.close_connection()
"""
Sending the job logs,
1. But if jobs fails before this step, you will not get the log email, so handle the exception
"""
send_logs_via_email(job_name=job_name, email_to=log_email_to) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/lp-liquidation/lp-liquidation.py | lp-liquidation.py |
import json
import os
import sys
from contextlib import contextmanager
from importlib import util
from inspect import getmembers, isfunction
from typing import Dict
import fire
import pandas as pd
from pandas import DataFrame
from zeno import (
DistillReturn,
ModelReturn,
ZenoParameters,
distill,
metric,
model,
zeno,
)
from zeno.api import MetricReturn, ZenoOptions
class ZenoEvals:
def __init__(
self,
results_file: str,
second_results_file: str = None,
functions_file: str = None,
):
self.dfs = {}
self.results_file = results_file
self.second_results_file = second_results_file
self.functions_file = functions_file
def generate_zeno_config(self) -> ZenoParameters:
if not os.path.exists(self.results_file):
print("ERROR: file '{}' does not exist.".format(self.results_file))
sys.exit(1)
data = []
with open(self.results_file) as f:
for line in f:
data.append(json.loads(line))
df, metric_names = read_results_file(data)
self.dfs[get_model_name(data)] = df
models = [get_model_name(data)]
if self.second_results_file is not None:
data2 = []
with open(self.second_results_file) as f:
for line in f:
data2.append(json.loads(line))
models.append(get_model_name(data2))
df2, _ = read_results_file(data2)
self.dfs[get_model_name(data2)] = df2
functions = [get_model_fn(self.dfs)]
if self.functions_file is not None:
functions = functions + parse_testing_file(self.functions_file)
base_df_columns = ["id", "prompt"]
for m in metric_names:
functions = functions + [get_metric_function(m)]
if "expected" in df.columns:
functions = functions + [get_correct_fn(self.dfs), avg_correct]
base_df_columns = base_df_columns + ["expected"]
zeno_config = ZenoParameters(
metadata=df[base_df_columns],
models=models,
functions=functions,
view="openai-chat",
data_column="prompt",
id_column="id",
cache_path="./.zeno_cache_" + data[0]["spec"]["eval_name"],
port=8080,
batch_size=100,
samples=5,
)
if "expected" in df.columns:
zeno_config.label_column = "expected"
return zeno_config
def get_model_fn(dfs: Dict[str, DataFrame]):
def model_fn(name):
model_df = dfs[name]
def mod(df, ops: ZenoOptions):
return ModelReturn(model_output=model_df["sampled"].loc[df["id"]].tolist())
return mod
return model(model_fn)
def get_correct_fn(dfs: Dict[str, DataFrame]):
def correct(df, ops: ZenoOptions):
mod = [mod for mod in list(dfs.keys()) if mod in ops.output_column][0]
return DistillReturn(distill_output=dfs[mod]["correct"].loc[df["id"]])
return distill(correct)
@contextmanager
def add_to_path(p):
old_path = sys.path
sys.path = sys.path[:]
sys.path.insert(0, p)
try:
yield
finally:
sys.path = old_path
def parse_testing_file(test_file):
# To allow relative imports in test files,
# add their directory to path temporarily.
with add_to_path(os.path.dirname(os.path.abspath(test_file))):
spec = util.spec_from_file_location(str(test_file), test_file)
test_module = util.module_from_spec(spec) # type: ignore
spec.loader.exec_module(test_module) # type: ignore
functions = []
for func_name, func in getmembers(test_module):
if isfunction(func):
if (
hasattr(func, "predict_function")
or hasattr(func, "distill_function")
or hasattr(func, "metric_function")
or hasattr(func, "inference_function")
):
functions.append(func)
return functions
def get_metric_function(metric_name):
def metric_function(df, ops: ZenoOptions):
if len(df) == 0:
return MetricReturn(metric=0.0)
if (
df[metric_name].dtype == "object"
and df[metric_name].value_counts().shape[0] <= 2
):
return MetricReturn(
                metric=df[metric_name].eq(df[metric_name].iloc[0]).mul(1).mean()
)
return MetricReturn(metric=df[metric_name].mean())
metric_function.__name__ = metric_name
return metric(metric_function)
@metric
def avg_correct(df, ops: ZenoOptions):
return MetricReturn(
metric=df[ops.distill_columns["correct"]].astype(int).mean() * 100
)
def read_results_file(data):
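    # Expected input: one JSON object per line from an evals run. Records that carry an
    # "event_id" are parsed by "type": "sampling" (prompt + sampled output), "match"
    # (correct + expected) and "metrics" (arbitrary per-sample metric values).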
data_res = [d for d in data if "event_id" in d]
sampling_df = pd.DataFrame(
[
{
"id": d["sample_id"],
"prompt": d["data"]["prompt"],
"sampled": d["data"]["sampled"][0],
}
for d in data_res
if "type" in d and d["type"] == "sampling"
]
)
match_df = pd.DataFrame(
[
{
"id": d["sample_id"],
"correct": d["data"]["correct"],
"expected": d["data"]["expected"],
}
for d in data_res
if "type" in d and d["type"] == "match"
]
)
metric_names = []
for d in data_res:
if "type" in d and d["type"] == "metrics":
metric_names = list(d["data"].keys())
break
metrics = []
for d in data_res:
if "type" in d and d["type"] == "metrics":
met_obj = {"id": d["sample_id"]}
for name in metric_names:
met_obj[name] = d["data"][name]
metrics.append(met_obj)
metrics_df = pd.DataFrame(metrics)
df = sampling_df
if len(match_df) > 0:
df = df.join(match_df.set_index("id"), on="id")
if len(metrics_df) > 0:
df = df.join(metrics_df.set_index("id"), on="id")
df.set_index("id", inplace=True, drop=False)
return df, metric_names
def get_model_name(data):
name = data[0]["spec"]["completion_fns"][0]
return name.replace(".", "_")
def main(
results_file: str, second_results_file: str = None, functions_file: str = None
):
"""Visualize a result from OpenAI evals using Zeno.
Args:
results_file (path): Result .jsonl file from OpenAI evals.
Often stored in the /tmp/evallogs/ directory.
second_results_file (path): Second result .jsonl file from OpenAI
evals for comparison. Often stored in the /tmp/evallogs/ directory.
functions_file (path, optional): Path to a Python file containing
additional Zeno processing functions. Defaults to None.
"""
    evals = ZenoEvals(results_file, second_results_file, functions_file)
    config = evals.generate_zeno_config()
    zeno(config)
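# Example invocation of the CLI entry point (script name and paths are illustrative,
# not taken from this package's metadata):
#   zeno-evals /tmp/evallogs/my_eval_run.jsonl --second_results_file /tmp/evallogs/other_run.jsonl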
def cli():
fire.Fire(main) | zeno-evals | /zeno_evals-0.1.10-py3-none-any.whl/zeno_evals/main.py | main.py |
MIT License
Copyright (c) 2022 Ángel Alexander Cabrera
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| zeno-evals | /zeno_evals-0.1.10-py3-none-any.whl/zeno_evals-0.1.10.dist-info/LICENSE.md | LICENSE.md |
import logging
from typing import Tuple, Union
import numpy as np
from scipy import sparse as sp
from scipy.stats import rankdata
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils.validation import check_is_fitted
from sliceline.validation import check_array, check_X_e
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Slicefinder(BaseEstimator, TransformerMixin):
"""Slicefinder class.
    SliceLine is a fast, linear-algebra-based slice-finding algorithm for ML model debugging.
Given an input dataset (`X`) and a model error vector (`errors`), SliceLine finds
the `k` slices in `X` that identify where the model performs significantly worse.
A slice is a subspace of `X` defined by one or more predicates.
The maximal dimension of this subspace is controlled by `max_l`.
The slice scoring function is the linear combination of two objectives:
- Find sufficiently large slices, with more than `min_sup` elements
(high impact on the overall model)
- With substantial errors
(high negative impact on sub-group/model)
The importance of each objective is controlled through a single parameter `alpha`.
Slice enumeration and pruning techniques are done via sparse linear algebra.
Parameters
----------
alpha: float, default=0.6
Weight parameter for the importance of the average slice error.
0 < `alpha` <= 1.
k: int, default=1
Maximum number of slices to return.
        Note: slices whose scores tie with the `k`-th slice score are also returned,
        so the number of returned slices (`_n_features_out`) can exceed `k`
        (`_n_features_out` >= `k`).
max_l: int, default=4
Maximum lattice level.
In other words: the maximum number of predicate to define a slice.
min_sup: int or float, default=10
Minimum support threshold.
Inspired by frequent itemset mining, it ensures statistical significance.
If `min_sup` is a float (0 < `min_sup` < 1),
        it represents the fraction of the input dataset (`X`).
verbose: bool, default=True
Controls the verbosity.
Attributes
----------
top_slices_: np.ndarray of shape (_n_features_out, number of columns of the input dataset)
The `_n_features_out` slices with the highest score.
`None` values in slices represent unused column in the slice.
average_error_: float
Mean value of the input error.
top_slices_statistics_: list of dict of length `len(top_slices_)`
The statistics of the slices found sorted by slice's scores.
For each slice, the following statistics are stored:
- slice_score: the score of the slice (defined in `_score` method)
- sum_slice_error: the sum of all the errors in the slice
- max_slice_error: the maximum of all errors in the slice
- slice_size: the number of elements in the slice
- slice_average_error: the average error in the slice (sum_slice_error / slice_size)
References
----------
`SliceLine: Fast, Linear-Algebra-based Slice Finding for ML Model Debugging
<https://mboehm7.github.io/resources/sigmod2021b_sliceline.pdf>`__,
from *Svetlana Sagadeeva* and *Matthias Boehm* of Graz University of Technology.
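
    Examples
    --------
    Minimal usage sketch (illustrative only; `X` is a categorical feature matrix and
    `errors` a per-sample error vector)::

        sf = Slicefinder(alpha=0.95, k=1)
        sf.fit(X, errors)
        sf.top_slices_            # found slices; None marks an unused column
        masks = sf.transform(X)   # (n_samples, n_slices) slice-membership mask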
"""
def __init__(
self,
alpha: float = 0.6,
k: int = 1,
max_l: int = 4,
min_sup: Union[int, float] = 10,
verbose: bool = True,
):
self.alpha = alpha
self.k = k
self.max_l = max_l
self.min_sup = min_sup
self.verbose = verbose
self._one_hot_encoder = self._top_slices_enc = None
self.top_slices_ = self.top_slices_statistics_ = None
self.average_error_ = None
if self.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
def _check_params(self):
"""Check transformer parameters."""
if not 0 < self.alpha <= 1:
raise ValueError(f"Invalid 'alpha' parameter: {self.alpha}")
if self.k <= 0:
raise ValueError(f"Invalid 'k' parameter: {self.k}")
if self.max_l <= 0:
raise ValueError(f"Invalid 'max_l' parameter: {self.max_l}")
if self.min_sup < 0 or (
isinstance(self.min_sup, float) and self.min_sup >= 1
):
raise ValueError(f"Invalid 'min_sup' parameter: {self.min_sup}")
def _check_top_slices(self):
"""Check if slices have been found."""
# Check if fit has been called
check_is_fitted(self)
# Check if a slice has been found
if self.top_slices_.size == 0:
raise ValueError("No transform: Sliceline did not find any slice.")
def fit(self, X, errors):
"""Search for slice(s) on `X` based on `errors`.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
errors: array-like of shape (n_samples, )
Errors of a machine learning model.
Returns
-------
self: object
Returns the instance itself.
"""
self._check_params()
# Update min_sup for a fraction of the input dataset size
if 0 < self.min_sup < 1:
self.min_sup = int(self.min_sup * len(X))
# Check that X and e have correct shape
X_array, errors = check_X_e(X, errors, y_numeric=True)
self._check_feature_names(X, reset=True)
self._search_slices(X_array, errors)
return self
def transform(self, X):
"""Generate slices masks for `X`.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
slices_masks: np.ndarray of shape (n_samples, _n_features_out)
`slices_masks[i, j] == 1`: the `i`-th sample of `X` is in the `j`-th `top_slices_`.
"""
self._check_top_slices()
# Input validation
X = check_array(X)
slices_masks = self._get_slices_masks(X)
return slices_masks.T
def get_slice(self, X, slice_index: int):
"""Filter `X` samples according to the `slice_index`-th slice.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Dataset, where `n_samples` is the number of samples
and `n_features` is the number of features.
slice_index: int
Index of the slice to get from `top_slices_`.
Returns
-------
X_slice: np.ndarray of shape (n_samples in the `slice_index`-th slice, n_features)
Filter `X` samples that are in the `slice_index`-th slice.
"""
self._check_top_slices()
# Input validation
X = check_array(X, force_all_finite=False)
slices_masks = self._get_slices_masks(X)
return X[np.where(slices_masks[slice_index])[0], :]
def get_feature_names_out(self):
"""Get output feature names for transformation.
Returns
-------
feature_names_out : ndarray of str objects
The following output feature names are generated:
`["slice_0", "slice_1", ..., "slice_(_n_features_out)"]`.
"""
check_is_fitted(self)
feature_names = [f"slice_{i}" for i in range(self._n_features_out)]
return np.array(feature_names, dtype=object)
def _get_slices_masks(self, X):
"""Private utilities function generating slices masks for `X`."""
X_encoded = self._one_hot_encoder.transform(X)
# Shape X_encoded: (X.shape[0], total number of modalities in _one_hot_encoder.categories_)
# Shape _top_slices_enc: (top_slices_.shape[0], X_encoded[1])
slice_candidates = self._top_slices_enc @ X_encoded.T
# self._top_slices_enc.sum(axis=1) is the number of predicate(s) for each top_slices_
slices_masks = (
slice_candidates == self._top_slices_enc.sum(axis=1)
).A.astype(int)
return slices_masks
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.top_slices_.shape[0]
@staticmethod
def _dummify(array: np.ndarray, n_col_x_encoded: int) -> sp.csr_matrix:
"""Dummify `array` with respect to `n_col_x_encoded`.
Assumption: v does not contain any 0."""
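        # e.g. _dummify(np.array([2, 5]), 6) returns a 2x6 boolean CSR matrix with
        # column 2 set in the first row and column 5 set in the second (1-indexed).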
assert (
0 not in array
), "Modality 0 is not expected to be one-hot encoded."
one_hot_encoding = sp.lil_matrix(
(array.size, n_col_x_encoded), dtype=bool
)
one_hot_encoding[np.arange(array.size), array - 1] = True
return one_hot_encoding.tocsr()
def _maintain_top_k(
self,
slices: sp.csr_matrix,
statistics: np.ndarray,
top_k_slices: sp.csr_matrix,
top_k_statistics: np.ndarray,
) -> Tuple[sp.csr_matrix, np.ndarray]:
"""Add new `slices` to `top_k_slices` and update the top-k slices."""
# prune invalid min_sup and scores
valid_slices_mask = (statistics[:, 3] >= self.min_sup) & (
statistics[:, 0] > 0
)
if np.sum(valid_slices_mask) != 0:
slices, statistics = (
slices[valid_slices_mask],
statistics[valid_slices_mask],
)
if (slices.shape[1] != top_k_slices.shape[1]) & (
slices.shape[1] == 1
):
slices, statistics = slices.T, statistics.T
# evaluated candidates and previous top-k
slices = sp.vstack([top_k_slices, slices])
statistics = np.concatenate([top_k_statistics, statistics])
# extract top-k
top_slices_bool = (
rankdata(-statistics[:, 0], method="min") <= self.k
)
top_k_slices, top_k_statistics = (
slices[top_slices_bool],
statistics[top_slices_bool],
)
top_slices_indices = np.argsort(-top_k_statistics[:, 0])
top_k_slices, top_k_statistics = (
top_k_slices[top_slices_indices],
top_k_statistics[top_slices_indices],
)
return top_k_slices, top_k_statistics
def _score_ub(
self,
slice_sizes_ub: np.ndarray,
slice_errors_ub: np.ndarray,
max_slice_errors_ub: np.ndarray,
n_col_x_encoded: int,
) -> np.ndarray:
"""Compute the upper-bound score for all the slices."""
# Since slice_scores is either monotonically increasing or decreasing, we
# probe interesting points of slice_scores in the interval [min_sup, ss],
# and compute the maximum to serve as the upper bound
potential_solutions = np.column_stack(
(
self.min_sup * np.ones(slice_sizes_ub.shape[0]),
np.maximum(
slice_errors_ub / max_slice_errors_ub, self.min_sup
),
slice_sizes_ub,
)
)
slice_scores_ub = np.amax(
(
self.alpha
* (
np.minimum(
potential_solutions.T * max_slice_errors_ub,
slice_errors_ub,
).T
/ self.average_error_
- potential_solutions
)
- (1 - self.alpha) * (n_col_x_encoded - potential_solutions)
)
/ potential_solutions,
axis=1,
)
return slice_scores_ub
@staticmethod
def _analyse_top_k(top_k_statistics: np.ndarray) -> tuple:
"""Get the maximum and the minimum slices scores."""
max_slice_scores = min_slice_scores = -np.inf
if top_k_statistics.shape[0] > 0:
max_slice_scores = top_k_statistics[0, 0]
min_slice_scores = top_k_statistics[
top_k_statistics.shape[0] - 1, 0
]
return max_slice_scores, min_slice_scores
def _score(
self,
slice_sizes: np.ndarray,
slice_errors: np.ndarray,
n_row_x_encoded: int,
) -> np.ndarray:
"""Compute the score for all the slices."""
slice_scores = self.alpha * (
(slice_errors / slice_sizes) / self.average_error_ - 1
) - (1 - self.alpha) * (n_row_x_encoded / slice_sizes - 1)
return np.nan_to_num(slice_scores, nan=-np.inf)
def _eval_slice(
self,
x_encoded: sp.csr_matrix,
errors: np.ndarray,
slices: sp.csr_matrix,
level: int,
) -> np.ndarray:
"""Compute several statistics for all the slices."""
slice_candidates = x_encoded @ slices.T == level
slice_sizes = slice_candidates.sum(axis=0).A[0]
slice_errors = errors @ slice_candidates
max_slice_errors = slice_candidates.T.multiply(errors).max(axis=1).A
# score of relative error and relative size
slice_scores = self._score(
slice_sizes, slice_errors, x_encoded.shape[0]
)
return np.column_stack(
[slice_scores, slice_errors, max_slice_errors, slice_sizes]
)
def _create_and_score_basic_slices(
self,
x_encoded: sp.csr_matrix,
n_col_x_encoded: int,
errors: np.ndarray,
) -> Tuple[sp.csr_matrix, np.ndarray]:
"""Initialise 1-slices, i.e. slices with one predicate."""
slice_sizes = x_encoded.sum(axis=0).A[0]
slice_errors = errors @ x_encoded
max_slice_errors = x_encoded.T.multiply(errors).max(axis=1).A[:, 0]
# working set of active slices (#attr x #slices) and top-k
valid_slices_mask = (slice_sizes >= self.min_sup) & (slice_errors > 0)
attr = np.arange(1, n_col_x_encoded + 1)[valid_slices_mask]
slice_sizes = slice_sizes[valid_slices_mask]
slice_errors = slice_errors[valid_slices_mask]
max_slice_errors = max_slice_errors[valid_slices_mask]
slices = self._dummify(attr, n_col_x_encoded)
# score 1-slices and create initial top-k
slice_scores = self._score(
slice_sizes, slice_errors, x_encoded.shape[0]
)
statistics = np.column_stack(
(slice_scores, slice_errors, max_slice_errors, slice_sizes)
)
n_col_dropped = n_col_x_encoded - sum(valid_slices_mask)
logger.debug(
"Dropping %i/%i features below min_sup = %i."
% (n_col_dropped, n_col_x_encoded, self.min_sup)
)
return slices, statistics
def _get_pruned_s_r(
self, slices: sp.csr_matrix, statistics: np.ndarray
) -> Tuple[sp.csr_matrix, np.ndarray]:
"""Prune invalid slices.
        This does not affect overall pruning effectiveness, due to the handling of missing parents."""
valid_slices_mask = (statistics[:, 3] >= self.min_sup) & (
statistics[:, 1] > 0
)
return slices[valid_slices_mask], statistics[valid_slices_mask]
@staticmethod
def _join_compatible_slices(
slices: sp.csr_matrix, level: int
) -> np.ndarray:
"""Join compatible slices according to `level`."""
slices_int = slices.astype(int)
join = (slices_int @ slices_int.T).A == level - 2
return np.triu(join, 1) * join
@staticmethod
def _combine_slices(
slices: sp.csr_matrix,
statistics: np.ndarray,
compatible_slices: np.ndarray,
) -> Tuple[sp.csr_matrix, np.ndarray, np.ndarray, np.ndarray]:
"""Combine slices by exploiting parents node statistics."""
parent_1_idx, parent_2_idx = np.where(compatible_slices == 1)
pair_candidates = slices[parent_1_idx] + slices[parent_2_idx]
slice_errors = np.minimum(
statistics[parent_1_idx, 1], statistics[parent_2_idx, 1]
)
max_slice_errors = np.minimum(
statistics[parent_1_idx, 2], statistics[parent_2_idx, 2]
)
slice_sizes = np.minimum(
statistics[parent_1_idx, 3], statistics[parent_2_idx, 3]
)
return pair_candidates, slice_sizes, slice_errors, max_slice_errors
@staticmethod
def _prune_invalid_self_joins(
feature_offset_start: np.ndarray,
feature_offset_end: np.ndarray,
pair_candidates: sp.csr_matrix,
slice_sizes: np.ndarray,
slice_errors: np.ndarray,
max_slice_errors: np.ndarray,
) -> Tuple[sp.csr_matrix, np.ndarray, np.ndarray, np.ndarray]:
"""Prune invalid self joins (>1 bit per feature)."""
valid_slices_mask = np.full(pair_candidates.shape[0], True)
for start, end in zip(feature_offset_start, feature_offset_end):
valid_slices_mask = (
valid_slices_mask
* (pair_candidates[:, start:end].sum(axis=1) <= 1).A[:, 0]
)
return (
pair_candidates[valid_slices_mask],
slice_sizes[valid_slices_mask],
slice_errors[valid_slices_mask],
max_slice_errors[valid_slices_mask],
)
@staticmethod
def _prepare_deduplication_and_pruning(
feature_offset_start: np.ndarray,
feature_offset_end: np.ndarray,
feature_domains: np.ndarray,
pair_candidates: sp.csr_matrix,
) -> np.ndarray:
"""Prepare IDs for deduplication and pruning."""
ids = np.zeros(pair_candidates.shape[0])
dom = feature_domains + 1
for j, (start, end) in enumerate(
zip(feature_offset_start, feature_offset_end)
):
sub_pair_candidates = pair_candidates[:, start:end]
# sub_p should not contain multiple True on the same line
i = sub_pair_candidates.argmax(axis=1).T + np.any(
sub_pair_candidates.A, axis=1
)
ids = ids + i.A * np.prod(dom[(j + 1) : dom.shape[0]])
return ids
def _get_pair_candidates(
self,
slices: sp.csr_matrix,
statistics: np.ndarray,
top_k_statistics: np.ndarray,
level: int,
n_col_x_encoded: int,
feature_domains: np.ndarray,
feature_offset_start: np.ndarray,
feature_offset_end: np.ndarray,
) -> sp.csr_matrix:
"""Compute and prune plausible slices candidates."""
compatible_slices = self._join_compatible_slices(slices, level)
if np.sum(compatible_slices) == 0:
return sp.csr_matrix(np.empty((0, slices.shape[1])))
(
pair_candidates,
slice_sizes,
slice_errors,
max_slice_errors,
) = self._combine_slices(slices, statistics, compatible_slices)
(
pair_candidates,
slice_sizes,
slice_errors,
max_slice_errors,
) = self._prune_invalid_self_joins(
feature_offset_start,
feature_offset_end,
pair_candidates,
slice_sizes,
slice_errors,
max_slice_errors,
)
if pair_candidates.shape[0] == 0:
return sp.csr_matrix(np.empty((0, slices.shape[1])))
ids = self._prepare_deduplication_and_pruning(
feature_offset_start,
feature_offset_end,
feature_domains,
pair_candidates,
)
# remove duplicate candidates and select corresponding statistics
_, unique_candidate_indices, duplicate_counts = np.unique(
ids, return_index=True, return_counts=True
)
# Slices at level i normally have i parents (cf. section 3.1 in the paper)
# We want to keep only slices whose parents have not been pruned.
# If all the parents are present they are going to get combined 2 by 2 in i*(i-1)/2 ways
# So, we select only candidates which appear with the correct cardinality.
all_parents_mask = duplicate_counts == level * (level - 1) / 2
unique_candidate_indices = unique_candidate_indices[all_parents_mask]
pair_candidates = pair_candidates[unique_candidate_indices]
slice_sizes = slice_sizes[unique_candidate_indices]
slice_errors = slice_errors[unique_candidate_indices]
max_slice_errors = max_slice_errors[unique_candidate_indices]
slice_scores = self._score_ub(
slice_sizes,
slice_errors,
max_slice_errors,
n_col_x_encoded,
)
        # pruning_sizes appears to always be fully True here,
        # because _maintain_top_k already applies the slice_sizes (min_sup) filter.
pruning_sizes = slice_sizes >= self.min_sup
_, min_slice_scores = self._analyse_top_k(top_k_statistics)
pruning_scores = (slice_scores > min_slice_scores) & (slice_scores > 0)
return pair_candidates[pruning_scores & pruning_sizes]
def _search_slices(
self,
input_x: np.ndarray,
errors: np.ndarray,
) -> None:
"""Main function of the SliceLine algorithm."""
# prepare offset vectors and one-hot encoded input_x
self._one_hot_encoder = OneHotEncoder(handle_unknown="ignore")
x_encoded = self._one_hot_encoder.fit_transform(input_x)
feature_domains: np.ndarray = np.array(
[len(sub_array) for sub_array in self._one_hot_encoder.categories_]
)
feature_offset_end = np.cumsum(feature_domains)
feature_offset_start = feature_offset_end - feature_domains
# initialize statistics and basic slices
n_col_x_encoded = x_encoded.shape[1]
self.average_error_ = float(np.mean(errors))
slices, statistics = self._create_and_score_basic_slices(
x_encoded,
n_col_x_encoded,
errors,
)
# initialize top-k
top_k_slices, top_k_statistics = self._maintain_top_k(
slices,
statistics,
sp.csr_matrix((0, n_col_x_encoded)),
np.zeros((0, 4)),
)
max_slice_scores, min_slice_scores = self._analyse_top_k(
top_k_statistics
)
logger.debug(
"Initial top-K: count=%i, max=%f, min=%f"
% (top_k_slices.shape[0], max_slice_scores, min_slice_scores)
)
# lattice enumeration w/ size/error pruning, one iteration per level
# termination condition (max #feature levels)
level = 1
min_condition = min(input_x.shape[1], self.max_l)
while (
(slices.shape[0] > 0)
& (slices.sum() > 0)
& (level < min_condition)
):
level += 1
# enumerate candidate join pairs, including size/error pruning
slices, statistics = self._get_pruned_s_r(slices, statistics)
nr_s = slices.shape[0]
slices = self._get_pair_candidates(
slices,
statistics,
top_k_statistics,
level,
n_col_x_encoded,
feature_domains,
feature_offset_start,
feature_offset_end,
)
logger.debug("Level %i:" % level)
logger.debug(
" -- generated paired slice candidates: %i -> %i"
% (nr_s, slices.shape[0])
)
# extract and evaluate candidate slices
statistics = self._eval_slice(x_encoded, errors, slices, level)
# maintain top-k after evaluation
top_k_slices, top_k_statistics = self._maintain_top_k(
slices, statistics, top_k_slices, top_k_statistics
)
max_slice_scores, min_slice_scores = self._analyse_top_k(
top_k_statistics
)
valid = np.sum(
(statistics[:, 3] >= self.min_sup) & (statistics[:, 1] > 0)
)
logger.debug(
" -- valid slices after eval: %s/%i" % (valid, slices.shape[0])
)
logger.debug(
" -- top-K: count=%i, max=%f, min=%f"
% (top_k_slices.shape[0], max_slice_scores, min_slice_scores)
)
self._top_slices_enc = top_k_slices.copy()
if top_k_slices.shape[0] == 0:
self.top_slices_ = np.empty((0, input_x.shape[1]))
else:
self.top_slices_ = self._one_hot_encoder.inverse_transform(
top_k_slices
)
# compute slices' average errors
top_k_statistics = np.column_stack(
(
top_k_statistics,
np.divide(top_k_statistics[:, 1], top_k_statistics[:, 3]),
)
)
# transform statistics to a list of dict
statistics_names = [
"slice_score",
"sum_slice_error",
"max_slice_error",
"slice_size",
"slice_average_error",
]
self.top_slices_statistics_ = [
{
stat_name: stat_value
for stat_value, stat_name in zip(statistic, statistics_names)
}
for statistic in top_k_statistics
]
logger.debug("Terminated at level %i." % level) | zeno-sliceline | /zeno_sliceline-0.0.1-py3-none-any.whl/sliceline/slicefinder.py | slicefinder.py |