code | package | path | filename
---|---|---|---|
```
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
from zeno_etl_libs.helper.parameter.job_parameter import parameter
env = "prod"
notebook_file = "\\scripts\\ipc-pmf\\ipc_pmf.ipynb"
parameters = {
"env": "prod",
"email_to":"[email protected]",
"debug_mode":"Y"
}
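# Temporarily switch env to dev to read job parameters; the target env is restored below before execution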
os.environ['env'] = 'dev'
args = parameter.get_params(job_id=171)
reset_stores = args["reset_stores"]
batch_size = 4
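# Split reset stores into chunks of batch_size; each chunk is executed as a separate notebook run in the loop below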
store_batch_split = [reset_stores[i:i+batch_size] for i in range(0, len(reset_stores), batch_size)]
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
for batch_stores in store_batch_split:
run_batch = store_batch_split.index(batch_stores) + 1
tot_batch = len(store_batch_split)
# add to parameters
parameters["run_batch"] = run_batch
parameters["tot_batch"] = tot_batch
parameters["batch_stores"] = batch_stores
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=7200,
in_vpc=True,
instance_type="ml.c5.4xlarge",
env=env,
check_completion_status=False
)
# instances:
# ml.c5.2xlarge, ml.c5.4xlarge, ml.c5.9xlarge
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_ipc_pmf.ipynb | run_ipc_pmf.ipynb |
```
!pip uninstall zeno_etl_libs
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "\\scripts\\tweets\\zeno-tweets.ipynb"
parameters = {
"env": env,
"full_run":0
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600,
in_vpc=True,
instance_type="ml.m5.large",
env=env
)
!aws s3 cp s3://sagemaker-ap-south-1-921939243643/papermill_output/zeno-tweets-2022-06-24-12-54-23.ipynb .
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_zeno_tweets.ipynb | run_zeno_tweets.ipynb |
```
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "\\scripts\\new-stores-ss-main\\new_stores_ss_main.ipynb"
parameters = {
"env": "prod",
"email_to":"[email protected]",
"debug_mode":"Y"
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600,
in_vpc=True,
env=env
)
# if env == "dev":
# import papermill as pm
# pm.execute_notebook(
# input_path=input_file,
# output_path=f"{output_path}/{result}",
# parameters=parameters
# )
# elif env in ("stage", "prod"):
# run_notebook.run_notebook(
# image=f"{env}-notebook-runner",
# notebook=input_file,
# output=output_path,
# parameters=parameters,
# timeout_in_sec=7200,
# in_vpc=True,
# env=env,
# instance_type="ml.c5.9xlarge"
# )
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_new_stores_ss_main.ipynb | run_new_stores_ss_main.ipynb |
```
!pip uninstall zeno_etl_libs
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "\\scripts\\playstore-review\\playstore-reviews-analysis.ipynb"
parameters = {
"env": env,
"full_run":1
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600,
in_vpc=True,
instance_type="ml.m5.large",
env=env
)
!aws s3 cp s3://sagemaker-ap-south-1-921939243643/papermill_output/playstore-reviews-analysis-2022-07-20-13-06-31.ipynb .
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_playstore_reviews_analysis.ipynb | run_playstore_reviews_analysis.ipynb |
```
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = 'prod'
notebook_file = "\\scripts\\drug-std-info\\drug_std_info.ipynb"
parameters = {
"env": "prod",
"email_to":"[email protected]",
"debug_mode":"Y"
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600,
in_vpc=True,
env=env,
instance_type="ml.c5.9xlarge"
)
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_drug_std_info.ipynb | run_drug_std_info.ipynb |
```
"""main wrapper for IPC safety stock reset"""
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = 'stage'
notebook_file = "/stage-scripts/Demo/Demo.ipynb"
parameters = {
"f_name":"Ram",
"l_name":"Babu"
}
os.environ['env'] = env
base = "/".join(os.getcwd().split("/")[:-2])
input_file = base + notebook_file
output_path = base + "/run/logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600
)
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run.ipynb | run.ipynb |
```
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
from zeno_etl_libs.helper.parameter.job_parameter import parameter
env = "prod"
notebook_file = "\\scripts\\ipc2-ss-main\\ipc2_ss_main.ipynb"
parameters = {
"env": "prod",
"email_to":"[email protected]",
"debug_mode":"Y"
}
os.environ['env'] = 'dev'
args = parameter.get_params(job_id=140)
reset_stores = args["reset_stores"]
batch_size = 4
store_batch_split = [reset_stores[i:i+batch_size] for i in range(0, len(reset_stores), batch_size)]
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
for batch_stores in store_batch_split:
run_batch = store_batch_split.index(batch_stores) + 1
tot_batch = len(store_batch_split)
# add to parameters
parameters["run_batch"] = run_batch
parameters["tot_batch"] = tot_batch
parameters["batch_stores"] = batch_stores
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600,
in_vpc=True,
instance_type="ml.c5.2xlarge",
env=env,
check_completion_status=False
)
# instances:
# ml.c5.2xlarge, ml.c5.4xlarge, ml.c5.9xlarge
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_ipc2_ss_main.ipynb | run_ipc2_ss_main.ipynb |
```
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "\\scripts\\ipc-ss-main\\ipc_ss_main.ipynb"
parameters = {
"env": "prod",
"email_to":"[email protected]",
"debug_mode":"Y"
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=7200,
in_vpc=True,
instance_type="ml.c5.2xlarge",
env=env
)
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_ipc_ss_main.ipynb | run_ipc_ss_main.ipynb |
```
!pip uninstall zeno_etl_libs
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "\\scripts\\playstore-review\\playstore-patients-batch-process.ipynb"
parameters = {
"env": env,
"full_run":1,
"email_to":"NA"
}
os.environ['env'] = env
base = "\\".join(os.getcwd().split("\\")[:-2])
input_file = base + notebook_file
output_path = base + "\\run\\logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600,
in_vpc=True,
instance_type="ml.m5.large",
env=env
)
!aws s3 cp s3://sagemaker-ap-south-1-921939243643/papermill_output/playstore-patients-batch-process-2022-08-02-07-45-06.ipynb .
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_playstore_patients.ipynb | run_playstore_patients.ipynb |
```
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
notebook_file = "/scripts/warehouse/wh_forecast_reset.ipynb"
parameters = {
"env": env
}
os.environ['env'] = env
base = "/".join(os.getcwd().split("/")[:-2])
input_file = base + notebook_file
output_path = base + "/run//logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=7200,
in_vpc=True,
env=env,
instance_type="ml.c5.9xlarge"
)
# if env == "dev":
# import papermill as pm
# pm.execute_notebook(
# input_path=input_file,
# output_path=f"{output_path}/{result}",
# parameters=parameters
# )
# elif env in ("stage", "prod"):
# run_notebook.run_notebook(
# image=f"{env}-notebook-runner",
# notebook=input_file,
# output=output_path,
# parameters=parameters,
# timeout_in_sec=7200,
# in_vpc=True,
# env=env,
# instance_type="ml.c5.9xlarge"
# )
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_wh_forecast_reset.ipynb | run_wh_forecast_reset.ipynb |
```
!pip uninstall zeno_etl_libs
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = "prod"
os.environ['env'] = env
notebook_file = "/scripts/playstore-review/playstore-patients-batch-process.ipynb"
parameters = {
"env":env,
"full_run": 1,
"email_to": "NA",
"batch_size": 25
}
base = "/".join(os.getcwd().split("/")[:-2])
input_file = base + notebook_file
output_path = base + "/run/logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600,
in_vpc=True,
instance_type="ml.m5.large",
env=env
)
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/run_playstore_patients_batch.ipynb | run_playstore_patients_batch.ipynb |
```
"""main wrapper for IPC safety stock reset"""
import sys
import datetime
import time
import re
import os
# To add path so that we can import zeno_etl_libs from local folder
sys.path.append('../../../..')
from zeno_etl_libs.helper.run_notebook import run_notebook
env = 'stage'
notebook_file = "/scripts/experiments/demo.ipynb"
parameters = {
"f_name":"Ram",
"l_name":"Babu"
}
os.environ['env'] = env
base = "/".join(os.getcwd().split("/")[:-2])
input_file = base + notebook_file
output_path = base + "/run/logs"
name = os.path.basename(notebook_file)
nb_name, nb_ext = os.path.splitext(name)
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
result = "{}-{}{}".format(nb_name, timestamp, nb_ext)
if env == "dev":
import papermill as pm
pm.execute_notebook(
input_path=input_file,
output_path=f"{output_path}/{result}",
parameters=parameters
)
elif env in ("stage", "prod"):
run_notebook.run_notebook(
image=f"{env}-notebook-runner",
notebook=input_file,
output=output_path,
parameters=parameters,
timeout_in_sec=3600
)
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/sagemaker-jobs/src/run/code/.ipynb_checkpoints/run-checkpoint.ipynb | run-checkpoint.ipynb |
```
set -ue
module_name="glue-jobs"
env=$1
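# Build flow: package etl_libs and zeno_etl_libs as eggs, then sync glue-jobs and sagemaker-jobs artifacts to S3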
echo "========================[ Build: $module_name ]========================="
src_base_path=$(find $CODEBUILD_SRC_DIR/$module_name/ -iname src -type d)
ml_src_base_path=$(find $CODEBUILD_SRC_DIR/sagemaker-jobs/ -iname src -type d)
templates_base_path=$(find $CODEBUILD_SRC_DIR/$module_name/ -iname templates -type d)
artifacts_base_path="s3://aws-$env-glue-assets-921939243643-ap-south-1/artifact/$module_name"
ml_artifact_base_path="s3://aws-$env-glue-assets-921939243643-ap-south-1/artifact/sagemaker-jobs"
# Packing with dependencies
yum install gcc-c++ python3-devel unixODBC-devel -y
ln -s /usr/libexec/gcc/x86_64-amazon-linux/7.2.1/cc1plus /usr/bin/
python3.6 -m pip install --upgrade pip
# creation of folder for extra dependencies
mkdir $src_base_path/etl_libs/
#mkdir $src_base_path/etl_libs/google/
mkdir $src_base_path/etl_libs/regex/
mkdir $src_base_path/zeno_etl_libs/
echo "========================[ Packaging etl libs ]========================="
cp $CODEBUILD_SRC_DIR/etl_libs/setup.py $src_base_path/etl_libs/setup.py
#cp -R $CODEBUILD_SRC_DIR/extra_dependency/google/* $src_base_path/etl_libs/google
#cp -R $CODEBUILD_SRC_DIR/extra_dependency/regex/* $src_base_path/etl_libs/regex
python3.6 -m pip install -r $CODEBUILD_SRC_DIR/requirements.txt --target $src_base_path/etl_libs/
cd $src_base_path/etl_libs/
#python3.6 -m pip install https://aws-stage-glue-assets-921939243643-ap-south-1.s3.ap-south-1.amazonaws.com/artifact/glue-jobs/src/regex-2022.4.24-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
python3.6 setup.py bdist_egg
cp ./dist/etl_pkg-0.0.6-py3.6.egg $src_base_path/
cd ..
rm -rf $src_base_path/etl_libs/
echo "========================[ Packaging Zeno etl libs ]========================="
mkdir $src_base_path/zeno_etl_libs/zeno_etl_libs
echo "copying files from zeno_etl_libs"
cp -R $CODEBUILD_SRC_DIR/zeno_etl_libs/* $src_base_path/zeno_etl_libs/zeno_etl_libs
cp $CODEBUILD_SRC_DIR/zeno_etl_libs/setup.py $src_base_path/zeno_etl_libs
cd $src_base_path/zeno_etl_libs/
python3.6 setup.py bdist_egg
cp ./dist/zeno_etl_pkg-0.0.6-py3.6.egg $src_base_path/
cd ..
rm -rf $src_base_path/zeno_etl_libs/
echo "========================[ Sagemaker Deployment ]========================="
#aws codebuild delete-project --name create-sagemaker-container-$env-notebook-runner
#python $CODEBUILD_SRC_DIR/setup.py sdist bdist_wheel
#nohup twine upload dist/* --verbose -u kuldeepsingh -p bEmham-6sonke-forcex &
#pip install zeno-etl-libs
#pip install https://github.com/aws-samples/sagemaker-run-notebook/releases/download/v0.20.0/sagemaker_run_notebook-0.20.0.tar.gz
#cp $CODEBUILD_SRC_DIR/extra_dependency/Dockerfile /root/.pyenv/versions/3.9.5/lib/python3.9/site-packages/sagemaker_run_notebook/container
#nohup run-notebook create-container $env-notebook-runner --requirements $CODEBUILD_SRC_DIR/requirements-ml.txt --no-logs &
echo "========================[ Syncing artifacts ]========================="
# Upload templates to artifacts-bucket
echo "Syncing the artifacts"
aws s3 sync $templates_base_path/ $artifacts_base_path/templates/
aws s3 sync $src_base_path/ $artifacts_base_path/src/
aws s3 sync $ml_src_base_path/ $ml_artifact_base_path
echo "Build.sh completed" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/build.sh | build.sh |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
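# main(): full refresh of the delivery-tracking-metadata table inside a single transaction (delete all rows, then re-insert the latest snapshot)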
def main(db):
table_name = "delivery-tracking-metadata"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"delivery-status",
"assigned-to",
"assigned-to-id",
"dispatcher",
"receiver",
"delivered-at",
"completed-at",
"vendor-bill-number",
"no-of-deliveries",
"scheduled-at",
"assigned-at"
)
select
"patient-store-order-id" as "id",
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "updated-at",
"delivery-status" as "delivery-status" ,
"assigned-to" as "assigned-to",
"assigned-to-id" as "assigned-to_id",
"dispatcher" ,
"receiver" ,
"delivered-at" as "delivered-at" ,
"completed-at" as "completed-at",
"vendor-bill-number" as "vendor-bill-number",
"no-of-deliveries",
"scheduled-at",
"assigned-at"
from
(
select
"patient-store-order-id",
"delivery-status" ,
"assigned-to" ,
"assigned-to-id" ,
"dispatcher" ,
"vendor-bill-number",
"receiver" ,
"delivered-at" ,
"completed-at",
row_number () over (partition by "patient-store-order-id"
order by
"delivered-at" desc) as row_num,
dense_rank () over (partition by "patient-store-order-id"
order by
"delivered-at" ) as "no-of-deliveries",
dt."schedule-at" as "scheduled-at",
dt."created-at" as "assigned-at"
from
"prod2-generico"."delivery-tracking" dt
inner join "prod2-generico"."patients-store-orders" pso on
pso.id = dt."patient-store-order-id"
where
pso."bill-id" is not null
and pso."order-type" = 'delivery') d1
where
d1.row_num = 1;
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."delivery-tracking-metadata";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection()
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/delivery-tracking/delivery-tracking.py | delivery-tracking.py |
```
import argparse
import os
import sys
sys.path.append('../../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
import datetime
from dateutil.tz import gettz
from zeno_etl_libs.db.db import DB, PostGreWrite
from zeno_etl_libs.helper import helper
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-bs', '--batch_size', default=10000, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
batch_size = args.batch_size
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
# PostGre
pg = PostGreWrite()
pg.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'generic-affinity'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# =============================================================================
# Retrieve recent patient entries
# =============================================================================
mdate ="""
select
max(date(last_visit)) as max_date
from
generic_affinity ga ;
"""
mdate1 ="""
select
max("last-visit"::date) as max_date
from
"prod2-generico"."generic-affinity";
"""
m_date = pd.read_sql_query(mdate, pg.connection)
m_date1 = rs_db.get_df(mdate1)
m_date = m_date.append(m_date1)
max_date = m_date['max_date'].min()
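# (the older of the two max last-visit dates is used so Postgres and Redshift are refreshed from a common starting point)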
if pd.isna(max_date)==False:
date1 = max_date
else:
date1= '2017-05-13'
date1= datetime.datetime.strptime(date1, '%Y-%m-%d')
date1 = date1.strftime('%Y-%m-%d')
mquery = f""" (select
"patient-id"
from
"prod2-generico"."sales"
where
"created-date" >= '{date1}'
and "created-date" < CURRENT_DATE
group by
"patient-id") """
# =============================================================================
# patient level bill count
# =============================================================================
gb = f"""
select
s1."patient-id" ,
count(distinct case when s1."type" = 'generic' and s1."bill-flag" = 'gross' then s1."bill-id" end ) as "generic_bill_count",
count(distinct case when s1."bill-flag" = 'gross' then s1."bill-id" end) as gross_bill_count
from
"prod2-generico".sales s1
inner join
{mquery} s2 on
s1."patient-id" = s2."patient-id"
where
date(s1."created-at")< CURRENT_DATE
group by
s1."patient-id"
"""
p_bills=rs_db.get_df(gb)
logger.info("Data: patient level bill count fetched successfully")
p_bills.columns = [c.replace('-', '_') for c in p_bills.columns]
p_bills[['generic_bill_count', 'gross_bill_count']] = p_bills[['generic_bill_count', 'gross_bill_count']].fillna(0)
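# generic_bill_pct: share of a patient's gross bills that contain at least one generic item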
p_bills['generic_bill_pct'] = (p_bills['generic_bill_count'] / (p_bills['gross_bill_count'])) * 100
# =============================================================================
# patient level substitution
# =============================================================================
subs = f"""
select
s1."patient-id" ,
s1."substitution-status" ,
sum(s1."quantity") as quantity
from
"prod2-generico"."sales" s1
inner join
{mquery} s2 on
s2."patient-id" = s1."patient-id"
where
s1."bill-flag" = 'gross'
and s1."type" = 'generic'
and date(s1."created-at")<CURRENT_DATE
group by
s1."patient-id", s1."substitution-status"
"""
p_subs=rs_db.get_df(subs)
p_subs.columns = [c.replace('-', '_') for c in p_subs.columns]
logger.info(f"p_subs: {len(p_subs)}")
p_subs['quantity'].fillna(0, inplace=True)
p_subs1 = pd.pivot_table(
p_subs, values='quantity', index=['patient_id'], columns=['substitution_status']).reset_index()
p_subs1.columns = [c.replace('-', '_') for c in p_subs1.columns]
p_subs1.fillna(0, inplace=True)
col_list=[]
act_col=['substituted','not_substituted','not_in_inventory','generic_unavailable']
for i in act_col:
if i not in p_subs1.columns:
p_subs1[i]=0
p_subs1['subs_pct'] = (p_subs1['substituted'] / (p_subs1['substituted'] + p_subs1['not_substituted'])) * 100
metadata = pd.merge(right=p_subs1, left=p_bills, on=['patient_id'], how='left')
# =============================================================================
# patient level return %
# =============================================================================
ret = f"""
select
s1."patient-id" ,
count(distinct case when s1."type" = 'generic' and s1."bill-flag" = 'return' then s1."bill-id" end ) as "generic_return_bill_count",
count(distinct case when s1."bill-flag" = 'return' then s1."bill-id" end) as "gross_return_bill"
from
"prod2-generico".sales s1
inner join
{mquery} s2
on s2."patient-id" = s1."patient-id"
where
date(s1."created-at")< CURRENT_DATE
group by
s1."patient-id"
"""
p_return=rs_db.get_df(ret)
logger.info(f"p_return: {len(p_return)}")
p_return.columns = [c.replace('-', '_') for c in p_return.columns]
p_return[['generic_return_bill_count', 'gross_return_bill']] = p_return[['generic_return_bill_count', 'gross_return_bill']].fillna(0)
p_return['return_pct'] = ((p_return['generic_return_bill_count']) / (p_return['gross_return_bill'])) * 100
p_return['not_return_pct'] = 100 - p_return['return_pct']
metadata = pd.merge(left=metadata, right=p_return, on=['patient_id'], how='left')
metadata['not_return_pct'].fillna(100, inplace=True)
# =============================================================================
# patient level recency
# =============================================================================
rec = f"""
select
s1."patient-id" ,
max(s1."created-at") as "last_visit"
from
"prod2-generico".sales s1
inner join
{mquery} s2
on s2."patient-id" = s1."patient-id"
where
s1."bill-flag" = 'gross'
and date(s1."created-at")<CURRENT_DATE
group by
s1."patient-id"
"""
p_recency=rs_db.get_df(rec)
logger.info(f"p_recency: {len(p_recency)}")
p_recency.columns = [c.replace('-', '_') for c in p_recency.columns]
prev_date = (datetime.datetime.today() + relativedelta(days=-1))
p_recency['last_visit'] = pd.to_datetime(p_recency['last_visit'], format="%y-%m-%d")
p_recency['prev_date'] = prev_date
p_recency['prev_date'] = pd.to_datetime(p_recency['prev_date'], format="%y-%m-%d")
p_recency['num_months'] = (p_recency['prev_date'] - p_recency['last_visit']) / np.timedelta64(1, 'M')
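# Bucket months since last visit into recency scores: <=3 -> 100, 3-6 -> 75, 6-12 -> 50, >12 -> 25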
conditions = [
(
(p_recency['num_months'] <= 3)
),
(
(p_recency['num_months'] > 3) &
(p_recency['num_months'] <= 6)
),
(
(p_recency['num_months'] > 6) &
(p_recency['num_months'] <= 12)
),
(
(p_recency['num_months'] > 12)
)
]
choices = [100, 75, 50, 25]
p_recency['recency_pct'] = np.select(conditions, choices, default=0)
p_recency['recency_pct'].fillna(0, inplace=True)
metadata = pd.merge(left=metadata, right=p_recency, on=['patient_id'], how='left')
# =============================================================================
# patient level generic recency
# =============================================================================
gen_rec = f"""
select
s1."patient-id" ,
max(s1."created-at") as "last_generic_visit"
from
"prod2-generico".sales s1
inner join
{mquery} s2
on s2."patient-id" = s1."patient-id"
where
s1."bill-flag" = 'gross'
and s1."type" ='generic'
and date(s1."created-at")<CURRENT_DATE
group by
s1."patient-id"
"""
p_grecency=rs_db.get_df(gen_rec)
logger.info(f"p_grecency: {len(p_grecency)}")
p_grecency.columns = [c.replace('-', '_') for c in p_grecency.columns]
p_grecency['last_generic_visit'] = pd.to_datetime(p_grecency['last_generic_visit'], format="%y-%m-%d")
p_grecency['g_prev_date'] = prev_date
p_grecency['g_prev_date'] = pd.to_datetime(p_grecency['g_prev_date'], format="%y-%m-%d")
p_grecency['g_num_months'] = ((p_grecency['g_prev_date'] - p_grecency['last_generic_visit'])
/ np.timedelta64(1, 'M'))
conditions = [
(
(p_grecency['g_num_months'] <= 3)
),
(
(p_grecency['g_num_months'] > 3) &
(p_grecency['g_num_months'] <= 6)
),
(
(p_grecency['g_num_months'] > 6) &
(p_grecency['g_num_months'] <= 12)
),
(
(p_grecency['g_num_months'] > 12)
)
]
choices = [100, 75, 50, 25]
p_grecency['gen_recency_pct'] = np.select(conditions, choices, default=0)
p_grecency.drop('g_prev_date', axis='columns', inplace=True)
p_grecency['gen_recency_pct'].fillna(0, inplace=True)
metadata = pd.merge(left=metadata, right=p_grecency, on=['patient_id'], how='left')
metadata.fillna(0, inplace=True)
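# generic_likelihood: simple average of the five percentage signals (generic bill share, generic recency, substitution, non-return, overall recency)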
metadata['generic_likelihood'] = ((metadata['generic_bill_pct'] + metadata['gen_recency_pct']
+ metadata['subs_pct'] + metadata['not_return_pct'] +
metadata['recency_pct']) / (5))
conditions = [
(
(metadata['generic_likelihood'] >= 80)
),
(
(metadata['generic_likelihood'] < 80) &
(metadata['generic_likelihood'] >= 60)
),
(
(metadata['generic_likelihood'] < 60) &
(metadata['generic_likelihood'] >= 50)
),
(
(metadata['generic_likelihood'] < 50) &
(metadata['generic_likelihood'] >= 25)
),
(
(metadata['generic_likelihood'] < 25)
)
]
choices = [5, 4, 3, 2, 1]
metadata['generic_affinity_score'] = np.select(conditions, choices, default=3)
generic_affinity = metadata
logger.info('length of generic_affinity is :' + str(len(generic_affinity)))
generic_affinity.columns = [c.replace('_', '-') for c in generic_affinity.columns]
generic_affinity['refreshed-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')
generic_affinity['refreshed-at'] = pd.to_datetime(generic_affinity['refreshed-at'],
format="%Y-%m-%d %H:%M:%S")
generic_affinity = generic_affinity[generic_affinity['gross-bill-count'] > 0]
generic_affinity = generic_affinity[['patient-id'
, 'gross-bill-count', 'generic-bill-count', 'generic-bill-pct'
, 'generic-unavailable', 'not-in-inventory', 'not-substituted'
, 'substituted', 'subs-pct', 'gross-return-bill', 'generic-return-bill-count'
, 'return-pct', 'not-return-pct', 'last-visit', 'prev-date'
, 'num-months', 'recency-pct', 'last-generic-visit', 'g-num-months'
, 'gen-recency-pct', 'generic-likelihood', 'generic-affinity-score', 'refreshed-at']]
generic_affinity.columns = [c.replace('-', '_') for c in generic_affinity.columns]
table_update = f"{table_name}-update".replace("-", "_")
# Write to PostGre
query = f''' DELETE FROM {table_update}'''
pg.engine.execute(query)
generic_affinity[['patient_id']].to_sql(
name=table_update, con=pg.engine, if_exists='append', chunksize=500, method='multi',
index=False)
main_table = table_name.replace("-", "_")
query = f''' DELETE FROM {main_table} m1 using (select patient_id from {table_update}) m2 where
m1.patient_id = m2.patient_id'''
pg.engine.execute(query)
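# Stale rows removed above; now append the refreshed rows to the main table in batches (delete-then-insert upsert)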
generic_affinity= generic_affinity.sort_values(by='last_visit',ascending=True)
total_insert = 0
for ga_df in helper.batch(generic_affinity, batch_size):
ga_df.to_sql(name=main_table, con=pg.engine, if_exists='append',
chunksize=500, method='multi', index=False)
total_insert += ga_df.shape[0]
logger.info(f"Postgres DB write completed: {total_insert}")
# Write to Redshift Also
generic_affinity.columns = [c.replace('_', '-') for c in generic_affinity.columns]
generic_affinity['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')
generic_affinity['created-by'] = 'etl-automation'
generic_affinity['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')
generic_affinity['updated-by'] = 'etl-automation'
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where date("last-visit")>= '{date1}' '''
rs_db.execute(truncate_query)
total_insert1 = 0
for ga_df1 in helper.batch(generic_affinity, batch_size):
s3.write_df_to_db(df=ga_df1[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
total_insert1 += ga_df1.shape[0]
logger.info(f"Redshift DB write completed: {total_insert}")
# Closing the DB Connection
rs_db.close_connection()
pg.close_connection()
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/generic-affinity/generic-affinity.py | generic-affinity.py |
```
import argparse
import sys
sys.path.append('../../../..')
import os
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "composition-activation"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert into
"prod2-generico"."{table_name}" (
"created-by",
"created-at",
"updated-by",
"updated-at",
"store-id",
"composition-master-id",
"system-first-inv-date",
"system-first-bill-date",
"store-first-inv-date",
"store-first-bill-date")
select
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
c."store-id" as "store-id",
d."composition-master-id" as "composition-master-id",
max(y.system_first_inv_date) as "system-first-inv-date",
max(y.system_first_bill_date) as "system-first-bill-date",
MIN(c."created-at") as "store-first-inv-date",
MIN(b."created-at") as "store-first-bill-date"
from
"prod2-generico"."inventory-1" c
left join "prod2-generico"."bill-items-1" a on
c."id" = a."inventory-id"
left join "prod2-generico"."bills-1" b on
b."id" = a."bill-id"
left join "prod2-generico"."drugs" d on
d."id" = c."drug-id"
left join (
select
d."composition-master-id", MIN(b."created-at") as "system_first_bill_date", MIN(c."created-at") as "system_first_inv_date"
from
"prod2-generico"."inventory-1" c
left join "prod2-generico"."bill-items-1" a on
c."id" = a."inventory-id"
left join "prod2-generico"."bills-1" b on
b."id" = a."bill-id"
left join "prod2-generico"."drugs" d on
d."id" = c."drug-id"
where
d."composition-master-id" is not null
and d."company-id" = 6984
group by
d."composition-master-id" ) as y on
d."composition-master-id" = y."composition-master-id"
where
d."composition-master-id" is not null
and d."company-id" = 6984
group by
c."store-id",
d."composition-master-id"
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."composition-activation";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection()
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/composition-activation/composition_activation.py | composition_activation.py |
```
import argparse
import os
import sys
from datetime import datetime as dt
from datetime import timedelta
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.email.email import Email
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
read_schema = 'prod2-generico'
report_date = dt.now().date()
# Fetching membership, subscription and calling data up to the report date
members_q = f"""
SELECT
*
FROM
"{read_schema}"."member-details"
WHERE DATE("created-at") <= '{str(report_date)}' ;
"""
subscription_q = f"""
SELECT
*
FROM
"{read_schema}"."subscriptions-meta"
WHERE DATE("created-at") <= '{str(report_date)}';
"""
subs_sku_q = f"""
SELECT
"subscription-meta-id",
COUNT(DISTINCT "drug-id") "sku-count",
AVG(quantity) as "q-per-sku",
MAX(DATE("created-at")) AS "created-at"
FROM
"{read_schema}"."subscriptions"
WHERE DATE("created-at") <= '{str(report_date)}'
GROUP BY
"subscription-meta-id";
"""
subs_amount_q = f"""
SELECT
s."subscription-meta-id",
SUM(s.quantity * T2.rate) "sub-amount",
MAX(DATE(s."created-at")) AS "created-at"
FROM
"{read_schema}"."subscriptions" s
LEFT JOIN "{read_schema}"."subscriptions-meta" sm ON
s."subscription-meta-id" = sm.id
LEFT JOIN (
SELECT
*
FROM
(
SELECT
"store-id",
"drug-id",
"selling-rate" rate,
RANK() OVER(
PARTITION BY "store-id",
"drug-id"
ORDER BY
i."created-at" DESC) AS RRank
FROM
"{read_schema}"."inventory-1" i )T
WHERE
RRank = 1) T2 ON
s."drug-id" = T2."drug-id"
AND sm."preferred-store-id" = T2."store-id"
WHERE DATE(s."created-at") <= '{str(report_date)}'
GROUP BY
s."subscription-meta-id";
"""
calling_q = f"""
SELECT
cd."call-date",
COUNT(cd.id) "call-attempt",
SUM(T.connected) "call-connected"
FROM
"{read_schema}"."calling-dashboard" cd
LEFT JOIN (
SELECT
"calling-dashboard-id",
(CASE
WHEN SUM(CASE WHEN (connected = 1) OR (("call-recording-url" is not null)
AND ("call-recording-url" != '')) THEN 1 ELSE 0 END) > 0 THEN 1
ELSE 0
END) connected
FROM
"{read_schema}"."calling-history" ch
GROUP BY
"calling-dashboard-id") T ON
cd.id = T."calling-dashboard-id"
WHERE
cd."campaign-id" = 34
AND cd.status = 'closed'
AND cd."call-date" <= '{str(report_date)}'
GROUP BY
cd."call-date" ;"""
members = rs_db.get_df(members_q)
subscription = rs_db.get_df(subscription_q)
subs_sku = rs_db.get_df(subs_sku_q)
subs_amount = rs_db.get_df(subs_amount_q)
calling = rs_db.get_df(calling_q)
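# Calling metrics: closed campaign-34 tickets per call date; a ticket counts as connected if any attempt was marked connected or has a recording URL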
# date format conversion
members['created-at'] = pd.to_datetime(members['created-at']).dt.date
subscription['created-at'] = pd.to_datetime(subscription['created-at']).dt.date
subs_sku['created-at'] = pd.to_datetime(subs_sku['created-at']).dt.date
subs_amount['created-at'] = pd.to_datetime(subs_amount['created-at']).dt.date
# Previous Day
p_date = report_date - timedelta(days=1)
call_attempt = calling[calling['call-date'] == p_date]['call-attempt'].sum()
call_connect = calling[calling['call-date'] == p_date]['call-connected'].sum()
total_members = members[members['created-at'] == p_date]['patient-id'].nunique()
patients_subscribed = subscription[subscription['created-at'] == p_date]['patient-id'].nunique()
total_subscription = subscription[subscription['created-at'] == p_date]['id'].nunique()
try:
sku_per_sub = subs_sku[subs_sku['created-at'] == p_date]['sku-count'].mean().round()
q_per_sku = subs_sku[subs_sku['created-at'] == p_date]['q-per-sku'].mean().round()
sub_value = subs_amount[subs_amount['created-at'] == p_date]['sub-amount'].mean().round(2)
except Exception:
sku_per_sub = subs_sku[subs_sku['created-at'] == p_date]['sku-count'].mean()
q_per_sku = subs_sku[subs_sku['created-at'] == p_date]['q-per-sku'].mean()
sub_value = subs_amount[subs_amount['created-at'] == p_date]['sub-amount'].mean()
previous_day = """
Previous Day Stats
Membership calls done (attempted) : {attempt}
Membership calls connected (Phone picked up by the customer) : {call_connect}
Membership count : {mc}
Patient subscription : {ps}
Average number of SKU per subscription : {a_sku_ps}
Quantity per SKU per subscription : {q_sku_ps}
Average subscription value : {sv}
Total subscriptions : {ts}
""".format(mc=total_members, ps=patients_subscribed, a_sku_ps=sku_per_sub, q_sku_ps=q_per_sku,
sv=sub_value, ts=total_subscription, attempt=call_attempt, call_connect=call_connect)
# Today
call_attempt = calling[calling['call-date'] == report_date]['call-attempt'].sum()
call_connect = calling[calling['call-date'] == report_date]['call-connected'].sum()
total_members = members[members['created-at'] == report_date]['patient-id'].nunique()
patients_subscribed = subscription[subscription['created-at'] == report_date]['patient-id'].nunique()
total_subscription = subscription[subscription['created-at'] == report_date]['id'].nunique()
try:
sku_per_sub = subs_sku[subs_sku['created-at'] == report_date]['sku-count'].mean().round()
q_per_sku = subs_sku[subs_sku['created-at'] == report_date]['q-per-sku'].mean().round()
sub_value = subs_amount[subs_amount['created-at'] == report_date]['sub-amount'].mean().round(2)
except Exception:
sku_per_sub = subs_sku[subs_sku['created-at'] == report_date]['sku-count'].mean()
q_per_sku = subs_sku[subs_sku['created-at'] == report_date]['q-per-sku'].mean()
sub_value = subs_amount[subs_amount['created-at'] == report_date]['sub-amount'].mean()
current_day = """
Today Stats
Membership calls done (attempted) : {attempt}
Membership calls connected (Phone picked up by the customer) : {call_connect}
Membership count : {mc}
Patient subscription : {ps}
Average number of SKU per subscription : {a_sku_ps}
Quantity per SKU per subscription : {q_sku_ps}
Average subscription value : {sv}
Total subscriptions : {ts}
""".format(mc=total_members, ps=patients_subscribed, a_sku_ps=sku_per_sub, q_sku_ps=q_per_sku,
sv=sub_value, ts=total_subscription, attempt=call_attempt, call_connect=call_connect)
# Till Today
call_attempt = calling[calling['call-date'] <= report_date]['call-attempt'].sum()
call_connect = calling[calling['call-date'] <= report_date]['call-connected'].sum()
total_members = members[members['created-at'] <= report_date]['patient-id'].nunique()
patients_subscribed = subscription[subscription['created-at'] <= report_date]['patient-id'].nunique()
total_subscription = subscription[subscription['created-at'] <= report_date]['id'].nunique()
sku_per_sub = subs_sku[subs_sku['created-at'] <= report_date]['sku-count'].mean().round()
q_per_sku = subs_sku[subs_sku['created-at'] <= report_date]['q-per-sku'].mean().round()
sub_value = subs_amount[subs_amount['created-at'] <= report_date]['sub-amount'].mean().round(2)
till_today = """
Report till now
Membership calls done (attempted) : {attempt}
Membership calls connected (Phone picked up by the customer) : {call_connect}
Membership count : {mc}
Patient subscription : {ps}
Average number of SKU per subscription : {a_sku_ps}
Quantity per SKU per subscription : {q_sku_ps}
Average subscription value : {sv}
Total subscriptions : {ts}
""".format(mc=total_members, ps=patients_subscribed, a_sku_ps=sku_per_sub, q_sku_ps=q_per_sku,
sv=sub_value, ts=total_subscription, attempt=call_attempt, call_connect=call_connect)
mail_body = f"""
Hey Everyone
{previous_day} \n
{current_day} \n
{till_today} \n
Thanks & Regards
"""
# Sending email
subject = 'Membership Program Summary'
mail_body = mail_body
email = Email()
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=[])
# closing the connection
rs_db.close_connection()
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/membership_meta/membership_meta.py | membership_meta.py |
```
# !/usr/bin/env python
# coding: utf-8
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
import argparse
import pandas as pd
import re
from datetime import date
from datetime import datetime
import dateutil.relativedelta
import numpy as np
from dateutil.tz import gettz
start_dt = (datetime.now() + dateutil.relativedelta.relativedelta(months=-1)
).replace(day=1).date().strftime("%Y-%m-%d")
end_dt = date.today().strftime("%Y-%m-%d")
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-sd', '--start_date', default='NA', type=str, required=False)
parser.add_argument('-ed', '--end_date', default='NA', type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
start_date = args.start_date
end_date = args.end_date
if start_date == 'NA' and end_date == 'NA':
start_date = start_dt
end_date = end_dt
logger = get_logger()
logger.info(f"env: {env}")
logger.info('Script Manager Initialized')
rs_db = DB()
rs_db.open_connection()
start_date_n = datetime.strptime('{0}'.format(start_date), "%Y-%m-%d").replace(day=1).date()
end_date_n = datetime.strptime('{0}'.format(end_date), "%Y-%m-%d").replace(day=1).date()
drug_q = '''
select
d.id as "drug_id",
d."drug-name" as "drug_name",
d."type" as "drug_type",
d.category as "drug_category",
d."repeatability-index" as "drug_repeatability_index",
d.schedule as "drug_schedule",
d.company as "drug_company",
d.composition as "drug_composition",
d.pack as "drug_pack",
d."pack-form" as "drug_pack_form"
from
"prod2-generico".drugs d'''
drugs = rs_db.get_df(query=drug_q)
# Extract drug dosage (text in parentheses) from drug composition using regex
# added 'r' prefix
drugs['drug_dosage'] = drugs['drug_composition'].apply(lambda x:
re.findall(r"\((.*?)\)", x))
# Remove brackets from drug dosage column
# drugs = drugs.copy(deep=True)
drugs['drug_dosage'] = drugs['drug_dosage'].astype(str)
drugs['drug_dosage'] = drugs['drug_dosage'].str.strip('[]')
# Remove inverted commas from drug dosage column
drugs['drug_dosage'] = drugs['drug_dosage'].apply(lambda x: x.replace("'", ''))
# Remove dosage from composition
drugs['drug_composition'] = drugs['drug_composition'].apply(lambda x:
re.sub(r" ?\([^)]+\)", "", x))
# Apply repeatability condition to check whether drug is repeatable or not
drugs['drug_is_repeatable'] = np.where(((drugs['drug_repeatability_index'] >= 80) |
((drugs['drug_category'] == 'chronic') &
(drugs['drug_repeatability_index'] >= 40))),
'yes', 'no')
bi_data_grp = '''
select
s."month-created-at" as "bill_month",
s."year-created-at" as "bill_year" ,
"store-id" as "store_id",
"drug-id" as "drug_id",
COUNT(distinct "patient-id") as "no_of_customers",
COUNT(distinct "bill-id") as "no_of_bills",
SUM(quantity) as "gross_quantity",
SUM(s.quantity * s.rate) as "gross_sales",
SUM(s.quantity * s."purchase-rate") as "gross_cogs",
SUM(s.quantity * s.mrp ) as "gross_mrp_sales"
from
"prod2-generico".sales s
where s."bill-flag" ='gross' and date(s."created-at")>='{}' and date(s."created-at")<='{}'
group by
s."month-created-at",
s."year-created-at",
"store-id",
"drug-id"
'''.format(start_date, end_date)
bi_data_grp = rs_db.get_df(query=bi_data_grp)
logger.info(bi_data_grp)
bi_data_grp['avg_selling_rate'] = bi_data_grp['gross_sales'] / bi_data_grp['gross_quantity']
bi_data_grp['avg_drug_cogs'] = bi_data_grp['gross_cogs'] / bi_data_grp['gross_quantity']
bi_data_grp['avg_drug_mrp'] = bi_data_grp['gross_mrp_sales'] / bi_data_grp['gross_quantity']
bi_data_grp['abv'] = bi_data_grp['gross_sales'] / bi_data_grp['no_of_bills']
data_return_grp = '''
select
s."month-created-at" as "return_month",
s."year-created-at" as "return_year" ,
"store-id" as "store_id",
"drug-id" as "drug_id",
SUM(quantity) as "return_quantity",
SUM(s.quantity * s.rate) as "return_value",
SUM(s.quantity * s."purchase-rate") as "return_cogs",
SUM(s.quantity * s.mrp ) as "return_mrp_value"
from
"prod2-generico".sales s
where s."bill-flag" ='return' and date(s."created-at")>='{start_date}' and date(s."created-at")<='{end_date}'
group by
s."month-created-at" ,
s."year-created-at" ,
"store-id",
"drug-id"
'''.format(start_date=start_date, end_date=end_date)
data_return_grp = rs_db.get_df(query=data_return_grp)
data_return_grp['avg_return_rate'] = data_return_grp['return_value'] / data_return_grp['return_quantity']
data_return_grp['avg_drug_cogs'] = data_return_grp['return_cogs'] / data_return_grp['return_quantity']
data_return_grp['avg_drug_mrp'] = data_return_grp['return_mrp_value'] / data_return_grp['return_quantity']
data_return_grp.drop(['return_cogs', 'return_mrp_value'], axis=1, inplace=True)
# Merge patient bill item data and customer returns data by outer
# on 'bill_month'/'return_month',
# 'bill_year'/'return_year', 'store_id','drug_id' to get aggregated data
# on store,drug,month level
# Outer merge has been done so that going forward, net measures
# can be calculated on
# exact month/year of returns only
merge_data = pd.merge(bi_data_grp, data_return_grp, how='outer',
left_on=['bill_month', 'bill_year', 'store_id', 'drug_id'],
right_on=['return_month', 'return_year', 'store_id', 'drug_id'])
bi_data_grp = ''
data_return_grp = ''
# Fill up avg_drug_cogs and avg_drug_mrp values in bill item data table
# from returns table
# wherever they are null
merge_data['avg_drug_cogs_x'] = np.where(merge_data['avg_drug_cogs_x'].isnull(),
merge_data['avg_drug_cogs_y'], merge_data['avg_drug_cogs_x'])
merge_data.drop('avg_drug_cogs_y', axis=1, inplace=True)
merge_data.rename({'avg_drug_cogs_x': 'avg_drug_cogs'},
axis=1, inplace=True)
merge_data['avg_drug_mrp_x'] = np.where(merge_data['avg_drug_mrp_x'].isnull(),
merge_data['avg_drug_mrp_y'], merge_data['avg_drug_mrp_x'])
merge_data.drop('avg_drug_mrp_y', axis=1, inplace=True)
merge_data.rename({'avg_drug_mrp_x': 'avg_drug_mrp'}, axis=1, inplace=True)
# Fill up null values of important numeric columns
merge_data['return_quantity'].fillna(0, inplace=True)
merge_data['return_value'].fillna(0, inplace=True)
merge_data['gross_quantity'].fillna(0, inplace=True)
merge_data['gross_sales'].fillna(0, inplace=True)
# Net quantity and net sales columns are calculated
merge_data['net_quantity'] = merge_data['gross_quantity'] - merge_data['return_quantity']
merge_data['net_sales'] = merge_data['gross_sales'] - merge_data['return_value']
merge_data['avg_drug_cogs'] = pd.to_numeric(merge_data['avg_drug_cogs'])
merge_data['return_cogs'] = merge_data['avg_drug_cogs'] * merge_data['return_quantity']
merge_data['gross_cogs'].fillna(0, inplace=True)
merge_data['gross_cogs'] = pd.to_numeric(merge_data['gross_cogs'])
merge_data['net_cogs'] = merge_data['gross_cogs'] - merge_data['return_cogs']
merge_data['avg_drug_mrp'] = pd.to_numeric(merge_data['avg_drug_mrp'])
merge_data['net_quantity'] = pd.to_numeric(merge_data['net_quantity'])
merge_data['net_mrp_sales'] = merge_data['avg_drug_mrp'] * merge_data['net_quantity']
merge_data['gross_sales'] = pd.to_numeric(merge_data['gross_sales'])
merge_data['gross_cogs'] = pd.to_numeric(merge_data['gross_cogs'])
merge_data['gross_margin'] = merge_data['gross_sales'] - merge_data['gross_cogs']
merge_data['net_sales'] = pd.to_numeric(merge_data['net_sales'])
merge_data['net_cogs'] = pd.to_numeric(merge_data['net_cogs'])
merge_data['net_margin'] = merge_data['net_sales'] - merge_data['net_cogs']
merge_data['gross_margin_percentage'] = merge_data['gross_margin'] / merge_data['gross_sales'] * 100
merge_data['net_margin_percentage'] = merge_data['net_margin'] / merge_data['net_sales'] * 100
merge_data['final_month'] = merge_data['bill_month']
merge_data['final_year'] = merge_data['bill_year']
merge_data['final_month'] = np.where(merge_data['final_month'].isnull(),
merge_data['return_month'], merge_data['final_month'])
merge_data['final_year'] = np.where(merge_data['final_year'].isnull(),
merge_data['return_year'], merge_data['final_year'])
# Import Shortbook-1 data to calculate drug fill rate:
sb_q_total_order = '''
select
sb."store-id",
sb."drug-id",
sb.quantity as sb_quantity,
sb."created-at" as "sb-created-at" ,
sb."received-at" as "sb-received-at"
from
"prod2-generico"."short-book-1" sb
where sb.quantity >0 and sb."auto-generated" =0 and date(sb."created-at")>='{start_date}' and date(sb."created-at")<='{end_date}'
'''.format(start_date=start_date, end_date=end_date)
data_sb_1 = rs_db.get_df(query=sb_q_total_order)
sb_q_fullfilled_order = '''
select
sb."store-id",
sb."drug-id",
sb.quantity as sb_quantity,
sb."created-at" as "sb-created-at" ,
sb."received-at" as "sb-received-at"
from
"prod2-generico"."short-book-1" sb
where sb.quantity >0 and sb."auto-generated" =0 and date(sb."created-at")>='{start_date}' and date(sb."created-at")<='{end_date}'
and sb."received-at" !='0101-01-01 00:00:00.000'
'''.format(start_date=start_date, end_date=end_date)
data_sb_2 = rs_db.get_df(query=sb_q_fullfilled_order)
data_sb_1.columns = [c.replace('-', '_') for c in data_sb_1.columns]
data_sb_2.columns = [c.replace('-', '_') for c in data_sb_2.columns]
data_sb_1['sb_created_at'] = pd.to_datetime(data_sb_1['sb_created_at'])
data_sb_1['sb_month'] = data_sb_1['sb_created_at'].dt.month
data_sb_1['sb_year'] = data_sb_1['sb_created_at'].dt.year
data_sb_2['sb_created_at'] = pd.to_datetime(data_sb_2['sb_created_at'])
data_sb_2['sb_month'] = data_sb_2['sb_created_at'].dt.month
data_sb_2['sb_year'] = data_sb_2['sb_created_at'].dt.year
data_sb_1 = data_sb_1.groupby(['sb_month', 'sb_year', 'store_id', 'drug_id'])['sb_created_at'].count().to_frame(
name='total_orders').reset_index()
#data_sb_2 = data_sb[data_sb['sb_received_at'] == '0101-01-01 00:00:00.000']
data_sb_2 = data_sb_2.groupby(['sb_month', 'sb_year', 'store_id', 'drug_id'])['sb_received_at'].count().to_frame(
name='orders_fulfilled').reset_index()
data_sb = pd.merge(data_sb_1, data_sb_2, how='left',
on=['sb_month', 'sb_year', 'store_id', 'drug_id'])
data_sb['total_orders'].fillna(0, inplace=True)
data_sb['orders_fulfilled'].fillna(0, inplace=True)
# Entire bill item and returns combined data-frame is merged
# with the drugs dataframe on 'drug_id'
category_data = pd.merge(drugs, merge_data, how='right', on='drug_id')
drugs = ''
# Group by 'final_month','final_year','store_id','drug_id','drug_type'
# to find revenue and qty by drug type
category_data_grp = category_data.groupby(['final_month', 'final_year', 'store_id',
                                           'drug_id', 'drug_type']
                                          )[['gross_sales', 'gross_quantity']].sum().reset_index()
category_data_grp['generic_revenue'] = np.where(category_data_grp['drug_type'] == 'generic',
category_data_grp['gross_sales'], 0)
category_data_grp['generic_quantity'] = np.where(category_data_grp['drug_type'] == 'generic',
category_data_grp['gross_quantity'], 0)
category_data_grp['ethical_revenue'] = np.where(category_data_grp['drug_type'] == 'ethical',
category_data_grp['gross_sales'], 0)
category_data_grp['ethical_quantity'] = np.where(category_data_grp['drug_type'] == 'ethical',
category_data_grp['gross_quantity'], 0)
# Group by 'final_month','final_year','store_id','drug_id'
# and exclude drug type to now find aggregates for later on calculating substitution %
category_data_grp = category_data_grp.groupby(['final_month', 'final_year', 'store_id', 'drug_id'])[
    ['gross_sales', 'gross_quantity', 'generic_revenue', 'generic_quantity',
     'ethical_revenue', 'ethical_quantity']].sum().reset_index()
# Drop gross sales and gross quantity columns to avoid column duplicates later on
category_data_grp.drop(['gross_sales', 'gross_quantity'], axis=1, inplace=True)
# Merge main category data frame with the substitution(category_data_grp) data frame
category_data = pd.merge(category_data, category_data_grp, how='left',
on=['final_month', 'final_year', 'store_id', 'drug_id'])
# Merge this data-frame with short-book dataframe on 'month','year','store_id','drug_id'
category_data = pd.merge(category_data, data_sb, how='left',
left_on=['final_month', 'final_year',
'store_id', 'drug_id'],
right_on=['sb_month', 'sb_year',
'store_id', 'drug_id'])
data_sb = ''
category_data_grp = ''
# Calculate drug fill rate by dividing orders fulfilled by total orders drug-wise
category_data['drug_fill_rate_percentage'] = category_data['orders_fulfilled'] / category_data['total_orders'] * 100
# Calculate normalized date using final_month and final_year
category_data['final_year'] = category_data['final_year'].astype(int)
category_data['final_month'] = category_data['final_month'].astype(int)
category_data['day'] = 1
category_data.rename({'final_year': 'year', 'final_month': 'month'}, axis=1, inplace=True)
category_data['final_date'] = pd.to_datetime(category_data[['year', 'month', 'day']])
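# final_date normalises every row to the first day of its month (day=1); the period-level
# delete on "final-date" further below relies on this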
# Drop day column and rename month year columns to final_month and final_year.
# Also change their data types to float again
category_data.drop('day', axis=1, inplace=True)
category_data.rename({'year': 'final_year', 'month': 'final_month'}, axis=1, inplace=True)
category_data['final_year'] = category_data['final_year'].astype(float)
category_data['final_month'] = category_data['final_month'].astype(float)
# Re-order columns
category_data = category_data[
['final_month', 'final_year', 'bill_month',
'bill_year', 'return_month', 'return_year',
'store_id', 'drug_id',
'drug_name', 'drug_composition', 'drug_dosage',
'drug_company', 'drug_type', 'drug_category',
'drug_schedule', 'drug_pack', 'drug_pack_form',
'drug_repeatability_index', 'drug_is_repeatable',
'no_of_customers', 'no_of_bills', 'abv',
'generic_revenue', 'ethical_revenue',
'generic_quantity', 'ethical_quantity',
'gross_quantity', 'avg_selling_rate', 'gross_sales',
'return_quantity', 'avg_return_rate', 'return_value',
'net_quantity', 'net_sales', 'avg_drug_mrp',
'gross_mrp_sales', 'net_mrp_sales', 'avg_drug_cogs',
'gross_cogs', 'return_cogs', 'net_cogs',
'gross_margin', 'net_margin',
'gross_margin_percentage', 'net_margin_percentage',
'sb_month', 'sb_year', 'total_orders', 'orders_fulfilled',
'drug_fill_rate_percentage', 'final_date']]
# Round off all numeric columns to two decimal places
numeric_cols = ['final_month', 'final_year', 'bill_month', 'bill_year',
'return_month', 'return_year',
'store_id', 'drug_id', 'drug_repeatability_index',
'no_of_customers', 'no_of_bills', 'abv',
'generic_revenue', 'ethical_revenue',
'generic_quantity', 'ethical_quantity',
'gross_quantity', 'avg_selling_rate', 'gross_sales',
'return_quantity', 'avg_return_rate', 'return_value',
'net_quantity', 'net_sales',
'avg_drug_mrp', 'gross_mrp_sales', 'net_mrp_sales',
'avg_drug_cogs', 'gross_cogs', 'return_cogs',
'net_cogs', 'gross_margin', 'net_margin',
'gross_margin_percentage', 'net_margin_percentage',
'sb_month', 'sb_year',
'total_orders', 'orders_fulfilled',
'drug_fill_rate_percentage']
category_data[numeric_cols] = category_data[numeric_cols].round(2)
category_data = category_data.replace([np.inf, -np.inf], np.nan)
category_data['created-at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
category_data['updated-at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
category_data['created-by'] = 'etl-automation'
category_data['updated-by'] = 'etl-automation'
category_data.columns = [c.replace('_', '-') for c in category_data.columns]
category_data['final-date'] = category_data['final-date'].dt.date
category_data[["drug-id","drug-repeatability-index"]]=category_data[["drug-id","drug-repeatability-index"]].apply(pd.to_numeric, errors='ignore').astype('Int64')
# Delete records of current and last month from table data
delete_q = """
DELETE
FROM
"prod2-generico".category
WHERE
"final-date" >= '{start_date_n}'
and "final-date" <= '{end_date_n}'
""".format(start_date_n=start_date_n, end_date_n=end_date_n)
rs_db.execute(delete_q)
logger.info('Delete for recent period done')
s3 = S3()
schema = 'prod2-generico'
table_name = 'category'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# Upload data in table for current and last month
s3.write_df_to_db(df=category_data[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/category/category.py | category.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.queries.sales import sales_config
import pandas as pd
def main(db, table_suffix):
table_name = f"sales"
bill_table_name = "bill-flags"
stores_master_table_name = "stores-master"
if table_suffix:
table_name = f"sales_{table_suffix}"
bill_table_name = f"bill-flags-{table_suffix}"
stores_master_table_name = f"stores-master-{table_suffix}"
db.execute(query="begin ;")
db.execute(sales_config.max_bill_id.format(table_name), params=None)
    sales_intermediate: pd.DataFrame = db.cursor.fetch_dataframe()
max_bill_id = sales_intermediate.values[0][0]
if max_bill_id is None:
max_bill_id = 0
db.execute(sales_config.max_return_id.format(table_name), params=None)
    returns_intermediate: pd.DataFrame = db.cursor.fetch_dataframe()
max_return_id = returns_intermediate.values[0][0]
if max_return_id is None:
max_return_id = 0
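    # on a first run against an empty table both max ids fall back to 0, so the insert query
    # below (presumably using them as lower bounds) loads the full history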
query = sales_config.insert_sales_record.format(
table_name, bill_table_name, stores_master_table_name, max_bill_id,
bill_table_name, stores_master_table_name, max_return_id)
db.execute(query)
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-ts', '--table_suffix', default="", type=str, required=False,
help="Table suffix for testing.")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
table_suffix = args.table_suffix
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db, table_suffix=table_suffix)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/sales/sales.py | sales.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db, table_suffix):
table_name = f"sales"
bill_table_name = "bill-flags"
stores_master_table_name = "stores-master"
if table_suffix:
table_name = f"sales_{table_suffix}"
bill_table_name = f"bill-flags-{table_suffix}"
stores_master_table_name = f"stores-master-{table_suffix}"
# db.execute(query="begin ;")
#Update drugs
drugs_update_query = f"""
update
"prod2-generico"."{table_name}"
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"drug-name" = c."drug-name",
"type" = c."type",
"category" = c."category",
"hsncode" = c."hsncode",
"is-repeatable" = c."is-repeatable",
"composition" = c."composition",
"company" = c."company",
"company-id" = c."company-id",
"composition-master-id" = c."composition-master-id"
from
"prod2-generico"."{table_name}" s
join "prod2-generico"."drugs" c on
c."id" = s."drug-id"
where
(NVL(s."drug-name",'') != c."drug-name"
or NVL(s."type",'') != c."type"
or NVL(s."category",'') != c."category"
or NVL(s."hsncode",'') != c."hsncode"
or NVL(s."is-repeatable") != c."is-repeatable"
or NVL(s."composition",'') != c."composition"
or NVL(s."company",'') != c."company"
or NVL(s."company-id",0) != c."company-id"
or NVL(s."composition-master-id",0) != c."composition-master-id");
"""
db.execute(query=drugs_update_query)
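    # the remaining updates reuse the same pattern: join sales to the source table and touch only
    # rows where an NVL-compare detects a changed value, keeping the updated row set minimal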
# Update group
group_update_query = f"""
update
"prod2-generico"."{table_name}"
set
"group" = d."group"
from
"prod2-generico"."{table_name}" s
join "prod2-generico"."drug-unique-composition-mapping" d on
s."drug-id" = d."drug-id"
where
(NVL(s."group", '')!= d."group");
"""
db.execute(query=group_update_query)
# Update patients info
patients_update_query = f"""
update
"prod2-generico"."{table_name}"
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"patient-category" = p."patient-category",
"p-reference" = p."reference"
from
"prod2-generico"."{table_name}" s
join "prod2-generico"."patients" p on
s."patient-id" = p."id"
where
(NVL(s."patient-category",'') != p."patient-category" or
NVL(s."p-reference",'') != p."reference");
"""
db.execute(query=patients_update_query)
# Update patients_metadata info
patients_m_update_query = f"""
update
"prod2-generico"."{table_name}"
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"first-bill-date" = pm."first-bill-date"
from
"prod2-generico"."{table_name}" s
join "prod2-generico"."patients-metadata-2" pm on
s."patient-id" = pm."id"
where
(NVL(to_char(s."first-bill-date", 'YYYY-MM-DD'),'') != to_char(pm."first-bill-date", 'YYYY-MM-DD'));
"""
db.execute(query=patients_m_update_query)
# Update stores information
stores_update_query = f"""
update
"prod2-generico"."{table_name}"
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"store-manager" = msm."store-manager",
"line-manager" = msm."line-manager",
"abo" = msm.abo,
"city" = msm.city,
"acquired" = msm."acquired" ,
"old-new-static" = msm."old-new-static",
"store-name" = msm.store
from
"prod2-generico"."{table_name}" s
join "prod2-generico"."{stores_master_table_name}" as msm on
s."store-id" = msm."id"
where
(NVL(s."store-manager",'') != msm."store-manager"
or NVL(s."line-manager",'') != msm."line-manager"
or NVL(s."abo",'') != msm.abo
or NVL(s."city",'') != msm.city
or NVL(s."acquired",999) != msm."acquired"
or NVL(s."old-new-static",'') != msm."old-new-static"
or NVL(s."store-name",'') != msm.store);
"""
db.execute(query=stores_update_query)
# Update bill_flags information
bill_flags_update_query = f"""
update
"prod2-generico"."{table_name}"
set
"updated-at" = convert_timezone('Asia/Calcutta', GETDATE()),
"pr-flag" = NVL(pso2."pr-flag", false),
"hd-flag" = NVL(pso2."hd-flag", false),
"ecom-flag" = NVL(pso2."ecom-flag", false),
"crm-flag" = NVL(pso2."crm-flag", false)
from
"prod2-generico"."{table_name}" s
join "prod2-generico"."{bill_table_name}" as pso2 on
s."bill-id" = pso2."id"
where
(NVL(s."pr-flag",false) != NVL(pso2."pr-flag", false) or NVL(s."hd-flag",false) != NVL(pso2."hd-flag", false) or NVL(s."ecom-flag", false) != NVL(pso2."ecom-flag", false)
or NVL(s."crm-flag", false) != NVL(pso2."crm-flag", false));
"""
db.execute(query=bill_flags_update_query)
""" committing the transaction """
# db.execute(query=" commit; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-ts', '--table_suffix', default="", type=str, required=False,
help="Table suffix for testing.")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
table_suffix = args.table_suffix
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = True
""" calling the main function """
main(db=rs_db, table_suffix=table_suffix)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/sales/update_optimized_sales.py | update_optimized_sales.py |
import argparse
import os
import sys
sys.path.append('../../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
import datetime
from dateutil.tz import gettz
from zeno_etl_libs.db.db import DB, PostGreWrite
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'store-ops-metrics'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
date1= (datetime.datetime.today() + relativedelta(months=-1)).replace(day=1).strftime('%Y-%m-%d')
date2= (datetime.datetime.today() + relativedelta(days=-1)).strftime('%Y-%m-%d')
# =============================================================================
# Importing all stores with opening date
# =============================================================================
sm = """
select
sm.id as "store-id"
from
"prod2-generico"."stores-master" sm
inner join "prod2-generico"."stores" s on
s.id = sm.id
where
date(sm."opened-at") != '0101-01-01'
and s."is-active" = 1
group by
sm.id;
"""
sm_data = rs_db.get_df(sm)
sm_data.columns = [c.replace('-', '_') for c in sm_data.columns]
sm_data['join']='A'
# =============================================================================
# Date range explode
# =============================================================================
d_date = pd.DataFrame({'join':['A']})
#d_date['join']='A'
d_date['start_date']= date1
d_date['end_date']= date2
d_date['date'] = [pd.date_range(s, e, freq='d') for s, e in
zip(pd.to_datetime(d_date['start_date']),
pd.to_datetime(d_date['end_date']))]
#d_date = d_date.explode('date')
d_date = pd.DataFrame({'date': np.concatenate(d_date.date.values)})
d_date['join']='A'
#d_date.drop(['start_date','end_date'],axis=1,inplace=True)
d_date['date'] = d_date['date'].astype('str')
m_data = pd.merge(left=sm_data,right=d_date,on=['join'],how='inner')
m_data.drop('join',axis=1,inplace=True)
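# m_data is now a store x date scaffold: every active store crossed with every date in the
# [date1, date2] window, so days with no activity still get a row after the left merges below.
# (On newer pandas the dummy 'join' key could be replaced by pd.merge(sm_data, d_date, how='cross');
# that is an assumption about the installed pandas version, so the existing approach is kept.)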
# =============================================================================
# AS PR received TAT
# =============================================================================
as_pr = f"""
select
"store-id" ,
date("received-at") as "date",
avg( case when (sb."auto-short" = 0 AND sb."auto-generated" = 0 AND sb."status" NOT IN ('deleted')) then (datediff('hour', sd."store-delivered-at", sb."received-at")) end) as "pr_received_tat",
avg( case when (sb."auto-short" = 1 and sb."home-delivery" = 0 and sb."patient-id" = 4480 and sb."status" NOT IN ('deleted')) then (datediff('hour', sd."store-delivered-at", sb."received-at")) end) as "as_received_tat"
from
"prod2-generico"."short-book-1" sb
left join "prod2-generico"."store-delivered" sd on
sd.id = sb.id
where
date("received-at")>= '{date1}'
and date("received-at")<= '{date2}'
group by
"store-id" ,
date("received-at");
"""
as_pr_tat = rs_db.get_df(as_pr)
as_pr_tat.columns = [c.replace('-', '_') for c in as_pr_tat.columns]
as_pr_tat['date'] = as_pr_tat['date'].astype('str')
m_data = pd.merge(left=m_data,right=as_pr_tat,how='left',on=['store_id','date'])
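# every metric block below repeats the same recipe: aggregate per store/date in Redshift,
# snake_case the column names, cast 'date' to str, then left-merge onto the store x date scaffold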
# =============================================================================
# Audit Loss
# =============================================================================
a_loss = f"""
select
date(a."created-at") as "date",
a."store-id",
sum(aps."actual-quantity" * aps."final-ptr") as "actual-value",
sum((aps."actual-quantity"-(case when aps."accounted-quantity">aps."actual-quantity" then aps."actual-quantity" else aps."accounted-quantity" end )-aps."corrected-qty")* aps."final-ptr") as "accounted-value",
sum((aps."actual-quantity"-(aps."accounted-quantity")-aps."corrected-qty")* aps."final-ptr") as "billing-page-value",
sum(case when (aps."actual-quantity"-aps."accounted-quantity")<0 and aps."correction-requested-qty">0 then 1 else 0 end) as "merchandizing-issue"
from
"prod2-generico"."audits" a
left join "prod2-generico"."audit-process-sku" aps
on
a.id = aps."audit-id"
where
date(a."created-at") >= '{date1}'
and date(a."created-at") <= '{date2}'
group by 1,2
;
"""
audit_loss = rs_db.get_df(a_loss)
audit_loss.columns = [c.replace('-', '_') for c in audit_loss.columns]
audit_loss['date'] = audit_loss['date'].astype('str')
m_data = pd.merge(left=m_data,right=audit_loss,on=['store_id','date'],how='left')
# =============================================================================
# LP Liquidation + LP PR PCT
# =============================================================================
lp = f"""
select
lp."store-id" ,
lp."received-date" as "date",
sum(lp."lp-sales-sum") as "lp-sales-sum",
sum(lp."lp-value") as "lp-value",
sum(s."lp_pr_sales") as "lp_pr_sales"
from
(
select
lp."store-id" ,
lp."received-date",
sum(lp."lp-sales-sum") as "lp-sales-sum",
sum(lp."lp-value-sum") as "lp-value"
from
"prod2-generico"."lp-liquidation" lp
where
date(lp."received-date")>= '{date1}'
and date(lp."received-date")<= '{date2}'
group by
lp."store-id" ,
date(lp."received-date")) lp
inner join (
select
"store-id" ,
"created-date",
sum(case when "pr-flag" = true then "revenue-value" end) as "lp_pr_sales"
from
"prod2-generico"."sales"
where
date("created-at")>= '{date1}'
and date("created-at")<= '{date2}'
group by
1,
2) s on
s."store-id" = lp."store-id"
and s."created-date" = lp."received-date"
where
date(lp."received-date")>= '{date1}'
and date(lp."received-date")<= '{date2}'
group by
lp."store-id" ,
date(lp."received-date");
"""
lp_liq = rs_db.get_df(lp)
lp_liq.columns = [c.replace('-', '_') for c in lp_liq.columns]
lp_liq['date'] = lp_liq['date'].astype('str')
m_data = pd.merge(left=m_data,right=lp_liq,on=['store_id','date'],how='left')
# =============================================================================
# OOS less than min + STore level OOS
# =============================================================================
oos = f"""
select
oos."closing-date" as "date",
oos."store-id" ,
sum( case when oos."bucket" in ('AW', 'AX', 'AY') and oos."oos-min-count" = 0 then oos."drug-count" end) as min_count_oos_ax,
sum(case when oos."bucket" in ('AW', 'AX', 'AY') then oos."drug-count" end) as "total_drug_count_oos_ax",
sum(case when oos."oos-min-count" = 0 and d."company-id" = 6984 then oos."drug-count" end) as "goodaid_min_count",
sum(case when d."company-id" = 6984 then oos."drug-count" end) as "goodaid_total_count",
sum(oos."drug-count") as "total_drug_count_oos",
sum(oos."oos-count") as "total_oos_drug_count_oos"
from
"prod2-generico"."out-of-shelf-drug-level" oos
inner join "prod2-generico"."drugs" d on
oos."drug-id" = d."id"
where
oos."max-set" = 'Y'
and oos."mature-flag" = 'Y'
and date(oos."closing-date") >='{date1}'
and date(oos."closing-date") <='{date2}'
group by
1,
2;
"""
oos_data = rs_db.get_df(oos)
oos_data.columns = [c.replace('-', '_') for c in oos_data.columns]
oos_data['date'] = oos_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=oos_data,on=['store_id','date'],how='left')
# =============================================================================
# Feedback rating and bill pct
# =============================================================================
fb = f"""
select
date(b."created-at") as "date",
b."store-id",
count(distinct case when f.rating is not null then b.id end)* 1.0 / count(distinct b.id)* 1.0 as "feedback-bills-pct",
NVL(count(distinct case when f.rating in (1, 2) then b.id end),
0) as "flag-rating",
count(distinct case when f.rating is not null then b.id end) as "feedback_bills"
from
"prod2-generico"."bills-1" b
left join "prod2-generico"."feedback" f on
f."bill-id" = b.id
where date(b."created-at") >= '{date1}'
and date(b."created-at") <= '{date2}'
group by
date(b."created-at") ,
b."store-id";
"""
fb_data = rs_db.get_df(fb)
fb_data.columns = [c.replace('-', '_') for c in fb_data.columns]
fb_data['date'] = fb_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=fb_data,on=['store_id','date'],how='left')
# =============================================================================
# Sales related Metric
# =============================================================================
sd = f"""
select
"store-id",
"created-date" as "date",
sum(case when "bill-flag" = 'gross' and "substitution-status" = 'substituted' then quantity end) as "subs_num",
NVL(sum(case when "bill-flag" = 'gross' and "substitution-status" in ('substituted', 'not-substituted') then quantity end),1) as "subs_den",
sum(case when "bill-flag" = 'gross' and "substitution-status" = 'substituted' and "hd-flag" = True then quantity end) as "hd_subs_num",
NVL(sum(case when "bill-flag" = 'gross' and "substitution-status" in ('substituted', 'not-substituted') and "hd-flag" = True then quantity end),1) as "hd_subs_den",
sum(case when "bill-flag" = 'gross' and "substitution-status-g" = 'ga-substituted' and "goodaid-availablity-flag"='available' then quantity end) as "ga_subs_num",
NVL(sum(case when "bill-flag" = 'gross' and "goodaid-availablity-flag"='available' and "substitution-status" in ('ga-substituted', 'substituted', 'not-substituted') then quantity end),1) as "ga_subs_den",
sum(case when "bill-flag" = 'return' then "revenue-value" end) as "return-value",
sum(case when "bill-flag" = 'gross' then "revenue-value" end) as "gross-revennue",
count(distinct case when "promo-code" = 'BOGO' and "bill-flag" = 'gross' then "bill-id" end) as "bogo-bills",
sum("revenue-value") as revenue,
sum(case when "pr-flag" =True then "revenue-value" end) as "pr_sales",
sum(case when "hd-flag" =True then "revenue-value" end) as "hd_sales",
sum(case when "company-id" =6984 then "revenue-value" end) as "goodaid_sales",
sum(case when "ecom-flag" =True then "revenue-value" end) as "ecomm_sales",
sum(case when "type" ='generic' then "revenue-value" end) as "generic_sales",
count(DISTINCT case when "hd-flag" =True and "bill-flag" = 'gross' then "bill-id" end) as "hd_bills",
count(distinct case when "bill-flag" = 'gross' then "bill-id" end) as "NOB",
sum(case when "bill-flag" = 'gross' then "revenue-value" end)*1.0/NVL(count(distinct case when "bill-flag" = 'gross' then "bill-id" end),1)*1.0 as "ABV"
from
"prod2-generico"."sales"
where "created-date">='{date1}'
and "created-date"<='{date2}'
group by
"store-id" ,
"created-date";
"""
sales_data = rs_db.get_df(sd)
sales_data.columns = [c.replace('-', '_') for c in sales_data.columns]
sales_data['date'] = sales_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=sales_data,on=['store_id','date'],how='left')
# =============================================================================
# Missed Call info
# =============================================================================
msc = f"""
SELECT
scle."store-id",
date(scle."date-time") as "date",
count(case when scle."call-type" = 'MISSED' then scle."call-type" end) as "missed_calls",
count(scle."call-type") as "total_received_calls"
FROM
"prod2-generico"."store-call-logs-entries" scle
where
scle."call-type" in ('INCOMING',
'MISSED')
and date(scle."date-time") >= '{date1}'
and date(scle."date-time") <= '{date2}'
group by
scle."store-id",
date(scle."date-time");
"""
missed_call = rs_db.get_df(msc)
missed_call.columns = [c.replace('-', '_') for c in missed_call.columns]
missed_call['date'] = missed_call['date'].astype('str')
m_data = pd.merge(left=m_data,right=missed_call,on=['store_id','date'],how='left')
# =============================================================================
# Calling dashboard
# =============================================================================
call = f"""
select
cd."store-id" ,
date(cd."created-at") as "date",
count(distinct cd.id) as "target_calls",
count(distinct case when ch.id is not null then cd.id end) as "actual_calls",
count(distinct case when cd."backlog-days-count">0 then cd.id end) as "backlog_days_flag"
from
"prod2-generico"."calling-dashboard" cd
left join "prod2-generico"."calling-history" ch on
cd.id = ch."calling-dashboard-id"
where
date(cd."created-at")>= '{date1}'
and date(cd."created-at")<= '{date2}'
group by
cd."store-id" ,
date(cd."created-at");
"""
calling_data = rs_db.get_df(call)
calling_data.columns = [c.replace('-', '_') for c in calling_data.columns]
calling_data['date'] = calling_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=calling_data,on=['store_id','date'],how='left')
# =============================================================================
# NPI
# =============================================================================
npi = f"""
select
nrt."store-id" ,
date(nrt."store-return-created-at") as "date",
avg(DATEDIFF ('h', nrt."npi-added-in-store-at", nrt."check-created-at" )) as "hours-to-start-scanning",
avg(DATEDIFF ('h', nrt."npi-added-in-store-at", nrt."store-return-created-at" )) as "hours-to-mark-store-return"
from
"prod2-generico"."npi-returns-tracking" nrt
where date(nrt."store-return-created-at")>='{date1}'
and date(nrt."store-return-created-at")<= '{date2}'
group by
nrt."store-id",
date(nrt."store-return-created-at");
"""
npi_data = rs_db.get_df(npi)
npi_data.columns = [c.replace('-', '_') for c in npi_data.columns]
npi_data['date'] = npi_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=npi_data,on=['store_id','date'],how='left')
# =============================================================================
# Cluster FF
# =============================================================================
cff = f"""
Select
date(pso."created-at") AS "date",
-- PSO Created at
pstm."from-store-id" AS "store-id" ,
--pstm."to-store-id" AS "destination-store-id",
-- max(c.id) as "cluster-id" ,
-- max(sos."name") AS "source_store",
-- max(des."name") AS "destination_store",
-- max(pstm."item-quantity") AS "to-be-transferred-qty",
-- SUM(sti."quantity") as "actual-transferred-qty",
-- pso."status" as "pso-status",
-- pstm."status" AS "tn-status",
-- st."status" AS "st-status",
-- pso."drug-id" ,
-- pso."drug-name" ,
-- max(pstm.id) AS "pstm-id",
-- max(pstm."is-active") as "is-active",
avg(DATEDIFF ('h', pstm."created-at", st."initiated-at" )) as "hrs_cluster_order_ready_for_pickup",
avg(DATEDIFF ('h', pstm."created-at", st."transferred-at" )) as "hrs_cluster_biker_picked_up_order",
avg(DATEDIFF ('h', pstm."created-at", st."received-at" )) as "hrs_cluster_store_received_order"
-- PSO Created at
FROM "prod2-generico"."pso-stock-transfer-mapping" pstm
LEFT JOIN "prod2-generico"."stock-transfers-1" st on
pstm."stock-transfer-id" = st.id
Left JOIN "prod2-generico"."pso-stock-transfer-inventory-mapping" pstim ON
pstm.id = pstim."pso-stock-transfer-mapping-id"
LEFT JOIN "prod2-generico"."stock-transfer-items-1" sti ON
pstim."inventory-id" = sti."inventory-id"
AND st.id = sti."transfer-id"
Left join "prod2-generico"."patients-store-orders" pso ON
pstm."patient-store-order-id" = pso.id
left join "prod2-generico"."store-clusters" sc on
pstm."from-store-id" = sc."store-id"
left join "prod2-generico".stores sos on
pstm."from-store-id" = sos.id
left join "prod2-generico".stores des on
pstm."to-store-id" = des.id
inner join "prod2-generico".clusters c on
sc."cluster-id" = c.id
and sc."is-active" = 1
WHERE
sc."cluster-id" is not null
AND date(pso."created-at") >= '{date1}'
and date(pso."created-at") <= '{date2}'
GROUP BY pstm."from-store-id",
date(pso."created-at");
"""
cluster_data = rs_db.get_df(cff)
cluster_data.columns = [c.replace('-', '_') for c in cluster_data.columns]
cluster_data['date'] = cluster_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=cluster_data,on=['store_id','date'],how='left')
# =============================================================================
# as - pr otif
# =============================================================================
aspr = f"""
select
"store-id" ,
date("created-at") as "date",
sum(case
when "as-ms-pr" = 'as' then "requested-quantity"
end) as "as_requested_qty",
sum(case
when "as-ms-pr" = 'pr' then "requested-quantity"
end) as "pr_requested_qty",
sum(case
when "as-ms-pr" = 'as'
and date("store-delivered-at")!= '0101-01-01'
and "store-delivered-at" < "delivery-tat" then "requested-quantity"
end) as "as_otif_qty",
sum(case
when "as-ms-pr" = 'pr'
and date("store-delivered-at")!= '0101-01-01'
and "store-delivered-at" < "delivery-tat" then "requested-quantity"
end) as "pr_otif_qty"
from
"prod2-generico"."sb-sla"
where date("created-at")>='{date1}'
and date("created-at")<= '{date2}'
group by "store-id" ,date("created-at");
"""
aspr_data = rs_db.get_df(aspr)
aspr_data.columns = [c.replace('-', '_') for c in aspr_data.columns]
aspr_data['date'] = aspr_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=aspr_data,on=['store_id','date'],how='left')
# =============================================================================
# store opening closing
# =============================================================================
s_date = f"""
select
date("created-at") as "date",
"store-id" ,
min("created-at") as "first_search",
max("created-at") as "last_search"
from
"prod2-generico"."searches"
group by
date("created-at"),
"store-id";
"""
opening_data = rs_db.get_df(s_date)
opening_data.columns = [c.replace('-', '_') for c in opening_data.columns]
opening_data['date'] = opening_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=opening_data,on=['store_id','date'],how='left')
# =============================================================================
# store info
# =============================================================================
s_info = f"""
select
id as "store-id",
store ,
"line-manager" ,
abo ,
city ,
"franchisee-name",
acquired ,
"cluster-name" ,
"old-new-static"
from
"prod2-generico"."stores-master";
"""
store_info = rs_db.get_df(s_info)
store_info.columns = [c.replace('-', '_') for c in store_info.columns]
m_data = pd.merge(left=m_data,right=store_info,on=['store_id'],how='left')
# =============================================================================
# PR wholeness
# =============================================================================
pro = f"""
select
pr."store-id" ,
date(pr."turnaround-time") as "date",
sum(case when pr."pso-status" != 'pso-draft' then pr."selling-rate" end) as "pr_created_value",
sum(case when pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end)<= pr."turnaround-time" then pr."selling-rate" else 0 end) as "within_slot_delivered_pr_value",
sum(case when pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end) is not null then pr."selling-rate" end) as "total_delivered_pr_value",
count(distinct case when pr."pso-status" != 'pso-draft' then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0) )::text end) as "pr_created_count",
count(distinct case when pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end)<= pr."turnaround-time" then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0)::text ) else null end) as "within_slot_delivered_count",
count(distinct case when pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end) is not null then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0) )::text end) as "total_delivered_pr_count",
sum(case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' then pr."selling-rate" end) as "pr_created_value_delivery",
sum(case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end)<= pr."turnaround-time" then pr."selling-rate" else 0 end) as "within_slot_delivered_pr_value_delivery",
sum(case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end) is not null then pr."selling-rate" end) as "total_delivered_pr_value_delivery",
count(distinct case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0) )::text end) as "pr_created_count_delivery",
count(distinct case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end)<= pr."turnaround-time" then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0)::text ) else null end) as "within_slot_delivered_count_delivery",
count(distinct case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end) is not null then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0) )::text end) as "total_delivered_pr_count_delivery"
from
"prod2-generico"."patient-requests-metadata" pr
left join "prod2-generico"."home-delivery-metadata" hdm
on
hdm.id = pr.id
where
date(pr."created-at") >= '{date1}'
and date(pr."created-at") <= '{date2}'
group by
1,
2;
"""
pro_data = rs_db.get_df(pro)
pro_data.columns = [c.replace('-', '_') for c in pro_data.columns]
pro_data['date'] = pro_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=pro_data,on=['store_id','date'],how='left')
# =============================================================================
# Search to pr conversion
# =============================================================================
search = f"""
select date("search-date") as "date", "store-id",
sum(case when "pr-opportunity-converted-flag"=1 then "lost-sales" end) as "pr_achieved_sales",
sum(case when "pr-opportunity-converted-flag"=1 then "loss-quantity" end) as "pr_achieved_qty",
sum(case when "pr-opportunity-flag" =1 then "lost-sales" end) as "search_loss_sales",
sum(case when "pr-opportunity-flag" =1 then "loss-quantity" end) as "search_loss_qty"
from "prod2-generico"."cfr-searches-v2"
where
date("search-date") >= '{date1}'
and date("search-date") <= '{date2}'
group by
date("search-date"),"store-id" ;
"""
search_data = rs_db.get_df(search)
search_data.columns = [c.replace('-', '_') for c in search_data.columns]
search_data['date'] = search_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=search_data,on=['store_id','date'],how='left')
# =============================================================================
# cash tally data
# =============================================================================
ctally = f"""
select
"store-id" ,
date,
max("created-at") as max_cash_tally_date
from
"prod2-generico"."cash-tally" where
date("date") >= '{date1}'
and date("date") <= '{date2}'
group by
"store-id" ,
date;
"""
ctally_data = rs_db.get_df(ctally)
ctally_data.columns = [c.replace('-', '_') for c in ctally_data.columns]
ctally_data['date'] = ctally_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=ctally_data,on=['store_id','date'],how='left')
# =============================================================================
# Expiry Sales value
# =============================================================================
exp = f"""
select
si."snapshot-date" as "date",
si."entity-id" as "store-id" ,
SUM(case when si."inventory-sub-type-1" = 'expired'
then si."value-with-tax" end ) as "expired-value",
SUM(case when si."inventory-sub-type-1" = 'near-expiry'
then si."value-with-tax" end ) as "near-expiry-value"
from
"prod2-generico"."system-inventory" si
where
si."entity-type" = 'store'
and date(si."snapshot-date") >= '{date1}'
and date(si."snapshot-date") <= '{date2}'
group by
si."snapshot-date" ,
si."entity-id" ;
"""
exp_data = rs_db.get_df(exp)
exp_data.columns = [c.replace('-', '_') for c in exp_data.columns]
exp_data['date'] = exp_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=exp_data,on=['store_id','date'],how='left')
# =============================================================================
# PSO draft conversion %
# =============================================================================
draft = f"""
select
date("created-at") as "date",
"store-id" ,
sum(case when "pso-parent-id" is not null then 1 else 0 end) as "pso-draft-count",
sum(case when "pso-parent-id" is not null and status != 'pso-draft' then 1 else 0 end) as "pso-draft-converted-count"
from
"prod2-generico"."patients-store-orders" pso
where
date("created-at")>= '{date1}'
and date("created-at")<= '{date2}'
group by
1,
2 ;
"""
draft_data = rs_db.get_df(draft)
draft_data.columns = [c.replace('-', '_') for c in draft_data.columns]
draft_data['date'] = draft_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=draft_data,on=['store_id','date'],how='left')
m_data.info()
# Write to Redshift Also
m_data.columns = [c.replace('_', '-') for c in m_data.columns]
m_data.columns
m_data = m_data[[
'store-id' ,'date' ,'pr-received-tat' ,'as-received-tat'
,'lp-sales-sum' ,'lp-value'
,'lp-pr-sales' ,'min-count-oos-ax' ,'total-drug-count-oos-ax' ,'goodaid-min-count'
,'goodaid-total-count' ,'total-drug-count-oos' ,'total-oos-drug-count-oos' ,'feedback-bills-pct'
,'flag-rating' ,'subs-num' ,'subs-den' ,'hd-subs-num' ,'hd-subs-den'
,'ga-subs-num' ,'ga-subs-den' ,'return-value' ,'gross-revennue'
,'bogo-bills' ,'revenue' ,'pr-sales' ,'hd-sales'
,'goodaid-sales' ,'ecomm-sales' ,'hd-bills' ,'nob'
,'abv' ,'missed-calls' ,'total-received-calls' ,'target-calls'
,'actual-calls' ,'hours-to-start-scanning' ,'hours-to-mark-store-return' ,'hrs-cluster-order-ready-for-pickup'
,'hrs-cluster-biker-picked-up-order' ,'hrs-cluster-store-received-order' ,'as-requested-qty' ,'pr-requested-qty'
,'as-otif-qty' ,'pr-otif-qty' ,'first-search' ,'last-search'
,'store' ,'line-manager' ,'abo' ,'city' ,'franchisee-name'
,'acquired' ,'cluster-name' ,'old-new-static' ,'pr-created-value'
,'within-slot-delivered-pr-value' ,'total-delivered-pr-value' ,'pr-created-count' ,'within-slot-delivered-count'
,'total-delivered-pr-count','pr-achieved-sales' ,'pr-achieved-qty'
,'search-loss-sales' ,'search-loss-qty','feedback-bills' ,'max-cash-tally-date', 'backlog-days-flag',
'pr-created-value-delivery'
, 'within-slot-delivered-pr-value-delivery', 'total-delivered-pr-value-delivery', 'pr-created-count-delivery', 'within-slot-delivered-count-delivery'
, 'total-delivered-pr-count-delivery', 'generic-sales','expired-value', 'actual-value',
'accounted-value', 'billing-page-value', 'merchandizing-issue', 'pso-draft-count', 'pso-draft-converted-count', 'near-expiry-value'
]]
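# the target table is deleted in full and reloaded on every run, so it only ever holds
# the [date1, date2] window computed at the top of the script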
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=m_data[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/store-ops-metrics/store-ops-metrics.py | store-ops-metrics.py |
import argparse
import datetime
import sys
import os
import boto3
import time
sys.path.append('../../../..')
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-ss', '--schema_name', default="", type=str, required=False)
parser.add_argument('-d', '--date_prefix', default="NA", type=str, required=False)
parser.add_argument('-ndo', '--n_day_old', default="NA", type=str, required=False)
parser.add_argument('-lt', '--list_of_tables', default=None, type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-rc', '--reason_code', default="NA", type=str, required=False)
parser.add_argument('-p', '--purpose', default="NA", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
schema_name = args.schema_name
list_of_tables = args.list_of_tables
email_to = args.email_to
reason_code = args.reason_code
date_prefix = args.date_prefix
n_day_old = args.n_day_old
purpose = args.purpose
logger.info(f"schema_name: {schema_name}")
logger.info(f"list_of_tables: {list_of_tables}")
logger.info(f"email_to: {email_to}")
logger.info(f"reason_code: {reason_code}")
logger.info(f"date_prefix: {date_prefix}")
logger.info(f"n_day_old: {n_day_old}")
logger.info(f"purpose: {purpose}")
list_of_tables = list_of_tables.split(",")
if not schema_name:
raise Exception("Please provide schema name !!!")
if not list_of_tables:
raise Exception("Please provide list of tables")
""" our aim is to decide the date_prefix, using date_prefix or n_day_old or purpose """
if date_prefix == "NA":
""" since glue gives error while passing empty string so, NA is used for empty string """
date_prefix = ""
""" n_day_old: n day old tables to be deleted """
if n_day_old and n_day_old != "NA":
n_day_old = int(n_day_old)
old_date = datetime.datetime.now() + datetime.timedelta(days=-n_day_old)
date_prefix = old_date.strftime("%Y-%m-%d")
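# precedence: an n_day_old value overrides any explicit date_prefix, and the purpose presets
# below override both (they also fix the table list and the email recipient)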
if purpose in ("INV_LEDGER", "MIS"):
if purpose == "INV_LEDGER":
""" for daily inventory ledger calculation related tables """
email_to = "[email protected]"
old_date = datetime.datetime.now() + datetime.timedelta(days=-2)
date_prefix = old_date.strftime("%Y-%m-%d")
list_of_tables = ["inventory-1", "invoice-items-1", "invoices-1", "customer-return-items-1",
"customer-returns-1", "stock-transfer-items-1", "stock-transfers-1", "bill-items-1",
"bills-1", "return-items-1", "returns-to-dc-1", "deleted-invoices", "deleted-invoices-1",
"inventory-changes-1", "cluster-tasks"]
if purpose == "MIS":
""" for monthly mis related calculation """
email_to = "[email protected]"
old_date = datetime.datetime.now() + datetime.timedelta(days=-1)
date_prefix = old_date.strftime("%Y-%m-%d")
list_of_tables = ["bill-items-1", "bills-1", "customer-return-items-1", "customer-returns-1", "debit-notes",
"debit-notes-1", "delivery-tracking", "distributors", "drugs", "inventory", "inventory-1",
"invoice-items", "invoice-items-1", "invoices", "invoices-1", "patient-requests",
"patients-store-orders", "return-items", "return-items-1", "stores", "store-slots",
"store-dc-mapping", "returns-to-dc"]
if reason_code == "NA":
""" since glue gives error while passing empty string so, NA is used for empty string """
reason_code = ""
email = Email()
rs_db = DB(read_only=False)
rs_db.open_connection()
date_prefixes = date_prefix.split(",")
for date_prefix in date_prefixes:
for i in list_of_tables:
logger.info(f"started dropping table: {i}")
new_table_name = i
if reason_code:
new_table_name += f"-{reason_code}"
if date_prefix:
new_table_name += f"-{date_prefix}"
table_info = helper.get_table_info(db=rs_db, table_name=new_table_name, schema=schema_name)
if isinstance(table_info, type(None)):
logger.info(f"Table: {new_table_name} is absent, not need to drop.")
else:
logger.info(f"Table exists: {new_table_name}, so needs to drop.")
q = f"""
drop table "{schema_name}"."{new_table_name}";
"""
rs_db.execute(query=q)
logger.info(f"table dropped successfully: {new_table_name}")
""" closing the DB connection in the end """
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/drop-tables/drop-tables.py | drop-tables.py |
"""
To fetch past playstore reviews from the Google Play Console exports (staged on S3) into the DB
"""
import argparse
import os
import sys
from io import StringIO
import datetime
import dateutil
from dateutil.tz import gettz
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.db.db import DB
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-d', '--full_run', default=0, type=int, required=False)
parser.add_argument('-l', '--max_month', default=6, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
full_run = args.full_run
max_month = args.max_month
os.environ['env'] = env
logger = get_logger()
logger.info(f"info message")
logger.info(f"full_run: {full_run}")
rs_db = DB(read_only=False)
rs_db.open_connection()
reviews = pd.DataFrame()
s3 = S3()
logger.info(f"max_month: {max_month}")
if full_run == 1:
for year in (21, 22):
for month in range(1, 12):
if month > max_month and year == 22:
""" stopping """
continue
uri = f"s3://aws-glue-temporary-921939243643-ap-south-1/playstore-reviews/reviews_reviews_com.zenohealth.android_20{year}{str(month).zfill(2)}.csv"
logger.info(f"uri: {uri}")
csv_string = s3.get_file_object(uri=uri, encoding="utf-16")
df = pd.read_csv(StringIO(csv_string))
df['month'] = str(month).zfill(2)
df['year'] = str(year)
reviews = pd.concat([reviews, df], ignore_index=True)
else:
last_month_date = datetime.datetime.now() - datetime.timedelta(days=30)
last_year = last_month_date.strftime("%Y")[2:]
last_month = last_month_date.strftime("%m")
logger.info(f"last_month_date: {last_month_date} last year : {last_year} last month : {last_month} ")
uri = f"s3://aws-glue-temporary-921939243643-ap-south-1/playstore-reviews/reviews_reviews_com.zenohealth.android_20" \
f"{last_year}{str(last_month).zfill(2)}.csv"
logger.info(f"uri: {uri}")
csv_string = s3.get_file_object(uri=uri, encoding="utf-16")
reviews = pd.read_csv(StringIO(csv_string))
reviews['month'] = str(last_month)
# reviews['month'] = reviews['month'].astype('str', errors='ignore')[2:]
reviews['year'] = str(last_year)
# reviews['year'] = reviews['year'].astype('str', errors='ignore')
columns = [c.replace(" ", "-").lower() for c in reviews.columns]
reviews.columns = columns
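# the loop below coerces malformed timestamps to NaT and blanks them so the Redshift load
# does not fail on bad values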
for col in ['review-submit-date-and-time', 'review-last-update-date-and-time','developer-reply-date-and-time']:
reviews[col] = pd.to_datetime(reviews[col], errors='coerce').dt.strftime('%Y-%m-%d %H:%M:%S')
reviews[col] = reviews[col].replace('NaT', '')
reviews['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
reviews['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
reviews['created-by'] = 'etl-automation'
reviews['updated-by'] = 'etl-automation'
# Table info
schema = 'prod2-generico'
table_name = 'ecomm-playstore-old-reviews'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
logger.info(f"Table:{table_name} exists and input data has all columns")
if full_run == 1:
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
else:
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where "month">={last_month} and "year">={last_year}'''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=reviews[table_info['column_name']], table_name=table_name, db=rs_db, schema=schema)
logger.info("Pushed reviews successfully")
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ecomm-playstore-old-reviews/ecomm-playstore-old-reviews.py | ecomm-playstore-old-reviews.py |
#!/usr/bin/env python
# coding: utf-8
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
from datetime import datetime, timedelta
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-dd', '--doh_days', default=60, type=int, required=False)
parser.add_argument('-sd', '--sold_days', default=90, type=int, required=False)
parser.add_argument('-ed', '--expiry_days', default=210, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
doh_days = args.doh_days
sold_days = args.sold_days
expiry_days = args.expiry_days
os.environ['env'] = env
logger = get_logger(level = 'INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
start_time = datetime.now()
logger.info('Script Manager Initialized')
logger.info("")
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("doh_days - " + str(doh_days))
logger.info("sold_days - " + str(sold_days))
logger.info("expiry_days -" + str(expiry_days))
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
# =============================================================================
# NPI at store
# =============================================================================
npi_store_query = """
select
*
from
"prod2-generico"."dead-stock-inventory" dsi
where
"inventory-type" = 'Rotate'
"""
npi_store = rs_db.get_df(npi_store_query)
logger.info("Fetched NPI from dead-stock-inventory - lines -{}".format(len(npi_store)))
# =============================================================================
# Fetching Store-Clusters List
# =============================================================================
s0 = """
SELECT
s."store-id",
sc."cluster-id"
FROM
(
SELECT
s.id as "store-id"
FROM
"prod2-generico".stores s
WHERE
s.category = 'retail' )s
left join (
SELECT
sc."store-id" ,
sc."cluster-id"
FROM
"prod2-generico"."store-clusters" sc
WHERE
sc."is-active" = 1)sc
ON
s."store-id" = sc."store-id"
"""
clust_store = rs_db.get_df(s0)
logger.info("Fetched store-cluster combo")
npi_store = pd.merge(npi_store, clust_store[['store-id', 'cluster-id']], on='store-id', how='left')
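# stores without an active cluster keep a null cluster-id here; they are skipped by the
# cluster-sales loop below and end up with clus-sales-qty = 0 after the later merge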
# =============================================================================
# Adding Cluster Sold Quantity
# =============================================================================
cluster_sold_total = pd.DataFrame()
for cluster_id in tuple(map(int, (list(clust_store[clust_store['cluster-id'].notna()]['cluster-id'].astype(int).unique())))):
s00 = """
select
sc."store-id"
from
"prod2-generico"."store-clusters" sc
where
sc."is-active" = 1
and sc."cluster-id" in ({cluster_id})
""".format(cluster_id=cluster_id)
dist_stores = rs_db.get_df(s00)
stores_in_cluster = tuple(map(int, dist_stores['store-id'].values.tolist()))
logger.info('stores in cluster {}-{}'.format(cluster_id, stores_in_cluster))
drgs = tuple(map(int, (list(npi_store[npi_store['cluster-id'] == cluster_id]['drug-id'].unique()))))
s1 = """
select
'{cluster_id}' as "cluster-id",
"drug-id",
sum("net-quantity") as "clus-sales-qty"
from
"prod2-generico"."sales" sh
where
"store-id" in {stores_in_cluster}
-- and "created-date" >= '2022-04-14'
and date("created-at") >=dateadd(d,-{sold_days},current_date)
and "drug-id" in {drgs}
group by
"drug-id"
""".format(cluster_id=cluster_id, stores_in_cluster=stores_in_cluster, sold_days=sold_days, drgs=drgs)
cluster_sold = rs_db.get_df(s1)
    cluster_sold_total = pd.concat([cluster_sold_total, cluster_sold], ignore_index=True)
logger.info('cluster-{},Cluster_sold_added'.format(cluster_id))
npi_store[['cluster-id', 'drug-id']]=npi_store[['cluster-id', 'drug-id']].apply(pd.to_numeric, errors='ignore').astype('Int64')
cluster_sold_total[['cluster-id', 'drug-id']]=cluster_sold_total[['cluster-id', 'drug-id']].apply(pd.to_numeric, errors='ignore').astype('Int64')
# =============================================================================
# Adding Flags - Cluster sold, Shelf life more than 6 months
# =============================================================================
npi_store = pd.merge(npi_store, cluster_sold_total, on=['cluster-id', 'drug-id'], how='left')
npi_store['days-to-expire'] = (pd.to_datetime(npi_store['expiry'])-datetime.today()).dt.days
def timecheck(a):
if a > expiry_days:
return 1
else:
return 0
npi_store['shelf-life-more-than-6-months-flag'] = npi_store['days-to-expire'].apply(timecheck)
npi_store['clus-sales-qty'].fillna(0, inplace=True)
npi_store['clus-sales-qty'] = npi_store['clus-sales-qty'].astype(int)
def clustsoldcheck(a):
if a == 0:
return 0
else:
return 1
npi_store['clust-sold-flag'] = npi_store['clus-sales-qty'].apply(clustsoldcheck)
logger.info("Added Flags in dead_stock_inventory - Current lines -{}".format(len(npi_store)))
npi_store_summary = npi_store[npi_store['shelf-life-more-than-6-months-flag']==1]
npi_store_summary = npi_store_summary[npi_store_summary['clust-sold-flag']==0]
npi_store_summary = npi_store_summary.groupby('store-id').agg({'store-name':'first',
'quantity':'sum',
'locked-quantity':'sum',
'value':'sum',
'locked-value':'sum'}).reset_index()
npi_store_summary['sns-time'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
npi_store['created-at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
npi_store['created-by']= '[email protected]'
npi_store['updated-at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
npi_store['updated-by'] = '[email protected]'
npi_store_summary =npi_store_summary[['store-id','store-name','quantity','locked-quantity','value','locked-value','sns-time']]
npi_store_summary['created-at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
npi_store_summary['created-by']= '[email protected]'
npi_store_summary['updated-at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
npi_store_summary['updated-by'] = '[email protected]'
npi_store[['invoice-id','invoice-item-id','distributor-id','short-book-id']] = npi_store[['invoice-id','invoice-item-id','distributor-id','short-book-id']].apply(pd.to_numeric, errors='ignore').astype('Int64')
# =============================================================================
# writing to Redshift
# =============================================================================
schema = 'prod2-generico'
table_name = 'npi-inventory-at-store'
table_name2 = 'npi-inventory-at-store-sns-last-3-month'
table_name3 = 'npi-inventory-at-store-sns-summary'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
table_info2 = helper.get_table_info(db=rs_db_write, table_name=table_name2, schema=schema)
table_info3 = helper.get_table_info(db=rs_db_write, table_name=table_name3, schema=schema)
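# the status flags gate the three writes sequentially: the monthly snapshot and summary tables
# are written only if the preceding write succeeded, and the closing email reports Failed otherwise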
status2 = False
status1 = False
status3 = False
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' delete
from "{schema}"."{table_name}" '''
rs_db_write.execute(truncate_query)
logger.info(str(table_name) + ' table deleted')
s3.write_df_to_db(df=npi_store[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
status2 = True
if status2:
if isinstance(table_info2, type(None)):
raise Exception(f"table: {table_name2} do not exist, create the table first")
else:
logger.info(f"Table:{table_name2} exists")
npi_store['sns-time']= datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
delete_main_query = f'''
delete
from
"{schema}"."{table_name2}"
where
extract(mon from "sns-time") = extract(mon from current_date) '''
rs_db_write.execute(delete_main_query)
logger.info(str(table_name2) + " table data deleted for the current month's entries")
delete_main_query = f'''
delete
from
"{schema}"."{table_name2}"
where
DATE("sns-time")< DATE(dateadd(d,-125,current_date)) '''
rs_db_write.execute(delete_main_query)
logger.info(str(table_name2) + ' table data older than 4 months deleted')
s3.write_df_to_db(df=npi_store[table_info2['column_name']], table_name=table_name2, db=rs_db_write,
schema=schema)
logger.info(str(table_name2) + ' table uploaded')
status1 = True
if status1:
logger.info(f"Table:{table_name2} exists")
delete_main_query = f'''
delete
from
"{schema}"."{table_name3}"
where
extract(mon from "sns-time") = extract(mon from current_date) '''
rs_db_write.execute(delete_main_query)
logger.info(str(table_name3) + " table data deleted for the current month's entries")
s3.write_df_to_db(df=npi_store_summary[table_info3['column_name']], table_name=table_name3, db=rs_db_write,
schema=schema)
logger.info(str(table_name3) + ' table uploaded')
status3 = True
if status3 is True:
status = 'Success'
else:
status = 'Failed'
# logger.close()
end_time = datetime.now()
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject=f"{env}-{status} : {table_name} table updated",
mail_body=f"{table_name2} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection()

# Source file: glue-jobs/src/scripts/npi/npi-inventory-at-store.py (package zeno-etl-libs)
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
from zeno_etl_libs.utils.doid_write import doid_custom_write
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-sku', '--sku_to_add_per_round', default=50, type=int, required=False)
parser.add_argument('-si', '--store_id_to_close', default=330, type=str, required=False)
parser.add_argument('-ccf', '--cold_chain_flag', default=0, type=str, required=False)
parser.add_argument('-dts', '--date_to_start', default='2022-09-11', type=str, required=False)
parser.add_argument('-lsos3', '--list_name_on_s3', default='NPI_Palghar_list_upload', type=str, required=False)
parser.add_argument('-ssmm', '--change_ss_min_max_to_zero_flag', default=0, type=int, required=False)
parser.add_argument('-bif', '--block_ipc_flag', default=0, type=int, required=False)
parser.add_argument('-bind', '--block_ipc_for_n_days', default=30, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
sku_to_add_per_round = args.sku_to_add_per_round
store_id_to_close = args.store_id_to_close
cold_chain_flag = args.cold_chain_flag
date_to_start = args.date_to_start
list_name_on_s3 = args.list_name_on_s3
change_ss_min_max_to_zero_flag = args.change_ss_min_max_to_zero_flag
block_ipc_flag = args.block_ipc_flag
block_ipc_for_n_days = args.block_ipc_for_n_days
store_id_to_close = int(store_id_to_close)
os.environ['env'] = env
logger = get_logger(level='INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
mysql_read = MySQL()
mysql_read.open_connection()
s3 = S3()
start_time = datetime.datetime.now()
logger.info('Script Manager Initialized')
logger.info("")
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("sku_to_add_per_round - " + str(sku_to_add_per_round))
logger.info("store_id_to_close - " + str(store_id_to_close))
logger.info("cold_chain_flag - " + str(cold_chain_flag))
logger.info("date_to_start - " + str(date_to_start))
logger.info("list_name_on_s3 - " + str(list_name_on_s3))
logger.info("change_ss_min_max_to_zero_flag - " + str(change_ss_min_max_to_zero_flag))
logger.info("block_ipc_flag - " + str(block_ipc_flag))
logger.info("block_ipc_for_n_days - " + str(block_ipc_for_n_days))
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
store_last_sataus_query = """
select
*
from
(
select
row_number() over (partition by nd.`store-id`
order by
nd.`created-at` desc
) as `row`,
nd.`store-id`,
nd.status ,
nd.`created-at`
from
`npi-drugs` nd
where nd.`store-id` = {store_id_to_close}) nd
where
nd.`row` = 1
""".format(store_id_to_close=store_id_to_close)
store_last_sataus = pd.read_sql_query(store_last_sataus_query, mysql_read.connection)
if (len(store_last_sataus)==0) or (store_last_sataus.loc[0,'status']=='completed'):
# Getting npi list
npi_drug_list =pd.read_csv(s3.download_file_from_s3(file_name=f"npi_add_by_manual_list/{list_name_on_s3}.csv"))
npi_drug_list= npi_drug_list[['store-id','drug-id']]
drugs = tuple(map(int,npi_drug_list['drug-id'].unique()))
store_drug_prod_inv_query = '''
SELECT
i.`drug-id`,
d.`type` ,
d.`pack-form` ,
d.`cold-chain` ,
sum(i.quantity) as 'quantity'
FROM
`inventory-1` i
left join drugs d on i.`drug-id` = d.id
WHERE
i.`store-id` = {store_id_to_close}
and i.`drug-id` in {drugs}
group by
i.`drug-id`,
d.`type` ,
d.`pack-form` ,
d.`cold-chain`
'''.format(store_id_to_close=store_id_to_close,drugs=drugs)
store_drug_prod_inv = pd.read_sql_query(store_drug_prod_inv_query, mysql_read.connection)
npi_drug_list = npi_drug_list.merge(store_drug_prod_inv,on = 'drug-id', how = 'left')
npi_drug_list['quantity'] = npi_drug_list['quantity'].fillna(0)
npi_drug_list = npi_drug_list[npi_drug_list['quantity']>0]
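# only drugs with positive on-hand quantity at the store are considered for the NPI push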
store_drug_prod_query = '''
select
`store-id` ,
`drug-id`,
1 as `dummy`
from
`npi-drugs` nd
where
date(nd.`created-at`) >= '{date_to_start}'
and nd.`store-id` = {store_id_to_close}
'''.format(store_id_to_close=store_id_to_close,date_to_start=date_to_start)
store_drug_prod = pd.read_sql_query(store_drug_prod_query, mysql_read.connection)
# store_drug_prod_query = '''
# select
# "store-id" ,
# "drug-id",
# 1 as "dummy"
# from
# "prod2-generico"."npi-drugs" nd
# where
# date(nd."created-at") >= '{date_to_start}'
# and nd."store-id" = {store_id_to_close}
# '''.format(store_id_to_close=store_id_to_close,date_to_start=date_to_start)
# store_drug_prod = rs_db.get_df(store_drug_prod_query)
# merging prod and DSS to avoid duplicate entries
npi_drug_list = npi_drug_list.merge(store_drug_prod, how='left', on=['store-id', 'drug-id'])
npi_drug_list = npi_drug_list.replace(np.nan, 0)
npi_drug_list = npi_drug_list[npi_drug_list.dummy == 0]
audit_drug_prod_query = '''
SELECT
a."store-id" ,
a."drug-id" ,
1 as dummy_audit
from
(
select
b."store-id" ,
a."drug-id" ,
1 as dummy,
ROW_NUMBER() OVER(PARTITION BY b."store-id" ,
a."drug-id"
ORDER BY
a.id DESC) as "row"
from
"prod2-generico"."inventory-check-items-1" as a
join "prod2-generico"."inventory-check-1" as b on
a."check-id" = b.id
where
b."complete" = 0)a
WHERE
a."row" = 1
'''
audit_drug_prod = rs_db.get_df(audit_drug_prod_query)
logger.info('Read audit_drug_prod - from RS')
# merging with audit drugs to avoid adding drugs that are under an open audit
npi_drug_list = npi_drug_list.merge(audit_drug_prod, how='left', on=['store-id', 'drug-id'])
# replacing nulls with 0 and keeping only drugs that are not part of an open audit
npi_drug_list = npi_drug_list.replace(np.nan, 0)
npi_drug_list = npi_drug_list[npi_drug_list.dummy_audit == 0]
choice = [npi_drug_list['type'] == 'high-value-ethical',
npi_drug_list['type'] == 'ethical',
npi_drug_list['type'] == 'generic',
npi_drug_list['type'] == 'ayurvedic',
npi_drug_list['type'] == 'surgical',
npi_drug_list['type'] == 'category-4',
npi_drug_list['type'] == 'otc',
npi_drug_list['type'] == 'general',
npi_drug_list['type'] == 'baby-food',
npi_drug_list['type'] == 'baby-product',
npi_drug_list['type'] == 'glucose-test-kit',
npi_drug_list['type'] == 'discontinued-products',
npi_drug_list['type'] == 'banned']
select = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
npi_drug_list['sort-type'] = np.select(choice, select, default=999)
choice = [npi_drug_list['pack-form'] == 'STRIP',
npi_drug_list['pack-form'] == 'PACKET',
npi_drug_list['pack-form'] == 'SACHET',
npi_drug_list['pack-form'] == 'TUBE',
npi_drug_list['pack-form'] == 'BOTTLE',
npi_drug_list['pack-form'] == 'TETRA PACK',
npi_drug_list['pack-form'] == 'PRE FILLED SYRINGE',
npi_drug_list['pack-form'] == 'VIAL',
npi_drug_list['pack-form'] == 'CARTRIDGE',
npi_drug_list['pack-form'] == 'JAR',
npi_drug_list['pack-form'] == 'SPRAY BOTTLE',
npi_drug_list['pack-form'] == 'BOX',
npi_drug_list['pack-form'] == 'TIN',
npi_drug_list['pack-form'] == 'AMPOULE',
npi_drug_list['pack-form'] == 'KIT']
select = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
npi_drug_list['sort-pack-form'] = np.select(choice, select, default=999)
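# lower sort keys are preferred: strips and ethical/high-value types come first, so the head(sku_to_add_per_round) cut below picks them first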
npi_drug_list.sort_values([ 'sort-pack-form', 'sort-type'],
ascending=[True,True], inplace=True)
if int(cold_chain_flag) == 0:
npi_drug_list = npi_drug_list[npi_drug_list['cold-chain'] == 0]
logger.info('removing cold chain products')
elif int(cold_chain_flag) == 2:
npi_drug_list = npi_drug_list[npi_drug_list['cold-chain'] == 1]
logger.info('considering only cold chain products')
else:
logger.info('Not caring whether cold chain items are added or not')
npi_drug_list = npi_drug_list.head(sku_to_add_per_round).reset_index(drop=True)
final_list_npi = npi_drug_list[['store-id', 'drug-id']]
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
# inserting data into prod
logger.info("mySQL - Insert starting")
final_list_npi.to_sql(name='npi-drugs', con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=500)
logger.info("mySQL - Insert ended")
mysql_write.close()
if int(change_ss_min_max_to_zero_flag) == 1:
logger.info('start - change SS Min Max to 0')
# set max=0 for npi drugs in DOID
npi_store_drugs = final_list_npi[["store-id", "drug-id"]]
npi_store_drugs.columns = [c.replace('-', '_') for c in npi_store_drugs.columns]
doid_missed_entries = doid_custom_write(npi_store_drugs, logger)
# save email attachements to s3
# curr_date = str(datetime.date.today())
# doid_missed_entries_uri = s3.save_df_to_s3(doid_missed_entries,
# file_name=f"doid_missed_entries_{curr_date}.csv")
logger.info('end - change SS Min Max to 0')
else:
# doid_missed_entries_uri = []
logger.info('Not Changing SS Min Max to 0')
if int(block_ipc_flag) ==1:
logger.info(f'start : block ipc for {block_ipc_for_n_days} days')
# Rotation drugs to be appended in omit_ss_reset table
omit_drug_store = final_list_npi[["drug-id",
"store-id"]].drop_duplicates()
omit_drug_store["updated-at"] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
omit_drug_store["created-at"] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
omit_drug_store["created-by"] = '[email protected]'
omit_drug_store["updated-by"] = '[email protected]'
omit_drug_store["start-date"] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d')
omit_drug_store["end-date"] = (datetime.datetime.now(tz=gettz('Asia/Kolkata')) + datetime.timedelta(
days=block_ipc_for_n_days)).strftime('%Y-%m-%d')
omit_drug_store["is-active"] = 1
omit_drug_store["reason"] = 'NPI'
schema = 'prod2-generico'
table_name = 'omit-ss-reset'
# write rotation drugs to the omit-ss-reset table in Redshift
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
s3.write_df_to_db(df=omit_drug_store[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
logger.info(f'End : block ipc for {block_ipc_for_n_days} days')
else:
logger.info(f'Not Blocking IPC for {block_ipc_for_n_days} days')
# npi_added_uri = s3.save_df_to_s3(df=npi_drug_list, file_name='npi_removal_details_{}.csv'.format(cur_date))
status = 'added'
email = Email()
email.send_email_file(subject=f"{env} : store id - {store_id_to_close} NPI List",
mail_body=f"list-{status},{len(final_list_npi)} SKU Added\n"
f"ipc change flag - {change_ss_min_max_to_zero_flag}, block ipc flag - {block_ipc_for_n_days}, block ipc for {block_ipc_for_n_days} days\n",
to_emails=email_to, file_uris=[])
else:
status = 'not-added'
email = Email()
email.send_email_file(subject=f"{env} : store id - {store_id_to_close} NPI List",
mail_body=f"list-{status},Previos Status - {store_last_sataus.loc[0,'status']}",
to_emails=email_to, file_uris=[])
rs_db.close_connection()
rs_db_write.close_connection()
mysql_read.close()

# Source file: glue-jobs/src/scripts/npi/npi_push_through_manual_list.py (package zeno-etl-libs)
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-sku', '--sku_to_add_per_round', default=25, type=int, required=False)
parser.add_argument('-si', '--store_id_to_close', default=320, type=str, required=False)
parser.add_argument('-ccf', '--cold_chain_flag', default=0, type=str, required=False)
parser.add_argument('-dts', '--date_to_start', default='2022-09-08', type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
sku_to_add_per_round = args.sku_to_add_per_round
store_id_to_close = args.store_id_to_close
cold_chain_flag = args.cold_chain_flag
date_to_start = args.date_to_start
store_id_to_close = int(store_id_to_close)
os.environ['env'] = env
logger = get_logger(level='INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
mysql_read = MySQL()
mysql_read.open_connection()
s3 = S3()
start_time = datetime.datetime.now()
logger.info('Script Manager Initialized')
logger.info("")
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("sku_to_add_per_round - " + str(sku_to_add_per_round))
logger.info("store_id_to_close - " + str(store_id_to_close))
logger.info("cold_chain_flag - " + str(cold_chain_flag))
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
store_last_sataus_query = """
select
*
from
(
select
row_number() over (partition by nd.`store-id`
order by
nd.`created-at` desc
) as `row`,
nd.`store-id`,
nd.status ,
nd.`created-at`
from
`npi-drugs` nd
where nd.`store-id` = {store_id_to_close}) nd
where
nd.`row` = 1
""".format(store_id_to_close=store_id_to_close)
store_last_sataus = pd.read_sql_query(store_last_sataus_query, mysql_read.connection)
if (len(store_last_sataus)==0) or (store_last_sataus.loc[0,'status']=='completed'):
# Getting inventory detail
prod_inventory_query = '''
select
i."store-id" ,
i."drug-id" ,
d."drug-name" ,
d."pack-form" ,
d."type" ,
d."cold-chain" ,
sum(i.quantity) as "quantity",
sum(i.quantity + i."locked-for-check" + i."locked-for-audit" + i."locked-for-return" + i."locked-for-transfer" ) as "quantity-available-physically-at-store"
from
"prod2-generico"."prod2-generico"."inventory-1" i
left join "prod2-generico"."prod2-generico".stores s
on
i."store-id" = s.id
left join "prod2-generico"."prod2-generico".drugs d
on
d.id = i."drug-id"
left join "prod2-generico"."prod2-generico"."invoices-1" i2
on
i."franchisee-invoice-id" = i2.id
where
i."store-id" = {store_id_to_close}
and i2."franchisee-invoice" = 0
and (i.quantity >0
-- or i."locked-for-check" >0
-- or i."locked-for-audit" >0
-- or i."locked-for-return" >0
-- or i."locked-for-transfer" >0
)
group by
i."store-id" ,
i."drug-id" ,
d."drug-name",
d."pack-form" ,
d."type" ,
d."cold-chain"
'''.format(store_id_to_close=store_id_to_close)
npi_drug_list = rs_db.get_df(prod_inventory_query)
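# npi_drug_list now holds the closing store's own (non-franchisee-invoice) stock with positive quantity, one row per drug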
store_drug_prod_query = '''
select
`store-id` ,
`drug-id`,
1 as `dummy`
from
`npi-drugs` nd
where
date(nd.`created-at`) >= '{date_to_start}'
and nd.`store-id` = {store_id_to_close}
'''.format(store_id_to_close=store_id_to_close,date_to_start=date_to_start)
store_drug_prod = pd.read_sql_query(store_drug_prod_query, mysql_read.connection)
# store_drug_prod_query = '''
# select
# "store-id" ,
# "drug-id",
# 1 as "dummy"
# from
# "prod2-generico"."npi-drugs" nd
# where
# date(nd."created-at") >= '{date_to_start}'
# and nd."store-id" = {store_id_to_close}
# '''.format(store_id_to_close=store_id_to_close,date_to_start=date_to_start)
# store_drug_prod = rs_db.get_df(store_drug_prod_query)
# merging prod and DSS to avoid duplicate entries
npi_drug_list = npi_drug_list.merge(store_drug_prod, how='left', on=['store-id', 'drug-id'])
npi_drug_list = npi_drug_list.replace(np.nan, 0)
npi_drug_list = npi_drug_list[npi_drug_list.dummy == 0]
choice = [npi_drug_list['type'] == 'high-value-ethical',
npi_drug_list['type'] == 'ethical',
npi_drug_list['type'] == 'generic',
npi_drug_list['type'] == 'ayurvedic',
npi_drug_list['type'] == 'surgical',
npi_drug_list['type'] == 'category-4',
npi_drug_list['type'] == 'otc',
npi_drug_list['type'] == 'general',
npi_drug_list['type'] == 'baby-food',
npi_drug_list['type'] == 'baby-product',
npi_drug_list['type'] == 'glucose-test-kit',
npi_drug_list['type'] == 'discontinued-products',
npi_drug_list['type'] == 'banned']
select = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
npi_drug_list['sort-type'] = np.select(choice, select, default=999)
choice = [npi_drug_list['pack-form'] == 'STRIP',
npi_drug_list['pack-form'] == 'PACKET',
npi_drug_list['pack-form'] == 'SACHET',
npi_drug_list['pack-form'] == 'TUBE',
npi_drug_list['pack-form'] == 'BOTTLE',
npi_drug_list['pack-form'] == 'TETRA PACK',
npi_drug_list['pack-form'] == 'PRE FILLED SYRINGE',
npi_drug_list['pack-form'] == 'VIAL',
npi_drug_list['pack-form'] == 'CARTRIDGE',
npi_drug_list['pack-form'] == 'JAR',
npi_drug_list['pack-form'] == 'SPRAY BOTTLE',
npi_drug_list['pack-form'] == 'BOX',
npi_drug_list['pack-form'] == 'TIN',
npi_drug_list['pack-form'] == 'AMPOULE',
npi_drug_list['pack-form'] == 'KIT']
select = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
npi_drug_list['sort-pack-form'] = np.select(choice, select, default=999)
npi_drug_list.sort_values([ 'sort-pack-form', 'drug-name','sort-type'],
ascending=[True,True,True], inplace=True)
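# pack-form first, then drug name and type, so strips and ethical items land ahead of the sku_to_add_per_round cut below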
if int(cold_chain_flag) == 0:
npi_drug_list = npi_drug_list[npi_drug_list['cold-chain'] == 0]
logger.info('removing cold chain products')
elif int(cold_chain_flag) == 2:
npi_drug_list = npi_drug_list[npi_drug_list['cold-chain'] == 1]
logger.info('considering only cold chain products')
else:
logger.info('Not caring whether cold chain items are added or not')
npi_drug_list = npi_drug_list.head(sku_to_add_per_round).reset_index(drop=True)
final_list_npi = npi_drug_list[['store-id', 'drug-id']]
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
# inserting data into prod
logger.info("mySQL - Insert starting")
final_list_npi.to_sql(name='npi-drugs', con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=500)
logger.info("mySQL - Insert ended")
mysql_write.close()
# npi_added_uri = s3.save_df_to_s3(df=npi_drug_list, file_name='npi_removal_details_{}.csv'.format(cur_date))
status = 'added'
email = Email()
email.send_email_file(subject=f"{env} : Kokate NPI List",
mail_body=f"list-{status},{len(final_list_npi)} SKU Added",
to_emails=email_to, file_uris=[])
else:
status = 'not-added'
email = Email()
email.send_email_file(subject=f"{env} : Kokate NPI List",
mail_body=f"list-{status},Previos Status - {store_last_sataus.loc[0,'status']}",
to_emails=email_to, file_uris=[])
rs_db.close_connection()
mysql_read.close()

# Source file: glue-jobs/src/scripts/npi/store_closure_item_push_through_npi.py (package zeno-etl-libs)
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger(level='INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
start_time = datetime.datetime.now()
logger.info('Script Manager Initialized')
logger.info("")
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
# =============================================================================
# NPI at WH Snapshot
# =============================================================================
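# FIFO balances under Acno 59353 (the NPI account at the warehouse); non-numeric barcodes are excluded so barcode can be used as drug-id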
npi_wh_query = """
SELECT
f.Itemc as "itemc",
i.name ,
i."Location" as "aisle",
i.Barcode as "drug-id",
sum(f.BQty) as "bqty"
FROM
"prod2-generico"."prod2-generico".fifo f
left join "prod2-generico"."prod2-generico".item i
on
f.Itemc = i.code
WHERE
f.Acno = 59353
and i.barcode !~'[^0-9]'
GROUP by
f.Itemc ,
i.name ,
i.Barcode,
i."location"
HAVING sum(f.BQty)> 0
order by f.itemc asc
"""
npi_wh = rs_db.get_df(npi_wh_query)
logger.info("Fetched NPI in WH - balance quantity -{}".format(int(sum(npi_wh['bqty']))))
npi_wh['bqty'] = npi_wh['bqty'].apply(pd.to_numeric, errors='ignore').astype('Int64')
npi_wh['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
# =============================================================================
# Writing table to RS
# =============================================================================
schema = 'prod2-generico'
table_name = 'npi-inv-at-wh-sns'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
status1 = False
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' delete
from "{schema}"."{table_name}"
where date("updated-at")<date(dateadd(d,-30,current_date))
'''
rs_db_write.execute(truncate_query)
logger.info(str(table_name) + ' table data older than 30 days deleted')
truncate_query = f''' delete
from "{schema}"."{table_name}"
where date("updated-at")=date(current_date)
'''
rs_db_write.execute(truncate_query)
logger.info(
str(table_name) + ' table data deleted for the current date to avoid duplicate entries in case of multiple runs')
s3.write_df_to_db(df=npi_wh[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
status1 = True
if status1 is True:
status = 'Success'
else:
status = 'Failed'
# =============================================================================
# Sending Email
# =============================================================================
# logger.close()
end_time = datetime.datetime.now()
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
logger.info('min_to_complete_job - ' + str(min_to_complete))
email = Email()
email.send_email_file(subject=f"{env}-{status} : {table_name} table updated",
mail_body=f"{table_name} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection()

# Source file: glue-jobs/src/scripts/npi/npi-inv-at-wh-sns.py (package zeno-etl-libs)
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MSSql
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import datetime
import argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
mssql = MSSql(connect_via_tunnel=False)
connection = mssql.open_connection()
s3 = S3()
start_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
today_date = start_time.strftime('%Y-%m-%d')
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
# date parameter
logger.info("code started at {}".format(datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')))
# =============================================================================
# Fetching total returns created as npi-saleable
# =============================================================================
redshift_returns_query = """
select
*
from
(
select
row_number() over(partition by ri1.id
order by
ri1.id desc,
nvl(nd.id, 0) desc,
nvl(nci."check-id", 0) desc) as "row",
case
when rtd1."store-id" = 111 then 'Return-via-Zippin-Central'
when nd.id is null then 'store-random-return'
else 'normal-npi-return'
end as "type-of-npi-return",
nd.id as "nd-id",
case
when nd.id is not null then nci."check-id"
else null
end as "check-id",
case
when nd.id is not null then nci.expected
else null
end as "expected",
case
when nd.id is not null then nci.accounted
else null
end as "accounted",
case
when nd.id is not null then nci.scanned
else null
end as "scanned",
case
when nd.id is not null then nci."created-by"
else null
end as "check-created-by",
case
when nd.id is not null then nci."created-at"
else null
end as "check-created-at",
case
when nd.id is not null then nc."type"
else null
end as "npi-type",
nd."created-at" as "npi-added-in-store-at",
rtd1."store-id" ,
s.name as "store-name",
s."franchisee-id" ,
f.name as "franchise-name",
ri1."inventory-id" ,
-- i1."batch-number" ,
d."drug-name" ,
d."type" as "drug-type",
-- i1.expiry ,
i1.barcode ,
i1."drug-id" ,
rs.name as "return-to-dc-wh",
rtd1.id as "return-id",
ri1.id as "return-item-id",
rtd1."created-at" as "store-return-created-at",
ri1."returned-quantity" as "return-qty",
ri1.net as "net-value",
ri1.taxable as "net-taxable",
ri1."return-reason" as "return-reason",
ri1.status as "return-status",
dn1.status as "DN-status",
dn1.serial as "dn-number",
dn1."net-value" as "DN-Net",
dn1."created-at" as "DN-Saved-at",
dn1."dispatched-at" as "DN-Dispatched-at",
dn1."received-at" as "DN-Received-AT",
dn1."received-by" as "DN-Received-by",
dn1."approved-at" as "dn-approved-at",
dn1."settled-at" as "dn-settled-at",
dn1."accounted-at" as "dn-accounted-at",
dn1."rejected-at" as "dn-rejected-at",
dn1."created-by" as "dn-created-by",
dn1."approved-by" as "dn-approved-by",
dn1."rejected-by" as "dn-rejected-by",
dn1."settled-by" as "dn-settled-by",
dn1."accounted-by" as "dn-accounted-by",
rit."transfer-note-no" ,
rit."inventory-transferred" ,
rit."transfer-dc-id" ,
rit."wms-transfer-id" ,
rit."transferred-at" as "rit-transferred-at" ,
split_part(rit."wms-transfer-id", '-', 6)as "mysql-srlno",
case
when ri1.status = 'saved' then 'Store Return Saved'
when ri1.status = 'approved'
and dn1.status is null then 'Store Return Saved'
when ri1.status = 'approved'
and dn1.status = 'saved' then 'Store DN Saved'
when ri1.status = 'approved'
and dn1.status = 'dispatched' then 'DN Dispatched'
when ri1.status = 'approved'
and dn1.status = 'received' then 'DN Received'
when rit.id is not null
and rit."transfer-dc-id" = 256
then 'Transferred to Expiry'
when rit.id is not null
and rit."transfer-dc-id" = 255
then 'Transferred to WMS'
when rit.id is null
and ri1.status = 'settled'
and dn1.status = 'settled'
then 'Settled Without transfer'
when rit.id is not null
and rit."transfer-dc-id" is null
and ri1.status = 'settled'
and dn1.status = 'settled'
then 'Transferred location unknown - Settled'
when ri1.status = 'discarded' then 'Discarded'
else 'status issue'
end as "Comprehensive-status"
from
"prod2-generico"."return-items-1" ri1
left join "prod2-generico"."returns-to-dc-1" rtd1
on
ri1."return-id" = rtd1.id
left join "prod2-generico"."npi-check-items" nci
on
nci."inventory-id" = ri1."inventory-id"
and nci.status = 'inventory-check-completed'
and nci."created-at" <= ri1."approved-at"
left join "prod2-generico"."npi-check" nc
on
nci."check-id" = nc.id
and nc."store-id" = rtd1."store-id"
left join "prod2-generico"."npi-drugs" nd
on
nc.id = nd."npi-check-id"
and nci."drug-id" = nd."drug-id"
and nc."store-id" = nd."store-id"
left join "prod2-generico"."inventory-1" i1
on
ri1."inventory-id" = i1.id
-- and nd."store-id" = i1."store-id"
-- and rtd1."store-id" = i1."store-id"
-- and ri1."inventory-id" = i1.id
left join "prod2-generico"."debit-note-items-1" dni1
on
ri1.id = dni1."item-id"
and dni1."is-active" != 0
left join "prod2-generico"."debit-notes-1" dn1
on
dni1."debit-note-id" = dn1.id
left join "prod2-generico"."return-item-transfers" rit on
ri1.id = rit."return-item-id"
left join "prod2-generico".stores s
on
rtd1."store-id" = s.id
left join "prod2-generico".drugs d
on
i1."drug-id" = d.id
left join "prod2-generico".franchisees f
on
s."franchisee-id" = f.id
left join "prod2-generico".stores rs
on
rtd1."dc-id" = rs.id
left join "prod2-generico"."return-items-1" ri1ref
on
ri1."return-item-reference" = ri1ref.id
where
((ri1."return-reason" = 'reason-npi-saleable')
or ((ri1."return-reason" = 'reason-npi-non-saleable')
and (ri1ref."return-reason" = 'reason-npi-saleable')))
and ri1.status not in ('deleted', 'amended')
and (dn1.id is null
or dn1."is-internal-debit-note" != 1)
and (dn1.status is null
or dn1.status not in ('rejected', 'transferred'))
order by
ri1.id desc)a
where
a."row" = 1
"""
redshift_returns = rs_db.get_df(redshift_returns_query)
logger.info("Fetched Redshift returns")
logger.info(f"redshift_returns - line items - {len(redshift_returns)}")
other_transferred = redshift_returns[redshift_returns['transfer-dc-id']!=255]
npi_transferred = redshift_returns[redshift_returns['transfer-dc-id']==255]
logger.info(f"npi_transferred - line items - {len(npi_transferred)}")
# =============================================================================
# Connecting redshift returns to wms via fifo (psrlnotrf-barcode)
# =============================================================================
wms_fifo_query = """
SELECT
f.Psrlno ,
f.PsrlnoTrf ,
f.Pbillno ,
f.Vno ,
f.Acno,
f."wms-drug-id",
f."wms-drug-name",
f."fifo-tqty",
f."fifo-bqty",
f."import-status"
from
(
SELECT
COUNT(f.Psrlno) over (partition by f.Pbillno ,
f.PsrlnoTrf
order by
f.Psrlno desc
range BETWEEN UNBOUNDED PRECEDING and UNBOUNDED FOLLOWING) as "counted-match",
f.Psrlno ,
f.PsrlnoTrf ,
f.Pbillno ,
f.Vno ,
f.Acno,
i.Barcode as 'wms-drug-id',
i.name as 'wms-drug-name',
f.TQty as 'fifo-tqty',
f.BQty as 'fifo-bqty',
'imported' as 'import-status'
FROM
FIFO f
left join Item i on
f.Itemc = i.code
WHERE
f.Acno = 59353)f
where
f."counted-match" = 1
"""
wms_fifo = pd.read_sql(wms_fifo_query,connection)
logger.info("Fetched Fifo Returns")
npi_transferred['barcode'] = npi_transferred['barcode'].apply(pd.to_numeric, errors='ignore').astype('Int64')
wms_fifo['PsrlnoTrf']=wms_fifo['PsrlnoTrf'].apply(pd.to_numeric, errors='ignore').astype('Int64')
npi_transferred = npi_transferred.merge(wms_fifo,left_on = ['barcode','transfer-note-no'], right_on = ['PsrlnoTrf','Pbillno'],how = 'left')
npi_transferred_fifo = npi_transferred[npi_transferred['PsrlnoTrf'].notna()]
logger.info(f"npi_transferred_fifo - line items - {len(npi_transferred_fifo)}")
npi_transferred_fifo['wms-link'] = 'barcode-psrlnotrf'
npi_transferred_notfifo = npi_transferred[npi_transferred['PsrlnoTrf'].isna()]
logger.info(f"npi_transferred_not_in_fifo - line items - {len(npi_transferred_notfifo)}")
# =============================================================================
# Connecting Items which are not imported yet
# =============================================================================
wms_apsync_query = """
SELECT
f.Psrlno,
f.Vno
from
(
SELECT
Psrlno ,
Vno ,
row_number() over (partition by asbt.Psrlno ,
asbt.Vno
order by
asbt.Psrlno desc
) as "row"
FROM
AP_SyncBrTrf asbt)F
WHERE
f."row"=1
"""
wms_apsync = pd.read_sql(wms_apsync_query,connection)
logger.info("Fetched AP_SyncBrTrf Returns")
wms_apsync['Vno']=wms_apsync['Vno'].apply(pd.to_numeric, errors='ignore').astype('Int64')
npi_transferred_notfifo['transfer-note-no'] = npi_transferred_notfifo['transfer-note-no'].apply(pd.to_numeric, errors='ignore').astype('Int64')
wms_apsync['Psrlno']=wms_apsync['Psrlno'].apply(pd.to_numeric, errors='ignore').astype('Int64')
npi_transferred_notfifo['barcode'] = npi_transferred_notfifo['barcode'].apply(pd.to_numeric, errors='ignore').astype('Int64')
npi_transferred_notfifo = npi_transferred_notfifo.merge(wms_apsync,left_on = ['barcode','transfer-note-no'], right_on=['Psrlno','Vno'],how = 'left',suffixes = ['','-sync'])
npi_transferred_import_pending = npi_transferred_notfifo[npi_transferred_notfifo['Psrlno-sync'].notna()]
npi_transferred_import_pending['import-status'] = 'import-pending'
npi_transferred_import_pending['wms-link'] = 'import-pending'
logger.info(f"npi_transferred_import_pending - line items - {len(npi_transferred_import_pending)}")
npi_transferred_issue = npi_transferred_notfifo[npi_transferred_notfifo['Psrlno-sync'].isna()]
logger.info(f"npi_transferred_issue - line items - {len(npi_transferred_issue)}")
# =============================================================================
# Connecting FIFO (psrlnotrf-inventory-id)
# =============================================================================
wms_fifo['Pbillno']=wms_fifo['Pbillno'].apply(pd.to_numeric, errors='ignore').astype('Int64')
npi_transferred_issue = npi_transferred_issue.merge(wms_fifo,left_on = ['inventory-id','transfer-note-no'], right_on = ['PsrlnoTrf','Pbillno'],how = 'left', suffixes =['','_inventory_match'] )
conditions = [(npi_transferred_issue['Psrlno_inventory_match'].notna())]
choices = ['inventory-psrlnotrf']
npi_transferred_issue['wms-link'] = np.select(conditions, choices)
npi_transferred_issue['Psrlno'] = npi_transferred_issue['Psrlno_inventory_match']
npi_transferred_issue['PsrlnoTrf'] = npi_transferred_issue['PsrlnoTrf_inventory_match']
npi_transferred_issue['Pbillno'] = npi_transferred_issue['Pbillno_inventory_match']
npi_transferred_issue['Vno'] = npi_transferred_issue['Vno_inventory_match']
npi_transferred_issue['Acno'] = npi_transferred_issue['Acno_inventory_match']
npi_transferred_issue['wms-drug-id'] = npi_transferred_issue['wms-drug-id_inventory_match']
npi_transferred_issue['wms-drug-name'] = npi_transferred_issue['wms-drug-name_inventory_match']
npi_transferred_issue['fifo-tqty'] = npi_transferred_issue['fifo-tqty_inventory_match']
npi_transferred_issue['fifo-bqty'] = npi_transferred_issue['fifo-bqty_inventory_match']
npi_transferred_issue['import-status'] = npi_transferred_issue['import-status_inventory_match']
npi_transferred_issue = npi_transferred_issue.drop(['Psrlno_inventory_match', 'PsrlnoTrf_inventory_match',
'Pbillno_inventory_match', 'Vno_inventory_match',
'Acno_inventory_match', 'wms-drug-id_inventory_match',
'wms-drug-name_inventory_match', 'fifo-tqty_inventory_match',
'fifo-bqty_inventory_match', 'import-status_inventory_match'], axis=1)
npi_transferred_inv_match = npi_transferred_issue[npi_transferred_issue['PsrlnoTrf'].notna()]
logger.info(f"npi_transferred_inv_match - line items - {len(npi_transferred_inv_match)}")
npi_transferred_issue = npi_transferred_issue[npi_transferred_issue['PsrlnoTrf'].isna()]
logger.info(f"npi_transferred_issue - line items - {len(npi_transferred_issue)}")
# =============================================================================
# Connecting items by transfer note and drug id (where there is a single drug for the entire transfer note)
# =============================================================================
npi_transferred_issue['drug-transfernote'] = npi_transferred_issue['drug-id'].astype('str') + '-' + npi_transferred_issue['transfer-note-no'].astype('str')
durg_transfernote = tuple(map(str,npi_transferred_issue['drug-transfernote'].unique()))
wms_transfer_drug_query = """
SELECT
f.Psrlno ,
f.PsrlnoTrf ,
f.Pbillno ,
f.Vno ,
f.Acno,
f."wms-drug-id",
f."wms-drug-name",
f."fifo-tqty",
f."fifo-bqty",
f."import-status"
from
(
SELECT
COUNT(f.Psrlno) over (partition by f.Pbillno ,
i.Barcode
order by
f.Psrlno desc
range BETWEEN UNBOUNDED PRECEDING and UNBOUNDED FOLLOWING) as "counted-match",
f.Psrlno ,
f.PsrlnoTrf ,
f.Pbillno ,
f.Vno ,
f.Acno,
i.Barcode as 'wms-drug-id',
i.name as 'wms-drug-name',
f.TQty as 'fifo-tqty',
f.BQty as 'fifo-bqty',
'imported' as 'import-status'
FROM
FIFO f
left join Item i on
f.Itemc = i.code
WHERE
f.Acno = 59353
and f.TQty > 0
and concat(i.Barcode, '-', f.Pbillno) in {durg_transfernote})f
where
f."counted-match" = 1
""".format(durg_transfernote=durg_transfernote)
wms_transfer_drug = pd.read_sql(wms_transfer_drug_query,connection)
logger.info("Fetched data for join based on transfer note and drug id Returns")
wms_transfer_drug['wms-drug-id'] = wms_transfer_drug['wms-drug-id'].apply(pd.to_numeric, errors='ignore').astype('Int64')
wms_transfer_drug['Pbillno'] = wms_transfer_drug['Pbillno'].apply(pd.to_numeric, errors='ignore').astype('Int64')
npi_transferred_issue = npi_transferred_issue.merge(wms_transfer_drug,left_on = ['drug-id','transfer-note-no'], right_on = ['wms-drug-id','Pbillno'], how ='left', suffixes = ['','_drug_transfernote'])
conditions = [(npi_transferred_issue['Psrlno_drug_transfernote'].notna())]
choices = ['drug-transfernote']
npi_transferred_issue['wms-link'] = np.select(conditions, choices)
npi_transferred_issue['Psrlno'] = npi_transferred_issue['Psrlno_drug_transfernote']
npi_transferred_issue['PsrlnoTrf'] = npi_transferred_issue['PsrlnoTrf_drug_transfernote']
npi_transferred_issue['Pbillno'] = npi_transferred_issue['Pbillno_drug_transfernote']
npi_transferred_issue['Vno'] = npi_transferred_issue['Vno_drug_transfernote']
npi_transferred_issue['Acno'] = npi_transferred_issue['Acno_drug_transfernote']
npi_transferred_issue['wms-drug-id'] = npi_transferred_issue['wms-drug-id_drug_transfernote']
npi_transferred_issue['wms-drug-name'] = npi_transferred_issue['wms-drug-name_drug_transfernote']
npi_transferred_issue['fifo-tqty'] = npi_transferred_issue['fifo-tqty_drug_transfernote']
npi_transferred_issue['fifo-bqty'] = npi_transferred_issue['fifo-bqty_drug_transfernote']
npi_transferred_issue['import-status'] = npi_transferred_issue['import-status_drug_transfernote']
npi_transferred_issue = npi_transferred_issue.drop(['Psrlno_drug_transfernote', 'PsrlnoTrf_drug_transfernote',
'Pbillno_drug_transfernote', 'Vno_drug_transfernote',
'Acno_drug_transfernote', 'wms-drug-id_drug_transfernote',
'wms-drug-name_drug_transfernote', 'fifo-tqty_drug_transfernote',
'fifo-bqty_drug_transfernote', 'import-status_drug_transfernote'], axis=1)
npi_transferred_drug_transfer_note = npi_transferred_issue[npi_transferred_issue['Psrlno'].notna()]
logger.info(f"npi_transferred_drug_transfer_note - line items - {len(npi_transferred_drug_transfer_note)}")
npi_transferred_issue = npi_transferred_issue[npi_transferred_issue['Psrlno'].isna()]
logger.info(f"npi_transferred_issue - line items - {len(npi_transferred_issue)}")
# =============================================================================
# Connecting by salepurchase2 Vtype - BR, Pbillno - Transfernote-no, srlno - wms-transfer-id's last number
# =============================================================================
npi_transferred_issue['mysql-pbillno-srlno'] = npi_transferred_issue['transfer-note-no'].astype(str) + '-' + npi_transferred_issue['mysql-srlno'].astype(str)
sp_list = tuple(map(str,npi_transferred_issue['mysql-pbillno-srlno'].unique()))
salepurchase2_query = """
SELECT
f.Psrlno ,
f.PsrlnoTrf ,
f.Pbillno ,
f.Vno ,
f.Acno,
f."wms-drug-id",
f."wms-drug-name",
f."fifo-tqty",
f."fifo-bqty",
f."import-status",
f."wms-srlno"
from
(
SELECT
sp.Psrlno ,
f.PsrlnoTrf,
sp.Pbillno ,
f.Vno ,
f.Acno ,
i.Barcode as 'wms-drug-id',
i.name as 'wms-drug-name',
f.TQty as 'fifo-tqty',
f.BQty as 'fifo-bqty',
'imported' as 'import-status',
sp.srlno as 'wms-srlno',
COUNT(sp.Psrlno) over (partition by sp.Pbillno ,
sp.srlno
order by
sp.Psrlno desc
range BETWEEN UNBOUNDED PRECEDING and UNBOUNDED FOLLOWING) as "counted-match"
FROM
SalePurchase2 sp
left join Item i on
sp.Itemc = i.code
left join FIFO f on
f.Psrlno = sp.Psrlno
WHERE
sp.Vtype = 'BR'
-- and sp.Pbillno = transfer_note
-- and i.Barcode = drug_id
-- and sp.srlno = transfer_end_number
-- and f.PsrlnoTrf = barcode
-- and CONCAT(sp.Pbillno, '-', sp.srlno) in {list_sp}
) f
WHERE
f."counted-match" = 1
"""
salepurchase2 = pd.read_sql(salepurchase2_query ,connection)
logger.info("Fetched Salepurchase2 data with Vtype 'BR")
salepurchase2['Pbillno-srlno'] = salepurchase2['Pbillno'].astype(str) +'-' + salepurchase2['wms-srlno'].astype(str)
salepurchase2 = salepurchase2[salepurchase2['Pbillno-srlno'].isin(sp_list)]
salepurchase2['Pbillno'] = salepurchase2['Pbillno'].apply(pd.to_numeric, errors='ignore').astype('Int64')
salepurchase2['wms-srlno'] = salepurchase2['wms-srlno'].astype(str)
npi_transferred_issue['drug-id'] = npi_transferred_issue['drug-id'].astype(str)
npi_transferred_issue = npi_transferred_issue.merge(salepurchase2,left_on = ['transfer-note-no','drug-id','mysql-srlno'], right_on = ['Pbillno','wms-drug-id','wms-srlno'], how='left', suffixes =['','-sp2'])
npi_transferred_issue['drug-id'] = npi_transferred_issue['drug-id'].astype(int)
conditions = [(npi_transferred_issue['Psrlno-sp2'].notna())]
choices = ['sp2-pbillno-srlno']
npi_transferred_issue['wms-link'] = np.select(conditions, choices)
npi_transferred_issue['Psrlno'] = npi_transferred_issue['Psrlno-sp2']
npi_transferred_issue['PsrlnoTrf'] = npi_transferred_issue['PsrlnoTrf-sp2']
npi_transferred_issue['Pbillno'] = npi_transferred_issue['Pbillno-sp2']
npi_transferred_issue['Vno'] = npi_transferred_issue['Vno-sp2']
npi_transferred_issue['Acno'] = npi_transferred_issue['Acno-sp2']
npi_transferred_issue['wms-drug-id'] = npi_transferred_issue['wms-drug-id-sp2']
npi_transferred_issue['wms-drug-name'] = npi_transferred_issue['wms-drug-name-sp2']
npi_transferred_issue['fifo-tqty'] = npi_transferred_issue['fifo-tqty-sp2']
npi_transferred_issue['fifo-bqty'] = npi_transferred_issue['fifo-bqty-sp2']
npi_transferred_issue['import-status'] = npi_transferred_issue['import-status-sp2']
npi_transferred_issue = npi_transferred_issue.drop(['Psrlno-sp2', 'PsrlnoTrf-sp2', 'Pbillno-sp2', 'Vno-sp2', 'Acno-sp2',
'wms-drug-id-sp2', 'wms-drug-name-sp2', 'fifo-tqty-sp2',
'fifo-bqty-sp2', 'import-status-sp2', 'mysql-pbillno-srlno',
'wms-srlno'], axis=1)
npi_transferred_sp2 = npi_transferred_issue[npi_transferred_issue['Psrlno'].notna()]
logger.info(f"npi_transferred_sp2 - line items - {len(npi_transferred_sp2)}")
npi_transferred_issue = npi_transferred_issue[npi_transferred_issue['Psrlno'].isna()]
logger.info(f"npi_transferred_issue - line items - {len(npi_transferred_issue)}")
# =============================================================================
# Collating total npi wms transferred returns
# =============================================================================
conditions = [(npi_transferred_issue['Psrlno'].isna())]
choices = ['link-issue']
npi_transferred_issue['wms-link'] = np.select(conditions, choices)
npi_returns = pd.concat([npi_transferred_fifo ,npi_transferred_import_pending, npi_transferred_inv_match, npi_transferred_drug_transfer_note, npi_transferred_sp2,npi_transferred_issue])
logger.info(f"npi_returns - Total line items - {len(npi_returns)}")
logger.info(f"percentage-issue (return-item-wise)- {round((len(npi_transferred_issue)/len(npi_returns))*100,2)}%")
# =============================================================================
# Adding liquidation data
# =============================================================================
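# quantity sold out of NPI stock (Vtype 'SB' sale entries against NPI Psrlnos) is treated as liquidated quantity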
psrlno = tuple(map(int,npi_returns[npi_returns['Psrlno'].notna()]['Psrlno'].unique()))
liquidation_query = """
SELECT
sp.Psrlno ,
sum(sp.Qty) as 'liquidated-quantity'
FROM
SalePurchase2 sp
left join fifo f on
sp.Psrlno = f.Psrlno
WHERE
f.Acno = 59353
and sp.Vtype = 'SB'
GROUP by sp.Psrlno
"""
liquidation = pd.read_sql(liquidation_query,connection)
logger.info("Fetched liquidation data")
npi_returns = npi_returns.merge(liquidation,on ='Psrlno', how = 'left' )
# =============================================================================
# Adding Purchase Expiry data
# =============================================================================
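# purchase-expiry (Vtype 'PE') quantities are traced back through BrExp to the originating GE voucher so they can be attached to the FIFO Psrlno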
pe_query = """
SELECT
sum(sppe.Qty) as 'purchase-expiry',
spge.Psrlno
FROM
SalePurchase2 sppe
left join BrExp be on
sppe.ChlnSrlno = be.BeSrlno
left join SalePurchase2 spge on
spge.Vtype = 'GE'
and spge.Vno = be.Vno
and be.Itemc = spge.Itemc
and spge.ScmPer = be.BeSrlno
left join FIFO f on
spge.Psrlno = f.Psrlno
WHERE
sppe.Vtype = 'PE'
and sppe.Acno = 59353
and f.TQty != 0
GROUP by spge.Psrlno
""".format(psrlno=psrlno)
purchase_expiry = pd.read_sql(pe_query,connection)
logger.info("Fetched purchase_expiry data")
npi_returns = npi_returns.merge(purchase_expiry,on ='Psrlno', how = 'left' )
if len(npi_returns)==len(npi_transferred):
logger.info('wms-transferred-line-matched')
else:
logger.info('issue-wms-transferred-line-match')
# =============================================================================
# Collating and creating table for upload
# =============================================================================
return_npi_saleable = pd.concat([other_transferred,npi_returns])
logger.info(f"return_npi_saleable - Total line items - {len(return_npi_saleable)}")
return_npi_saleable.columns = return_npi_saleable.columns.str.lower()
return_npi_saleable[['acno','vno','fifo-bqty','fifo-tqty','inventory-transferred','liquidated-quantity','purchase-expiry','transfer-dc-id','store-id','psrlno','psrlnotrf','check-id','return-id','expected','accounted','scanned','franchisee-id']] = return_npi_saleable[['acno','vno','fifo-bqty','fifo-tqty','inventory-transferred','liquidated-quantity','purchase-expiry','transfer-dc-id','store-id','psrlno','psrlnotrf','check-id','return-id','expected','accounted','scanned','franchisee-id']].apply(pd.to_numeric, errors='ignore').astype('Int64')
return_npi_saleable[['dn-net','net-taxable','net-value']] = return_npi_saleable[['dn-net','net-taxable','net-value']].astype(float)
# =============================================================================
# Writing table to RS
# =============================================================================
try:
schema = 'prod2-generico'
table_name = 'npi-returns-tracking'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' delete
from "{schema}"."{table_name}"
'''
rs_db.execute(truncate_query)
logger.info(str(table_name) + ' table old data deleted')
s3.write_df_to_db(df=return_npi_saleable[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
status = True
except Exception as error:
status = False
raise Exception(error)
finally:
rs_db.close_connection()
mssql.close_connection()
if status is True:
mssg = 'Success'
else:
mssg = 'Failed'
# =============================================================================
# Sending Email
# =============================================================================
end_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject=f"{env}-{mssg} : {table_name} table updated",
mail_body=f"{table_name} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])

# Source file: glue-jobs/src/scripts/npi/npi-returns-tracking.py (package zeno-etl-libs)
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-ad', '--analysis_date_parameter', default="NULL", type=str, required=False)
parser.add_argument('-adis', '--analysis_date_parameter_inv_sns', default="NULL", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
analysis_date_parameter = args.analysis_date_parameter
analysis_date_parameter_inv_sns = args.analysis_date_parameter_inv_sns
os.environ['env'] = env
logger = get_logger(level='INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
start_time = datetime.datetime.now()
logger.info('Script Manager Initialized')
logger.info("")
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("analysis_date_parameter - " + analysis_date_parameter)
logger.info("analysis_date_parameter_inv_sns - " + analysis_date_parameter_inv_sns)
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
analysis_date = (datetime.datetime.now(tz=gettz('Asia/Kolkata')) -datetime.timedelta(days=1)).strftime('%Y-%m-%d')
analysis_date_sns = (datetime.datetime.now(tz=gettz('Asia/Kolkata')) -datetime.timedelta(days=1)).strftime('%Y-%m-%d')
# analysis_date = '2022-05-11'
# analysis_date_sns = '2022-05-09'
# =============================================================================
# Setting Analysis date
# =============================================================================
# analysis_date decides for which day short-book orders are evaluated
# analysis_date_sns decides for which day the FIFO inventory snapshot is taken
# By default, yesterday's orders are analysed against yesterday's FIFO inventory, to give orders time to bounce
if analysis_date_parameter!="NULL":
analysis_date = analysis_date_parameter
else:
analysis_date = analysis_date
logger.info('analysis-date - '+ str(analysis_date))
if analysis_date_parameter_inv_sns!="NULL":
analysis_date_sns = analysis_date_parameter_inv_sns
else:
analysis_date_sns = analysis_date_sns
logger.info('analysis_date_sns (inventory of this day will be used for analysis) - '+ str(analysis_date_sns))
# =============================================================================
# Fetching the NPI-at-WH snapshot for analysis_date_sns
# =============================================================================
npi_wh_query = """
select
*
from
"prod2-generico"."npi-inv-at-wh-sns"
where date("updated-at") = '{analysis_date}'
""".format(analysis_date = analysis_date_sns)
whlivenpi= rs_db.get_df(npi_wh_query)
logger.info("Fetched NPI in WH - balance quantity -{}".format(int(sum(whlivenpi['bqty']))))
whlivenpi['aisle_number'] = (whlivenpi.loc[:,'aisle'].astype(str).str[1:3])
mask = (whlivenpi['aisle_number'] == '10') | (whlivenpi['aisle_number'] == '11')
whlivenpi = whlivenpi[mask]
whlivenpi = whlivenpi.groupby(['itemc','name','aisle_number','drug-id']).agg(sum).reset_index()
# =============================================================================
# Fetching Shortbook Orders for the day
# =============================================================================
drugs = tuple(map(int,(whlivenpi['drug-id'].unique())))
if len(drugs)==0:
drugs = [0]
drugs= str(list(drugs)).replace('[', '(').replace(']', ')')
sb_orders_query = """
select
sb.id as "short-book-id",
sb."store-id" ,
sb."drug-name",
sb."drug-id" ,
case
when sb."auto-short" = 0 then 'pr'
when sb."auto-short" = 1
and "patient-id" = 4480 then 'as'
end as "order-type",
sb.quantity AS "orderd-quantity",
sb."distributor-id",
d.name as "distributor-name",
case
when sb."distributor-id" = 8105 then 'warehouse-order'
else 'other-distributor-order'
end as "order-source"
from
"prod2-generico"."short-book-1" sb
left join "prod2-generico".distributors d
on
sb."distributor-id" = d.id
where
(sb."auto-short" = 0
or (sb."auto-short" = 1
and "patient-id" = 4480))
and "drug-id" in {drugs}
and DATE(sb."created-at") = '{analysis_date}'
""".format(drugs=drugs, analysis_date=analysis_date)
sb_orders= rs_db.get_df(sb_orders_query)
logger.info("fetched shortbook orders for analysis date")
# =============================================================================
# Checking errors with the fixed-distributor tag
# =============================================================================
sb_orders['store-drug'] = sb_orders['store-id'].astype(str)+'-'+ sb_orders['drug-id'].astype(str)
store_drug = tuple(map(str, (list( sb_orders['store-drug'].unique()))))
fixed_distrubutor_query = """
select
concat(fd."store-id" , concat('-', fd."drug-id")) as "store-drug-fixed-distributor"
from
"prod2-generico"."fixed-distributors" fd
where
active = 1
and "distributor-id" = 8105
and concat(fd."store-id" , concat('-', fd."drug-id")) in {store_drug}
group by
"drug-id",
"store-id"
""".format(store_drug=store_drug)
fixed_distributor_check = rs_db.get_df(fixed_distrubutor_query)
logger.info("Fetched relevant fixed distributor data")
sb_orders = sb_orders.merge(fixed_distributor_check,left_on ='store-drug', right_on = 'store-drug-fixed-distributor',how = 'left')
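# fixed-distributor-tag = 1 when the warehouse (distributor 8105) is an active fixed distributor for the store-drug combination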
def fixed_check(x):
if x is None:
return 0
else:
return 1
sb_orders['fixed-distributor-tag'] = sb_orders['store-drug-fixed-distributor'].apply(fixed_check)
sb_orders['dummy'] = 1
del sb_orders['store-drug']
del sb_orders['store-drug-fixed-distributor']
sb_orders_fix_tag = sb_orders
sb_orders_fix_tag = sb_orders_fix_tag.groupby(['drug-id','drug-name','order-type']).agg({'dummy':sum,
'fixed-distributor-tag':sum}).reset_index()
sb_orders_fix_tag['fixed-distributor-issue-percentage'] = 1 - (sb_orders_fix_tag['fixed-distributor-tag']/sb_orders_fix_tag['dummy'])
del sb_orders_fix_tag['dummy']
del sb_orders_fix_tag['fixed-distributor-tag']
def fixed_flag(x):
if x> 0:
return 1
else:
return 0
sb_orders_fix_tag['fixed-distributor-flag'] = sb_orders_fix_tag['fixed-distributor-issue-percentage'].apply(fixed_flag)
# =============================================================================
# Fetching shortbook-order logs to check bounce orders
# =============================================================================
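# Note: a "bounce" here appears to mean that the warehouse (distributor 8105) was tried for
# the order but the quantity was ultimately ordered on another distributor as well; the
# ordered-dist-id trail fetched below is used later to flag this pattern.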
dist_short_book_ids = tuple(map(int, list( sb_orders['short-book-id'].unique())))
if len(dist_short_book_ids )==0:
dist_short_book_ids = [0]
dist_short_book_ids = str(list(dist_short_book_ids )).replace('[', '(').replace(']', ')')
order_logs_query = """
select
"short-book-id" ,
"ordered-dist-id" ,
concat(coalesce("ordered-dist-id" ,0),concat('-',coalesce("status",'null'))) as "dist-status"
from
"prod2-generico"."short-book-order-logs" sbol
where
"short-book-id" in {dist_short_book_ids}
order by sbol.id ASC
""".format(dist_short_book_ids=dist_short_book_ids)
order_logs= rs_db.get_df(order_logs_query)
logger.info("Fetched order logs")
order_logs['dist-status'] = order_logs['dist-status'].astype(str)
order_logs['ordered-dist-id'] = order_logs['ordered-dist-id'].astype(str)
order_logs = order_logs.groupby(['short-book-id']).agg({'dist-status':','.join,
'ordered-dist-id':','.join}).reset_index()
order_logs['shortbookid-dist-status'] = str('(') + order_logs['short-book-id'].astype(str)+ str(' ') +order_logs['dist-status'] + str(')')
del order_logs['dist-status']
sb_orders= sb_orders.merge(order_logs, on = 'short-book-id', how = 'left')
sb_orders = pd.pivot_table(sb_orders, values='orderd-quantity', index=['drug-name','drug-id','order-type','store-id','ordered-dist-id','shortbookid-dist-status'], columns='order-source',aggfunc='sum',fill_value=0).reset_index()
sb_orders['total-orders'] = sb_orders['other-distributor-order']+sb_orders['warehouse-order']
sb_orders['store-id'] = sb_orders['store-id'].astype(str)
sb_orders = sb_orders.groupby(['drug-id','drug-name','order-type']).agg({'store-id':','.join,
'ordered-dist-id':','.join,
'shortbookid-dist-status':','.join,
'other-distributor-order':sum,
'warehouse-order':sum,
'total-orders':sum}).reset_index()
sb_orders = sb_orders.merge(sb_orders_fix_tag, on = ['drug-id','drug-name','order-type'], how = 'left')
def order_check(x):
if '8105' in x:
return 1
else:
return 0
sb_orders['ordered-flag'] = sb_orders['ordered-dist-id'].apply(order_check)
# =============================================================================
# extra-order-to-diff-distrubutor Check
# =============================================================================
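# Allocation logic (illustrative example, values assumed):
# if WH balance qty (bqty) covers the full order, every unit ordered outside the WH is "extra",
#   e.g. bqty=10, total-orders=8, other-distributor-order=3 -> extra=3;
# if bqty falls short, only the portion of the outside order beyond the shortfall is "extra",
#   e.g. bqty=6, total-orders=10, other-distributor-order=7 -> shortfall=4, extra=7-4=3;
# if the outside order is within the shortfall, nothing is "extra" (extra=0).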
whlivenpi['drug-id'] = whlivenpi['drug-id'].apply(pd.to_numeric, errors='ignore').astype('Int64')
sb_orders = sb_orders.merge(whlivenpi[['drug-id','bqty']],on='drug-id',how='left')
sb_orders.loc[sb_orders['bqty']>=sb_orders['total-orders'] , 'extra-order-to-diff-distrubutor'] = sb_orders['other-distributor-order']
sb_orders.loc[(sb_orders['total-orders'] >sb_orders['bqty']) & (sb_orders['other-distributor-order']>(sb_orders['total-orders']-sb_orders['bqty'])) , 'extra-order-to-diff-distrubutor'] = sb_orders['other-distributor-order'] - (sb_orders['total-orders']-sb_orders['bqty'])
sb_orders.loc[(sb_orders['total-orders'] >sb_orders['bqty']) & (sb_orders['other-distributor-order']<(sb_orders['total-orders']-sb_orders['bqty'])) , 'extra-order-to-diff-distrubutor'] = 0
sb_orders['extra-order-to-diff-distrubutor'] = sb_orders['extra-order-to-diff-distrubutor'].apply(pd.to_numeric, errors='ignore').astype('Int64')
conditions = [((sb_orders['extra-order-to-diff-distrubutor']>0)&(sb_orders['ordered-flag']==1))]
choices = [1]
sb_orders['bounce-flag'] = np.select(conditions, choices, default = 0)
conditions = [((sb_orders['extra-order-to-diff-distrubutor']>0)&(sb_orders['fixed-distributor-flag']==1))]
choices = [1]
sb_orders['fixed-distributor-flag'] = np.select(conditions, choices, default = 0)
conditions = [((sb_orders['extra-order-to-diff-distrubutor']>0)&(sb_orders['fixed-distributor-flag']==1))]
choices = [sb_orders['fixed-distributor-issue-percentage']]
sb_orders['fixed-distributor-issue-percentage'] = np.select(conditions, choices, default = 0)
sb_orders.rename(columns={'store-id':'store-ids'},inplace = True)
del sb_orders['ordered-dist-id']
del sb_orders['ordered-flag']
sb_orders['extra-order-to-diff-distrubutor'] = sb_orders['extra-order-to-diff-distrubutor'].fillna(0)
conditions = [sb_orders['extra-order-to-diff-distrubutor']==0,
((sb_orders['extra-order-to-diff-distrubutor']>0)&(sb_orders['bounce-flag']==1)),
((sb_orders['extra-order-to-diff-distrubutor']>0)&(sb_orders['fixed-distributor-flag']==1))]
choices = ['no-issue','bounce-issue','fixed-distributor-table-issue']
sb_orders['issue-type'] = np.select(conditions, choices, default = 'different-issue')
sb_orders['analysis-date'] = analysis_date
sb_orders = sb_orders[['drug-id','drug-name','order-type','store-ids','shortbookid-dist-status','bqty','warehouse-order','other-distributor-order','total-orders','extra-order-to-diff-distrubutor','bounce-flag','fixed-distributor-flag','fixed-distributor-issue-percentage','issue-type','analysis-date']]
logger.info("table is ready to be written in Redshift")
# =============================================================================
# Writing table to RS
# =============================================================================
schema = 'prod2-generico'
table_name = 'npi-missed-order-analysis'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
status1 = False
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = ''' delete
from "{schema}"."{table_name}"
where date("analysis-date") = '{analysis_date}'
'''.format(schema=schema,table_name=table_name,analysis_date=analysis_date)
rs_db_write.execute(truncate_query)
logger.info(str(table_name) + ': rows for the analysis date deleted')
s3.write_df_to_db(df=sb_orders[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
status1 = True
if status1 is True:
status = 'Success'
else:
status = 'Failed'
# =============================================================================
# Sending Email
# =============================================================================
# logger.close()
end_time = datetime.datetime.now()
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
logger.info('min_to_complete_job - ' + str(min_to_complete))
email = Email()
email.send_email_file(subject=f"{env}-{status} : {table_name} table updated",
mail_body=f"{table_name} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/npi/npi-missed-order-analysis.py | npi-missed-order-analysis.py |
""
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.config.common import Config
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-sku', '--sku_to_add_per_round', default=18, type=int, required=False)
parser.add_argument('-si', '--store_id', default=4, type=int, required=False)
parser.add_argument('-ccf', '--cold_chain_flag', default=1, type=str, required=False)
parser.add_argument('-dts', '--date_to_start', default='2022-11-01', type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
sku_to_add_per_round = args.sku_to_add_per_round
store_id = args.store_id
cold_chain_flag = args.cold_chain_flag
date_to_start = args.date_to_start
store_id = int(store_id)
os.environ['env'] = env
logger = get_logger(level='INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
mysql_read = MySQL()
mysql_read.open_connection()
s3 = S3()
start_time = datetime.datetime.now()
logger.info('Script Manager Initialized')
logger.info("")
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("sku_to_add_per_round - " + str(sku_to_add_per_round))
logger.info("store_id_to_close - " + str(store_id))
logger.info("cold_chain_flag - " + str(cold_chain_flag))
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
store_last_sataus_query = """
select
*
from
(
select
row_number() over (partition by nd.`store-id`
order by
nd.`created-at` desc
) as `row`,
nd.`store-id`,
nd.status ,
nd.`created-at`
from
`npi-drugs` nd
where nd.`store-id` = {store_id}) nd
where
nd.`row` = 1
""".format(store_id=store_id)
store_last_sataus = pd.read_sql_query(store_last_sataus_query, mysql_read.connection)
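# A fresh NPI batch is pushed only when the most recent npi-drugs batch for this store is
# already marked 'completed'; otherwise the script only sends a "not-added" email.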
if store_last_sataus.loc[0,'status']=='completed':
# Getting inventory detail
prod_inventory_query = '''
select
i."store-id" ,
i."drug-id" ,
d."drug-name" ,
d."pack-form" ,
d."type" ,
d."cold-chain" ,
sum(i.quantity) as "quantity",
sum(i.quantity + i."locked-for-check" + i."locked-for-audit" + i."locked-for-return" + i."locked-for-transfer" ) as "quantity-available-physically-at-store"
from
"prod2-generico"."prod2-generico"."inventory-1" i
left join "prod2-generico"."prod2-generico".stores s
on
i."store-id" = s.id
left join "prod2-generico"."prod2-generico".drugs d
on
d.id = i."drug-id"
left join "prod2-generico"."prod2-generico"."invoices-1" i2
on
i."franchisee-invoice-id" = i2.id
where
i."store-id" = {store_id}
and i2."franchisee-invoice" = 0
and (i.quantity >0
-- or i."locked-for-check" >0
-- or i."locked-for-audit" >0
-- or i."locked-for-return" >0
-- or i."locked-for-transfer" >0
)
group by
i."store-id" ,
i."drug-id" ,
d."drug-name",
d."pack-form" ,
d."type" ,
d."cold-chain"
'''.format(store_id=store_id)
prod_inventory = rs_db.get_df(prod_inventory_query)
store_assortment_query = """
SELECT
sda."store-id" ,
sda."drug-id"
FROM
"prod2-generico"."store-drug-assortment" sda
WHERE
sda."is-active" = 1
and sda."store-id" ={store_id}
""".format(store_id=store_id)
store_assortment = rs_db.get_df(store_assortment_query)
drugs_in_assortment = tuple(map(int, list(store_assortment['drug-id'].unique())))
npi_drug_list = prod_inventory[~prod_inventory['drug-id'].isin(drugs_in_assortment)]
npi_remaining = len(npi_drug_list) > 0
logger.info('npi-present-check-1')
store_drug_prod_query = '''
select
"store-id" ,
"drug-id",
1 as "dummy"
from
"prod2-generico"."npi-drugs" nd
where
date(nd."created-at") >= date(dateadd(d,-15,current_date))
and nd."store-id" = {store_id}
'''.format(store_id=store_id,date_to_start=date_to_start)
store_drug_prod = rs_db.get_df(store_drug_prod_query)
# merging prod and DSS to avoid duplicate entries
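# (anti-join: drugs already pushed to npi-drugs in the last 15 days get dummy=1 on the
# left merge below and are then dropped by keeping only rows where dummy stayed 0)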
npi_drug_list = npi_drug_list.merge(store_drug_prod, how='left', on=['store-id', 'drug-id'])
npi_drug_list = npi_drug_list.replace(np.nan, 0)
npi_drug_list = npi_drug_list[npi_drug_list.dummy == 0]
npi_remaining = len(npi_drug_list) > 0
logger.info('npi-present-check-2')
choice = [npi_drug_list['type'] == 'high-value-ethical',
npi_drug_list['type'] == 'ethical',
npi_drug_list['type'] == 'generic',
npi_drug_list['type'] == 'ayurvedic',
npi_drug_list['type'] == 'surgical',
npi_drug_list['type'] == 'category-4',
npi_drug_list['type'] == 'otc',
npi_drug_list['type'] == 'general',
npi_drug_list['type'] == 'baby-food',
npi_drug_list['type'] == 'baby-product',
npi_drug_list['type'] == 'glucose-test-kit',
npi_drug_list['type'] == 'discontinued-products',
npi_drug_list['type'] == 'banned']
select = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
npi_drug_list['sort-type'] = np.select(choice, select, default=999)
choice = [npi_drug_list['pack-form'] == 'STRIP',
npi_drug_list['pack-form'] == 'PACKET',
npi_drug_list['pack-form'] == 'SACHET',
npi_drug_list['pack-form'] == 'TUBE',
npi_drug_list['pack-form'] == 'BOTTLE',
npi_drug_list['pack-form'] == 'TETRA PACK',
npi_drug_list['pack-form'] == 'PRE FILLED SYRINGE',
npi_drug_list['pack-form'] == 'VIAL',
npi_drug_list['pack-form'] == 'CARTRIDGE',
npi_drug_list['pack-form'] == 'JAR',
npi_drug_list['pack-form'] == 'SPRAY BOTTLE',
npi_drug_list['pack-form'] == 'BOX',
npi_drug_list['pack-form'] == 'TIN',
npi_drug_list['pack-form'] == 'AMPOULE',
npi_drug_list['pack-form'] == 'KIT']
select = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
npi_drug_list['sort-pack-form'] = np.select(choice, select, default=999)
npi_drug_list.sort_values(['cold-chain', 'sort-pack-form', 'drug-name','sort-type'],
ascending=[True,True,True,True], inplace=True)
if int(cold_chain_flag) == 0:
npi_drug_list = npi_drug_list[npi_drug_list['cold-chain'] == 0]
logger.info('removing cold chain products')
elif int(cold_chain_flag) == 2:
npi_drug_list = npi_drug_list[npi_drug_list['cold-chain'] == 1]
logger.info('considering only cold chain products')
else:
logger.info('Not caring whether cold chain items are added or not')
npi_drug_list = npi_drug_list.head(sku_to_add_per_round).reset_index(drop=True)
final_list_npi = npi_drug_list[['store-id', 'drug-id']]
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
# inserting data into prod
logger.info("mySQL - Insert starting")
final_list_npi.to_sql(name='npi-drugs', con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=500)
logger.info("mySQL - Insert ended")
mysql_write.close()
# npi_added_uri = s3.save_df_to_s3(df=npi_drug_list, file_name='npi_removal_details_{}.csv'.format(cur_date))
if npi_remaining:
status = 'added'
else:
status = 'not-added-because-no-npi'
email = Email()
email.send_email_file(subject=f"{env} : {store_id} NPI List",
mail_body=f"list-{status},{len(final_list_npi)} SKU {status}",
to_emails=email_to, file_uris=[])
else:
status = 'not-added'
email = Email()
email.send_email_file(subject=f"{env} : {store_id} NPI List",
mail_body=f"list-{status},Previos Status - {store_last_sataus.loc[0,'status']}",
to_emails=email_to, file_uris=[])
rs_db.close_connection()
mysql_read.close() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/npi/assortment_based_npi_push.py | assortment_based_npi_push.py |
# =============================================================================
# purpose: NPI REMOVAL CODE
# Author: Saurav Maskar
# =============================================================================
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import datetime
import argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-sku', '--sku_to_add_daily', default=18, type=int, required=False)
parser.add_argument('-fsku', '--fofo_sku_to_add_daily', default=50, type=int, required=False)
parser.add_argument('-ccf', '--cold_chain_flag', default=0, type=str, required=False)
parser.add_argument('-si', '--stores_to_include_if_blank_all', default="NULL", type=str, required=False)
parser.add_argument('-se', '--stores_to_exclude_if_blank_none', default="NULL", type=str, required=False)
parser.add_argument('-ci', '--city_id_to_include_if_blank_all', default="NULL", type=str, required=False)
parser.add_argument('-ce', '--city_id_to_exclude_if_blank_none', default="NULL", type=str, required=False)
parser.add_argument('-ff', '--fofo_inclusion_flag', default="1", type=str, required=False)
parser.add_argument('-gif', '--goodaid_inclusion_flag', default=1, type=int, required=False)
parser.add_argument('-qc', '--quantity_cap', default=70, type=int, required=False)
parser.add_argument('-fqc', '--fofo_quantity_cap', default=70, type=int, required=False)
parser.add_argument('-rfm', '--read_from_mysql', default=1, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
sku_to_add_daily = args.sku_to_add_daily
fofo_sku_to_add_daily = args.fofo_sku_to_add_daily
# Cold Chain Parameter Logic - If 0 - Don't add cold chain products, IF 2 - Only add cold chain product, If 1 - Don't care if cold chain product is added or not
cold_chain_flag = args.cold_chain_flag
stores_to_include_if_blank_all = args.stores_to_include_if_blank_all
stores_to_exclude_if_blank_none = args.stores_to_exclude_if_blank_none
city_id_to_include_if_blank_all = args.city_id_to_include_if_blank_all
city_id_to_exclude_if_blank_none = args.city_id_to_exclude_if_blank_none
fofo_inclusion_flag = args.fofo_inclusion_flag
goodaid_inclusion_flag = args.goodaid_inclusion_flag
quantity_cap = args.quantity_cap
fofo_quantity_cap = args.fofo_quantity_cap
read_from_mysql= args.read_from_mysql
os.environ['env'] = env
logger = get_logger(level='INFO')
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
mysql_read = MySQL()
mysql_read.open_connection()
s3 = S3()
start_time = datetime.datetime.now()
logger.info('Script Manager Initialized')
logger.info("parameters reading")
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("sku_to_add_daily - " + str(sku_to_add_daily))
logger.info("fofo_sku_to_add_daily - " + str(sku_to_add_daily))
logger.info("cold_chain_flag - " + str(cold_chain_flag))
logger.info("stores_to_include_if_blank_all - " + str(stores_to_include_if_blank_all))
logger.info("stores_to_exclude_if_blank_none - " + str(stores_to_exclude_if_blank_none))
logger.info("city_id_to_include_if_blank_all - " + str(city_id_to_include_if_blank_all))
logger.info("city_id_to_exclude_if_blank_none - " + str(city_id_to_exclude_if_blank_none))
logger.info("fofo_inclusion_flag - " + str(fofo_inclusion_flag))
logger.info("goodaid_inclusion_flag - " + str(goodaid_inclusion_flag))
logger.info("quantity_cap - " + str(quantity_cap))
logger.info("fofo_quantity_cap - " + str(fofo_quantity_cap))
# date parameter
logger.info("code started at {}".format(datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
time_period_to_look_back = cur_date.day + 2
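# Assumption: the look-back window is the number of days elapsed in the current month plus a
# 2-day buffer; completed npi-drugs entries created within this window are still treated as
# "recent" and excluded from being pushed again.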
# =============================================================================
# set parameters, to adhere to adhoc request of adding/excluding NPI in mentioned stores only
# =============================================================================
parameter_input1 = False
parameter_input2 = False
parameter_input3 = False
parameter_input4 = False
# Helper to extract store/city ids as integers, irrespective of the input format of the parameter
def fetch_number(list):
list2 = []
for i in list:
try:
list2.append(int(i))
except:
pass
return list2
if stores_to_include_if_blank_all == 'NULL' and stores_to_exclude_if_blank_none == 'NULL':
parameter_input1 = False
parameter_input2 = False
logger.info('Missing parameters, Taking all stores')
else:
if stores_to_include_if_blank_all != 'NULL':
parameter_input1 = True
stores_to_include_if_blank_all = stores_to_include_if_blank_all
stores_to_include_if_blank_all = fetch_number(stores_to_include_if_blank_all.split(','))
logger.info('read parameters to include stores, taking included stores only - {}'.format(
stores_to_include_if_blank_all))
if stores_to_exclude_if_blank_none != 'NULL':
parameter_input2 = True
stores_to_exclude_if_blank_none = stores_to_exclude_if_blank_none
stores_to_exclude_if_blank_none = fetch_number(stores_to_exclude_if_blank_none.split(','))
logger.info('read parameters to exclude stores, not taking excluded stores - {}'.format(
stores_to_exclude_if_blank_none))
if city_id_to_include_if_blank_all == 'NULL' and city_id_to_exclude_if_blank_none == 'NULL':
parameter_input3 = False
parameter_input4 = False
logger.info('Missing parameters, Taking all cities')
else:
if city_id_to_include_if_blank_all != 'NULL':
parameter_input3 = True
city_id_to_include_if_blank_all = city_id_to_include_if_blank_all
city_id_to_include_if_blank_all = fetch_number(city_id_to_include_if_blank_all.split(','))
logger.info('read parameters to include city, taking included cities only - {}'.format(
city_id_to_include_if_blank_all))
if city_id_to_exclude_if_blank_none != 'NULL':
parameter_input4 = True
city_id_to_exclude_if_blank_none = city_id_to_exclude_if_blank_none
city_id_to_exclude_if_blank_none = fetch_number(city_id_to_exclude_if_blank_none.split(','))
logger.info('read parameters to exclude city, not taking excluded cities - {}'.format(
city_id_to_exclude_if_blank_none))
# =============================================================================
# NPI Removal Script
# =============================================================================
# Getting prod drug detail
prod_drugs_query = '''
select
id as "drug-id",
"drug-name",
type,
"pack-form",
"cold-chain"
from
"prod2-generico"."drugs"
'''
prod_drugs = rs_db.get_df(prod_drugs_query)
# getting my sql store_drug list
if int(read_from_mysql) == 1:
store_drug_prod_query = '''
select
`store-id` ,
`drug-id`,
1 as `dummy`
from
`prod2-generico`.`npi-drugs` nd
where
status in ('saved', 'in-progress')
or (status = 'completed'
and date(nd.`created-at`) > date(DATE_ADD(date(now()) , INTERVAL -{time_period_to_look_back} Day)))
'''.format(time_period_to_look_back=time_period_to_look_back)
store_drug_prod = pd.read_sql_query(store_drug_prod_query, mysql_read.connection)
logger.info('Read store_drug_prod - from Mysql')
else:
store_drug_prod_query = '''
select
"store-id" ,
"drug-id",
1 as "dummy"
from
"prod2-generico"."npi-drugs" nd
where
status in ('saved', 'in-progress')
or (status = 'completed'
and date(nd."created-at") > date(dateadd(d,-{time_period_to_look_back},current_date)))
'''.format(time_period_to_look_back=time_period_to_look_back)
store_drug_prod = rs_db.get_df(store_drug_prod_query)
logger.info('Read store_drug_prod - from RS')
# Getting list of drugs in audit at the moment
if int(read_from_mysql) == 1:
audit_drug_prod_query = '''
SELECT
a.`store-id` ,
a.`drug-id` ,
1 as dummy_audit
from
(
select
b.`store-id` ,
a.`drug-id` ,
1 as dummy,
ROW_NUMBER() OVER(PARTITION BY b.`store-id` ,
a.`drug-id`
ORDER BY
a.id DESC) as 'row'
from
`inventory-check-items-1` as a
join `inventory-check-1` as b on
a.`check-id` = b.id
where
b.`complete` = 0)a
WHERE
a.`row` = 1
'''
audit_drug_prod = pd.read_sql_query(audit_drug_prod_query, mysql_read.connection)
logger.info('Read audit_drug_prod - from Mysql')
else:
audit_drug_prod_query = '''
SELECT
a."store-id" ,
a."drug-id" ,
1 as dummy_audit
from
(
select
b."store-id" ,
a."drug-id" ,
1 as dummy,
ROW_NUMBER() OVER(PARTITION BY b."store-id" ,
a."drug-id"
ORDER BY
a.id DESC) as "row"
from
"prod2-generico"."inventory-check-items-1" as a
join "prod2-generico"."inventory-check-1" as b on
a."check-id" = b.id
where
b."complete" = 0)a
WHERE
a."row" = 1
'''
audit_drug_prod = rs_db.get_df(audit_drug_prod_query)
logger.info('Read audit_drug_prod - from RS')
# getting store_id list
# connection = current_config.data_science_postgresql_conn()
# store_list_query = '''
# select distinct store_id
# from dead_stock_inventory dsi
# where inventory_type = 'Rotate'
# '''
# store_list = pd.read_sql_query(store_list_query, connection)
# connection.close()
store_list_query = '''
select
distinct "store-id"
from
"prod2-generico"."npi-inventory-at-store" nias
where
"inventory-type" = 'Rotate'
and nias."clust-sold-flag" = 0
and nias."shelf-life-more-than-6-months-flag" = 1
'''
store_list = rs_db.get_df(store_list_query)
# getting last day store status
store_completed = pd.DataFrame()
if int(read_from_mysql)==1:
store_last_status_query = """
select
*
from
(
select
row_number() over (partition by nd.`store-id`
order by
nd.`created-at` desc
) as `row`,
nd.`store-id`,
nd.status ,
nd.`created-at`
from
`prod2-generico`.`npi-drugs` nd) nd
where
nd.`row` = 1
"""
store_last_status = pd.read_sql_query(store_last_status_query, mysql_read.connection)
logger.info('Read store_last_status - from Mysql')
else:
store_last_status_query = """
select
*
from
(
select
row_number() over (partition by nd."store-id"
order by
nd."created-at" desc
) as "row",
nd."store-id",
nd.status ,
nd."created-at"
from
"prod2-generico"."npi-drugs" nd) nd
where
nd."row" = 1
"""
store_last_status = rs_db.get_df(store_last_status_query)
logger.info('Read store_last_status - from RS')
store_completed = store_last_status[store_last_status['status']=='completed']['store-id']
store_completed = pd.DataFrame(store_completed,columns=['store-id'])
# Checking If any new store is added
nd_stores = store_last_status['store-id'].unique()
new_stores = pd.DataFrame()
for store in store_list['store-id']:
if store not in nd_stores:
#print(store)
store_new = pd.DataFrame([store], columns=['store-id'])
new_stores = new_stores.append(store_new)
store_completed = pd.concat([store_completed,new_stores])
# Adding city ids and franchise flag to stores
store_info_query = '''
select
s.id as "store-id",
s."franchisee-id" ,
s."city-id"
from
"prod2-generico".stores s
'''
store_info = rs_db.get_df(store_info_query )
store_completed = store_completed.merge(store_info,on='store-id',how='left')
if parameter_input1:
store_completed = store_completed[store_completed ['store-id'].isin(stores_to_include_if_blank_all)]
if parameter_input2:
store_completed = store_completed[~store_completed ['store-id'].isin(stores_to_exclude_if_blank_none)]
if parameter_input3:
store_completed = store_completed[store_completed['city-id'].isin(city_id_to_include_if_blank_all)]
if parameter_input4:
store_completed = store_completed[~store_completed['city-id'].isin(city_id_to_exclude_if_blank_none)]
if int(fofo_inclusion_flag) == 0:
store_completed = store_completed[store_completed['franchisee-id']==1]
elif int(fofo_inclusion_flag) == 2:
store_completed = store_completed[store_completed['franchisee-id'] != 1]
elif int(fofo_inclusion_flag) == 1:
store_completed = store_completed
del store_completed['city-id']
# for store in store_list['store-id']:
# store_completed_query = '''
# select
# distinct "store-id"
# from
# "prod2-generico"."npi-drugs"
# where
# date("created-at") =
# (
# select
# Max(date("created-at"))
# from
# "prod2-generico"."npi-drugs"
# where
# "store-id"= {store})
# and status = 'completed'
# and "store-id"= {store}
# '''.format(store=store)
# store_completed_1 = rs_db.get_df(store_completed_query)
#
# if len(store_completed_1)== 0:
# new_store = """
# SELECT
# DISTINCT nd."store-id"
# FROM
# "prod2-generico"."npi-drugs" nd
# WHERE
# nd."store-id" = {store}
# """.format(store=store)
# new_store = rs_db.get_df(new_store)
#
# if len(new_store)== 0:
# store_completed_1 = pd.DataFrame([store],columns=['store-id'])
#
# store_completed = store_completed_1.append(store_completed)
# getting PG drug list
# connection = current_config.data_science_postgresql_conn()
# npi_drug_list = """
# select store_id, drug_id,
# sum(locked_quantity + quantity) as "total_quantity",
# sum(locked_value + value) as "total_value"
# from dead_stock_inventory dsi
# where inventory_type = 'Rotate'
# group by store_id, drug_id
# """
# npi_drug_list = pd.read_sql_query(npi_drug_list, connection)
# connection.close()
npi_drug_list = """
select
"store-id",
"drug-id",
sum("locked-quantity" + "quantity") as "total-quantity",
sum("locked-value" + "value") as "total-value"
from
"prod2-generico"."npi-inventory-at-store" nias
where
"inventory-type" = 'Rotate'
and nias."clust-sold-flag" = 0
and nias."shelf-life-more-than-6-months-flag" = 1
group by
"store-id",
"drug-id"
"""
npi_drug_list = rs_db.get_df(npi_drug_list)
# merging npi list with drugs table for packform
npi_drug_list = npi_drug_list.merge(prod_drugs, how='inner', on='drug-id')
# =============================================================================
# Adding Quantity Sold at System level
# =============================================================================
drgs = tuple(map(int,npi_drug_list['drug-id'].unique()))
s1 = """
select
"drug-id",
sum("net-quantity") as "system-sales-qty-last-90-days"
from
"prod2-generico"."sales" sh
where
date("created-at") >= date(current_date - 90)
and date("created-at") <= date(current_date)
and "drug-id" in {drgs}
group by
"drug-id"
""".format( drgs=drgs)
quantity_sold = rs_db.get_df(s1)
npi_drug_list = npi_drug_list.merge(quantity_sold,on = 'drug-id', how ='left')
npi_drug_list['system-sales-qty-last-90-days'] = npi_drug_list['system-sales-qty-last-90-days'].fillna(0)
# =============================================================================
# System Searched quantity last 90 days
# =============================================================================
s2 = """
select
"drug-id",
sum("search-count-clean") as "system-searched-qty-last-90-days"
from
"prod2-generico"."cfr-searches-v2" csv2
where
date("search-date") >= date(current_date - 90)
and date("search-date") <= date(current_date)
and "drug-id" in {drgs}
group by
"drug-id"
""".format( drgs=drgs)
drugs_searched = rs_db.get_df(s2)
npi_drug_list = npi_drug_list.merge(drugs_searched,on = 'drug-id', how ='left')
npi_drug_list['system-searched-qty-last-90-days'] = npi_drug_list['system-searched-qty-last-90-days'].fillna(0)
npi_drug_list['liquidation-index'] = npi_drug_list['system-sales-qty-last-90-days']*0.8+npi_drug_list['system-searched-qty-last-90-days']*0.2
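# Liquidation index = 0.8 * system sales qty (last 90 days) + 0.2 * system search qty (last 90 days);
# e.g. 50 units sold and 20 searches -> 0.8*50 + 0.2*20 = 44 (illustrative values).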
# GA drugs inclusion flag
if int(goodaid_inclusion_flag) == 0:
logger.info('removing GA drugs')
goodaid_drug_query = '''
select
d.id as "drug-id"
from
"prod2-generico".drugs d
where
d."company-id" = 6984
'''
goodaid_drugs = rs_db.get_df(goodaid_drug_query)
goodaid_drug_id = tuple(map(int, goodaid_drugs['drug-id'].unique()))
npi_drug_list = npi_drug_list[~npi_drug_list['drug-id'].isin(goodaid_drug_id)]
logger.info('removed GA drugs')
else:
logger.info('not removing GA drugs')
if int(cold_chain_flag) == 0:
npi_drug_list = npi_drug_list[npi_drug_list['cold-chain']==0]
logger.info('removing cold chain products')
elif int(cold_chain_flag) == 2:
npi_drug_list = npi_drug_list[npi_drug_list['cold-chain'] == 1]
logger.info('considering only cold chain products')
else:
logger.info('Not caring whether cold chain items are added or not')
# merging prod and DSS to avoid duplicate entries
npi_drug_list = npi_drug_list.merge(store_drug_prod, how='left', on=['store-id', 'drug-id'])
# merging with completed stores
npi_drug_list = npi_drug_list.merge(store_completed, how='inner', on=['store-id'])
# replacing nulls with 0 and keeping only drugs not already present in npi-drugs
npi_drug_list = npi_drug_list.replace(np.nan, 0)
npi_drug_list = npi_drug_list[npi_drug_list.dummy == 0]
# merging with audit drugs to avoid audit drugs entry
npi_drug_list = npi_drug_list.merge(audit_drug_prod, how='left', on=['store-id', 'drug-id'])
# replacing nulls with 0 and dropping drugs currently under audit
npi_drug_list = npi_drug_list.replace(np.nan, 0)
npi_drug_list = npi_drug_list[npi_drug_list.dummy_audit == 0]
npi_drug_list=npi_drug_list[~npi_drug_list['type'].isin(['discontinued-products','banned'])]
choice = [npi_drug_list['type'] == 'high-value-ethical',
npi_drug_list['type'] == 'ethical',
npi_drug_list['type'] == 'generic',
npi_drug_list['type'] == 'ayurvedic',
npi_drug_list['type'] == 'surgical',
npi_drug_list['type'] == 'category-4',
npi_drug_list['type'] == 'otc',
npi_drug_list['type'] == 'general',
npi_drug_list['type'] == 'baby-food',
npi_drug_list['type'] == 'baby-product',
npi_drug_list['type'] == 'glucose-test-kit',
npi_drug_list['type'] == 'discontinued-products',
npi_drug_list['type'] == 'banned']
select = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
npi_drug_list['sort-type'] = np.select(choice, select, default=999)
npi_drug_list.sort_values(['store-id', 'liquidation-index', 'sort-type', 'pack-form', 'drug-name'],
ascending=[True, False, True, True, True], inplace=True)
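# Within each store, drugs with the highest liquidation index come first, then type priority
# (high-value-ethical, ethical, generic, ...), with pack-form and drug-name as tie-breakers.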
# Adding decided SKU (18 - parameter - sku_to_add_daily) per day
npi_drug_list_franchisee = npi_drug_list[npi_drug_list['franchisee-id']!=1]
npi_drug_list_coco = npi_drug_list[npi_drug_list['franchisee-id']==1]
final_list_franchisee = npi_drug_list_franchisee.groupby('store-id').head(fofo_sku_to_add_daily).reset_index(drop=True)
final_list_coco = npi_drug_list_coco.groupby('store-id').head(sku_to_add_daily).reset_index(drop=True)
final_list = pd.concat([final_list_franchisee,final_list_coco],sort = True).reset_index(drop=True)
# Capping quantity to the decided number for stores outside Mumbai (70 - parameter - quantity_cap)
final_list['total-quantity'] = final_list['total-quantity'].astype(float)
final_list['cum_sum_quantity_per_store'] = final_list.groupby(['store-id'])['total-quantity'].cumsum()
# At least one SKU should always be added
final_list['sku_rank'] = final_list.groupby(['store-id']).cumcount()+1
# Adding city ids
# Mumbai city ids - 1 - Mumbai, 2 - Navi Mumbai, 3 - Thane
store_ids = tuple(map(int,final_list['store-id'].unique()))
additng_city_id_query = """
select
s.id as "store-id",
s."city-id",
zc."name" as "city-name"
from
"prod2-generico".stores s
left join "prod2-generico"."zeno-city" zc
on
s."city-id" = zc.id
where s.id in {store_ids}
""".format(store_ids=store_ids + (0,0))
additng_city_id = rs_db.get_df(additng_city_id_query)
final_list = final_list.merge(additng_city_id,how = 'left', on = 'store-id')
final_list['city-id'] = final_list['city-id'].astype(int)
conditions = [final_list['city-id'].isin([1,2,3]),final_list['sku_rank']==1,final_list['franchisee-id']!=1,final_list['sku_rank']!=1]
choices = [1,1,1,final_list['cum_sum_quantity_per_store']]
final_list['quantity_cap_index'] = np.select(conditions, choices, default = 0)
final_list = final_list[((final_list['franchisee-id']==1) & (final_list['quantity_cap_index']<quantity_cap))|((final_list['franchisee-id']!=1) & (final_list['quantity_cap_index']<fofo_quantity_cap))]
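# Quantity-cap logic (as read from the conditions above): Mumbai-region stores (city ids 1/2/3)
# and the first SKU of every store always pass (index=1); FOFO stores also get index=1, so the
# cap effectively applies only to COCO stores outside Mumbai via the cumulative quantity.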
logger.info(f'for stores outside Mumbai, quantity is capped to {quantity_cap}')
final_list['created-date'] = cur_date
final_list['created-by'] = '[email protected]'
final_list_npi = final_list[['store-id', 'drug-id']]
expected_data_length_insert = len(final_list_npi)
logger.info("mySQL - Resulted data length after insert should be is {}".format(expected_data_length_insert))
schema = 'prod2-generico'
table_name = 'npi-removal'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
status1 = False
status2 = False
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
s3.write_df_to_db(df=final_list[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
status1 = True
if status1:
mysql_write = MySQL(read_only=False)
mysql_write.open_connection()
# inserting data into prod
logger.info("mySQL - Insert starting")
final_list_npi.to_sql(name='npi-drugs', con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=500)
logger.info("mySQL - Insert ended")
status2 = True
npi_added_uri = s3.save_df_to_s3(df=final_list, file_name='npi_removal_details_{}.csv'.format(cur_date))
if status2 is True:
status = 'Success'
else:
status = 'Failed'
end_time = datetime.datetime.now()
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject=f"{env}-{status} : {table_name} table updated",
mail_body=f"{table_name} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[npi_added_uri])
rs_db.close_connection()
mysql_write.close()
mysql_read.close() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/npi/npi-removal.py | npi-removal.py |
import os
import sys
import argparse
import pandas as pd
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper import helper
from zeno_etl_libs.utils.ipc_pmf.ipc_combination_fcst.forecast_main import ipc_comb_forecast
from zeno_etl_libs.utils.ipc_pmf.ipc_drug_fcst.forecast_main import ipc_drug_forecast
from zeno_etl_libs.utils.ipc_pmf.ipc_combination_fcst.fcst_mapping import fcst_comb_drug_map
from zeno_etl_libs.utils.ipc_pmf.safety_stock import safety_stock_calc
from zeno_etl_libs.utils.ipc_pmf.post_processing import post_processing
from zeno_etl_libs.utils.ipc_pmf.heuristics.recency_corr import fcst_correction
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
def main(debug_mode, reset_stores, reset_date, type_list_comb_lvl,
type_list_drug_lvl, v4_active_flag, drug_type_list_v4,
read_schema, rs_db_read, write_schema, rs_db_write, logger):
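    # Pipeline overview (per store): combination-level forecast -> drug-level forecast ->
    # recency correction -> map combination forecasts to assortment drugs -> safety-stock
    # calculation -> post-processing -> (if not debug) write to RS-DB and update DOI.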
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
# Define empty variables required in case of failure
safety_stock_df = pd.DataFrame()
df_one_one = pd.DataFrame()
df_one_many = pd.DataFrame()
df_one_none = pd.DataFrame()
df_none_one = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
try:
for store_id in reset_stores:
logger.info(f"Running for store id: {store_id} and reset date: {reset_date}")
type_list_comb_lvl_str = str(type_list_comb_lvl).replace('[', '(').replace(']', ')')
type_list_drug_lvl_str = str(type_list_drug_lvl).replace('[', '(').replace(']', ')')
# RUNNING IPC-COMBINATION FORECAST PIPELINE
logger.info("Combination Forecast Pipeline starts")
fcst_df_comb_lvl, seg_df_comb_lvl, \
comb_sales_latest_12w, comb_sales_4w_wtd = ipc_comb_forecast(
store_id, reset_date, type_list_comb_lvl_str, read_schema, rs_db_read,
logger)
# RUNNING IPC-DRUG FORECAST PIPELINE
logger.info("Drug Forecast Pipeline starts")
fcst_df_drug_lvl, seg_df_drug_lvl, drug_sales_latest_12w,\
drug_sales_latest_4w, drug_sales_4w_wtd = ipc_drug_forecast(
store_id, reset_date, type_list_drug_lvl_str, read_schema,
rs_db_read, logger)
# RECENCY CORRECTION IF FCST=0, FCST=AVG_DEMAND_28D (FROM LATEST 12W)
logger.info("Recency correction starts")
fcst_df_comb_lvl, fcst_df_drug_lvl = fcst_correction(
fcst_df_comb_lvl, comb_sales_latest_12w, fcst_df_drug_lvl,
drug_sales_latest_12w, drug_sales_latest_4w, comb_sales_4w_wtd,
drug_sales_4w_wtd, logger)
# MAPPING FORECASTS TO ASSORTMENT DRUGS
logger.info("Allotting combination forecasts to drugs")
df_fcst_final, df_one_one, df_one_many, \
df_one_none, df_none_one = fcst_comb_drug_map(
store_id, reset_date, fcst_df_comb_lvl, fcst_df_drug_lvl,
type_list_comb_lvl, read_schema, rs_db_read, logger)
# SAFETY STOCK CALCULATIONS
logger.info("Safety Stock Calculations starts")
safety_stock_df = safety_stock_calc(
df_fcst_final, store_id, reset_date,
v4_active_flag, drug_type_list_v4, drug_sales_latest_12w,
read_schema, rs_db_read, logger)
# POST PROCESSING SS DF
logger.info("Post Processing SS-DF starts")
safety_stock_df, seg_df_comb_lvl, seg_df_drug_lvl = post_processing(
store_id, safety_stock_df, seg_df_comb_lvl, seg_df_drug_lvl,
read_schema, rs_db_read, logger)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc-pmf-safety-stock
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date,
'%Y-%m-%d').date()
safety_stock_df['store_id'] = safety_stock_df['store_id'].astype(int)
safety_stock_df['drug_id'] = safety_stock_df['drug_id'].astype(int)
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in
safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-pmf-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: ipc-pmf-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='ipc-pmf-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table ipc-pmf-comb-segmentation
seg_df_comb_lvl['reset_date'] = dt.datetime.strptime(reset_date,
'%Y-%m-%d').date()
seg_df_comb_lvl['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df_comb_lvl['created-by'] = 'etl-automation'
seg_df_comb_lvl['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df_comb_lvl['updated-by'] = 'etl-automation'
seg_df_comb_lvl.columns = [c.replace('_', '-') for c in seg_df_comb_lvl.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-pmf-comb-segmentation',
schema=write_schema)
columns = list(table_info['column_name'])
seg_df_comb_lvl = seg_df_comb_lvl[columns] # required column order
logger.info("Writing to table: ipc-pmf-comb-segmentation")
s3.write_df_to_db(df=seg_df_comb_lvl,
table_name='ipc-pmf-comb-segmentation',
db=rs_db_write, schema=write_schema)
# writing table ipc-pmf-drug-segmentation
seg_df_drug_lvl['reset_date'] = dt.datetime.strptime(reset_date,
'%Y-%m-%d').date()
seg_df_drug_lvl['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df_drug_lvl['created-by'] = 'etl-automation'
seg_df_drug_lvl['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df_drug_lvl['updated-by'] = 'etl-automation'
seg_df_drug_lvl.columns = [c.replace('_', '-') for c in
seg_df_drug_lvl.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc-pmf-drug-segmentation',
schema=write_schema)
columns = list(table_info['column_name'])
seg_df_drug_lvl = seg_df_drug_lvl[columns] # required column order
logger.info("Writing to table: ipc-pmf-drug-segmentation")
s3.write_df_to_db(df=seg_df_drug_lvl,
table_name='ipc-pmf-drug-segmentation',
db=rs_db_write, schema=write_schema)
logger.info("All writes to RS-DB completed!")
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in
safety_stock_df.columns]
ss_data_upload = safety_stock_df.loc[
(safety_stock_df["order_upto_point"] > 0)]
ss_data_upload = ss_data_upload[['store_id', 'drug_id',
'safety_stock',
'reorder_point',
'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list_drug_lvl_str, rs_db_write,
write_schema, logger, gaid_omit=False)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
status = 'Success'
except Exception as error:
logger.exception(error)
return status, safety_stock_df, df_one_one, df_one_many, df_one_none, \
df_none_one, new_drug_entries, missed_entries
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]",
type=str, required=False)
parser.add_argument('-d', '--debug_mode', default="Y", type=str,
required=False)
parser.add_argument('-rd', '--reset_date', default="YYYY-MM-DD", type=str,
required=False)
parser.add_argument('-rs', '--reset_stores',
default=[4], nargs='+', type=int,
required=False)
parser.add_argument('-v4', '--v4_active_flag', default="Y", type=str,
required=False)
parser.add_argument('-v4tl', '--drug_type_list_v4',
default="{'generic':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',"
"'ethical':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',"
"'others':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}'}",
type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
debug_mode = args.debug_mode
# JOB EXCLUSIVE PARAMS
reset_date = args.reset_date
reset_stores = args.reset_stores
v4_active_flag = args.v4_active_flag
drug_type_list_v4 = args.drug_type_list_v4
# EVALUATE REQUIRED JSON PARAMS
drug_type_list_v4 = literal_eval(drug_type_list_v4)
type_list_comb_lvl = ['ethical', 'generic', 'discontinued-products',
'high-value-ethical']
type_list_drug_lvl = ['ethical', 'ayurvedic', 'generic',
'discontinued-products', 'banned', 'general',
'high-value-ethical', 'baby-product', 'surgical',
'otc', 'glucose-test-kit', 'category-2', 'category-1',
'category-4', 'baby-food', '', 'category-3']
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
logger = get_logger()
s3 = S3()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
""" calling the main function """
status, safety_stock_df, df_one_one, df_one_many, df_one_none, \
df_none_one, new_drug_entries, missed_entries = main(
debug_mode, reset_stores, reset_date, type_list_comb_lvl,
type_list_drug_lvl, v4_active_flag, drug_type_list_v4,
read_schema, rs_db_read, write_schema, rs_db_write, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
ss_df_uri = s3.save_df_to_s3(
safety_stock_df, file_name=f"safety_stock_df_{reset_date}.csv")
new_drug_entries_uri = s3.save_df_to_s3(new_drug_entries,
file_name=f"new_drug_entries_{reset_date}.csv")
missed_entries_uri = s3.save_df_to_s3(missed_entries,
file_name=f"missed_entries_{reset_date}.csv")
all_cases_xl_path = s3.write_df_to_excel(data={
'C1_one_one': df_one_one, 'C2_one_many': df_one_many,
'C3_one_none': df_one_none, 'C4_none_one': df_none_one},
file_name=f"all_mappings_{reset_date}.xlsx")
email = Email()
email.send_email_file(
subject=f"IPC Combination Fcst (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Reset Stores: {reset_stores}
Job Params: {args}
""",
to_emails=email_to, file_uris=[ss_df_uri, new_drug_entries_uri,
missed_entries_uri],
file_paths=[all_cases_xl_path]) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ipc_pmf/ipc_pmf.py | ipc_pmf.py |
# !/usr/bin/env python
# coding: utf-8
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from datetime import datetime
from datetime import timedelta
from dateutil.tz import gettz
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-rd', '--runtime_date_exp', default="0101-01-01", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
runtime_date_exp = args.runtime_date_exp
email_to = args.email_to
# env = 'stage'
# limit = 10
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
# Connections
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
#############################################
# Main logic block
#############################################
# Run date
if runtime_date_exp == '0101-01-01':
# Timezone aware
run_date = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d")
else:
run_date = runtime_date_exp
# runtime_date = '2018-09-01'
logger.info("Running for {}".format(run_date))
# Period end date
# Paramatrize it
period_end_d_ts = datetime.strptime(run_date, '%Y-%m-%d') - timedelta(days=1)
period_end_d = period_end_d_ts.strftime('%Y-%m-%d')
logger.info("Run date minus 1 is {}".format(period_end_d))
day_minus8 = (pd.to_datetime(run_date) - timedelta(days=8)).strftime("%Y-%m-%d")
logger.info("Runtime date minus 8 is {}".format(period_end_d))
# Read existing data so that only new data is uploaded
read_schema = 'prod2-generico'
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
query = f"""
SELECT
"shortbook-date"
FROM
"cfr-patient-request"
GROUP BY
"shortbook-date"
"""
logger.info(query)
rs_db_write.execute(query, params=None)
last_data_date: pd.DataFrame = rs_db_write.cursor.fetch_dataframe()
if last_data_date is None:
last_data_date = pd.DataFrame(columns=['shortbook_date'])
last_data_date.columns = [c.replace('-', '_') for c in last_data_date.columns]
logger.info(len(last_data_date))
last_data_date.head()
try:
last_sb_date_max = pd.to_datetime(last_data_date['shortbook_date']).max().strftime('%Y-%m-%d')
except ValueError:
last_sb_date_max = '2000-06-01'
logger.info("Last date in last data for cfr patient request is : {}".format(last_sb_date_max))
# Remaining data to be fetched
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
pr_q = """
SELECT
`id` as `short-book-id`,
`store-id`,
`created-at`,
`patient-id`,
`unique-id`,
`drug-id`,
`drug-name`,
`requested-quantity`,
`inventory-at-creation`,
`quantity`,
`home-delivery`,
`received-at`,
`bill-id`
FROM `short-book-1`
WHERE `auto-short` = 0
and `auto-generated` = 0
and date(`created-at`) > '{0}'
and date(`created-at`) <= '{1}'
""".format(last_sb_date_max, day_minus8)
pr_q = pr_q.replace('`', '"')
logger.info(pr_q)
rs_db.execute(pr_q, params=None)
data_pr: pd.DataFrame = rs_db.cursor.fetch_dataframe()
if data_pr is None:
data_pr = pd.DataFrame(columns=['short_book_id', 'store_id', 'created_at', 'patient_id', 'unique_id',
'drug_id', 'drug_name', 'requested_quantity',
'inventory_at_creation',
'quantity', 'home_delivery',
'received_at', 'bill_id'])
data_pr.columns = [c.replace('-', '_') for c in data_pr.columns]
logger.info(len(data_pr))
logger.info("New PR data length is : {}".format(len(data_pr)))
data_pr['shortbook_date'] = pd.to_datetime(data_pr['created_at']).dt.normalize()
for i in ['created_at', 'received_at']:
data_pr[i] = pd.to_datetime(data_pr[i], errors='coerce')
logger.info("Min date in new data is {} and max date is {}".format(data_pr['shortbook_date'].min().strftime("%Y-%m-%d"),
data_pr['shortbook_date'].max().strftime(
"%Y-%m-%d")))
##################################################
# Now loss calculation starts
##################################################
# Remove invalid requested quantity
data_pr_f = data_pr[data_pr.requested_quantity > 0]
logger.info("New PR data length after removing negative and 0 requested quantity is : {}".format(len(data_pr_f)))
# Replace NULL drug-ids with -1 so that it can be identified as new drug
data_pr_f['drug_id'] = data_pr_f['drug_id'].fillna(-1).astype(int)
# MySQL drugs table
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
drugs_q = """
SELECT
id as drug_id,
`drug-name`,
`composition`,
category as drug_category,
type as drug_type,
`repeatability-index`
FROM
drugs
"""
drugs_q = drugs_q.replace('`', '"')
logger.info(drugs_q)
rs_db.execute(drugs_q, params=None)
data_drugs: pd.DataFrame = rs_db.cursor.fetch_dataframe()
if data_drugs is None:
data_drugs = pd.DataFrame(columns=['drug_id', 'drug_name', 'composition', 'drug_category',
'drug_type', 'repeatability_index'])
data_drugs.columns = [c.replace('-', '_') for c in data_drugs.columns]
logger.info(len(data_drugs))
logger.info("Drug master length is : {}".format(len(data_drugs)))
# Join PR data with drugs
data_pr_f = data_pr_f.merge(data_drugs, how='left', on=['drug_id'])
data_pr_f['drug_name'] = np.where(data_pr_f['drug_id'] > 0, data_pr_f['drug_name_y'], data_pr_f['drug_name_x'])
# Search for bills in bill-items-1
bills = tuple(list(data_pr_f['bill_id'].dropna().astype(int).drop_duplicates()))
logger.info("Number of bills to be searched is : {}".format(len(bills)))
#########################################
# Sales data
#########################################
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
sales_q = """
SELECT
`created-at`,
`patient-id`,
`bill-id`,
`drug-id`,
sum("revenue-value")/sum("quantity") as avg_rate,
sum("quantity") as sold_quantity
FROM
sales
WHERE
`bill-id` in {}
and `bill-flag` = 'gross'
GROUP BY
`created-at`,
`patient-id`,
`bill-id`,
`drug-id`
""".format(bills)
sales_q = sales_q.replace('`', '"')
# logger.info(sales_q)
rs_db.execute(sales_q, params=None)
data_b: pd.DataFrame = rs_db.cursor.fetch_dataframe()
if data_b is None:
data_b = pd.DataFrame(columns=['created_at', 'patient_id', 'bill_id',
'drug_id', 'avg_rate', 'sold_quantity'])
data_b.columns = [c.replace('-', '_') for c in data_b.columns]
logger.info(len(data_b))
data_b['bill_date'] = pd.to_datetime(data_b['created_at']).dt.normalize()
logger.info("Bill date length is : {}".format(len(data_b)))
# Join with main data
data_b = data_b.rename(columns={'patient_id': 'bill_patient_id', 'created_at': 'bill_created_at'})
data_final_join = data_pr_f.merge(data_b, how='left', on=['bill_id', 'drug_id'])
logger.info("PR data length after joining with bills data is : {}".format(len(data_final_join)))
data_final_join['day_diff'] = (data_final_join['bill_date'] - data_final_join['shortbook_date']).dt.days
# Loss calculation
data_final_join['within_tat_flag'] = np.where(data_final_join['day_diff'].between(0, 7), 1, 0)
data_final_join['within_tat_sold_quantity'] = np.where(data_final_join['within_tat_flag'] == 1,
data_final_join['sold_quantity'], 0)
data_final_join['diff_quantity'] = data_final_join['requested_quantity'] - data_final_join[
'within_tat_sold_quantity']
data_final_join['loss_quantity'] = np.where(data_final_join['diff_quantity'] > 0, data_final_join['diff_quantity'],
0)
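# Illustrative example (values assumed): requested_quantity=10, quantity sold on the attached
# bill within 0-7 days of the shortbook date = 6 -> diff_quantity=4 -> loss_quantity=4;
# if the sold quantity covers the requested quantity, loss_quantity is floored at 0.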
# Rate already present in bill attached
data_final_join['rate_present'] = np.where(data_final_join['avg_rate'] > 0, 1, 0)
# Filter out quantity > 30
data_final_join2 = data_final_join[data_final_join.requested_quantity <= 30]
logger.info("PR data length after filtering out outlier quantity is : {}".format(len(data_final_join2)))
# Populate rate for those not fulfilled
drugs = tuple(list(data_final_join2['drug_id'].dropna().drop_duplicates().astype(int)))
logger.info("Count of drugs to look up in historical sales is : {}".format(len(drugs)))
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
rate_q = """
SELECT
"drug-id",
SUM("revenue-value")/SUM("quantity") AS avg_rate_system
FROM
"sales"
WHERE
date("created-at") <= '{0}'
and "bill-flag" = 'gross'
GROUP BY
"drug-id"
""".format(period_end_d)
rate_q = rate_q.replace('`', '"')
logger.info(rate_q)
rs_db.execute(rate_q, params=None)
data_d: pd.DataFrame = rs_db.cursor.fetch_dataframe()
if data_d is None:
data_d = pd.DataFrame(columns=['drug_id', 'avg_rate_system'])
data_d.columns = [c.replace('-', '_') for c in data_d.columns]
logger.info(len(data_d))
data_d.head()
logger.info("Count of drugs to looked up successfully in historical sales is : "
"{}".format(len(data_d)))
# Join with main data
data_final_join2 = data_final_join2.merge(data_d, how='left', on=['drug_id'])
# Final attributed rate: use the rate from the attached PR bill if present, else fall back to the system-wide average rate
data_final_join2['attributed_rate'] = np.where(data_final_join2['rate_present'] == 1, data_final_join2['avg_rate'],
data_final_join2['avg_rate_system'])
# Still some drugs which are new, will not have a rate assigned
data_final_join2['system_present'] = np.where(data_final_join2['attributed_rate'] > 0, 1, 0)
# Missing rates are replaced by a representative default across all drugs (100 currently)
# Can be changed later
data_final_join2['attributed_rate'] = data_final_join2['attributed_rate'].fillna(100)
# Final loss sales
data_final_join2['final_lost_sales'] = data_final_join2['loss_quantity'].astype(float) * data_final_join2[
'attributed_rate'].astype(float)
# Sold quantity and num_days_sold
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
sales_summ_q = """
SELECT
"store-id",
"drug-id",
COUNT(distinct date("created-at")) as num_days_sold,
MAX(date("created-at")) as last_sold
FROM
"sales"
WHERE
date("created-at") <= '{0}'
and "bill-flag" = 'gross'
GROUP BY
"store-id",
"drug-id"
""".format(period_end_d)
sales_summ_q = sales_summ_q.replace('`', '"')
logger.info(sales_summ_q)
rs_db.execute(sales_summ_q, params=None)
data_d2: pd.DataFrame = rs_db.cursor.fetch_dataframe()
if data_d2 is None:
data_d2 = pd.DataFrame(columns=['store_id', 'drug_id', 'num_days_sold', 'last_sold'])
data_d2.columns = [c.replace('-', '_') for c in data_d2.columns]
logger.info(len(data_d2))
data_d2.head()
logger.info("Count of drugs with sold quantity and num_days_sold is : {}".format(len(data_d2)))
# Join with main data
data_final_join2 = data_final_join2.merge(data_d2, how='left', on=['store_id', 'drug_id'])
# Put 0 for those not sold in that store
data_final_join2['num_days_sold'] = data_final_join2['num_days_sold'].fillna(0)
# Round off some values
for i in ['attributed_rate', 'final_lost_sales']:
data_final_join2[i] = np.round(data_final_join2[i].astype(float), 2)
# Attributed date
data_final_join2['attributed_loss_date'] = data_final_join2['shortbook_date'] + timedelta(days=7)
# Merge stores
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
stores_q = """
SELECT
id AS store_id,
store AS store_name
FROM
"stores-master"
"""
stores_q = stores_q.replace('`', '"')
logger.info(stores_q)
rs_db.execute(stores_q, params=None)
stores: pd.DataFrame = rs_db.cursor.fetch_dataframe()
if stores is None:
stores = pd.DataFrame(columns=['store_id', 'store_name'])
stores.columns = [c.replace('-', '_') for c in stores.columns]
logger.info(len(stores))
cfr_pr = data_final_join2.merge(stores, how='left', on=['store_id'])
# For redshift specific
# Convert int columns to int
for i in ['num_days_sold', 'repeatability_index', 'bill_id']:
cfr_pr[i] = cfr_pr[i].fillna(0).astype(int)
for i in ['shortbook_date', 'attributed_loss_date', 'bill_date', 'last_sold']:
cfr_pr[i] = pd.to_datetime(cfr_pr[i]).dt.date
logger.info(cfr_pr.columns)
#########################################
# Loss classification logic (stand-alone function); in DSS this corresponds to cfr-seg
########################################
# DOI INFO
store_ids = tuple(list(cfr_pr['store_id'].dropna().astype(int).drop_duplicates()))
drug_ids = tuple(list(cfr_pr['drug_id'].dropna().astype(int).drop_duplicates()))
# Fetch all tables data
# tagging drug as new/old
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
doi_q = """
SELECT
`store-id`,
`drug-id`,
`min`,
`safe-stock`,
`max`,
`drug-grade`
FROM
`drug-order-info`
WHERE
`store-id` in {0}
and `drug-id` in {1}
""".format(store_ids, drug_ids)
doi_q = doi_q.replace('`', '"')
# logger.info(doi_q)
rs_db.execute(doi_q, params=None)
doi_data: pd.DataFrame = rs_db.cursor.fetch_dataframe()
if doi_data is None:
doi_data = pd.DataFrame(columns=['store_id', 'drug_id', 'min', 'safe_stock', 'max', 'drug_grade'])
doi_data.columns = [c.replace('-', '_') for c in doi_data.columns]
logger.info(len(doi_data))
cfr_pr = cfr_pr.merge(doi_data, how='left', on=['store_id', 'drug_id'])
cfr_pr['fulfilment_hours'] = (cfr_pr['received_at'] - cfr_pr['created_at']).astype('timedelta64[h]')
# Loss classification tag
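# Classification hierarchy: A1/A2 grade drugs -> DS_loss; enough inventory at SB creation -> system_loss;
# fulfilled within 48 hours -> store_loss; fulfilled after 48 hours or never -> supply_chain_loss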
def loss_classification_tag(x):
if x['drug_grade'] in ['A1', 'A2']:
return 'DS_loss'
elif (x['inventory_at_creation']) >= (x['requested_quantity']):
return 'system_loss'
elif (x['fulfilment_hours'] >= 0) & (x['fulfilment_hours'] < 48):
return 'store_loss'
elif (x['fulfilment_hours'] >= 48) or pd.isnull(x['fulfilment_hours']):
return 'supply_chain_loss'
else:
return 'None'
cfr_pr['loss_tag'] = cfr_pr.apply(loss_classification_tag, axis=1)
# DB upload columns
final_cols = ['store_id', 'store_name', 'shortbook_date', 'patient_id', 'unique_id', 'drug_id',
'short_book_id',
'drug_name_x', 'composition', 'repeatability_index', 'drug_category', 'drug_type',
'requested_quantity', 'inventory_at_creation',
'created_at', 'received_at',
'home_delivery', 'drug_name_y',
'bill_id', 'bill_date', 'within_tat_flag', 'within_tat_sold_quantity', 'loss_quantity',
'system_present', 'attributed_rate', 'final_lost_sales', 'attributed_loss_date', 'num_days_sold',
'last_sold',
'min', 'safe_stock', 'max', 'drug_grade',
'fulfilment_hours', 'loss_tag']
cfr_pr = cfr_pr[final_cols]
#####################################################
# Write to DB
#####################################################
data_export = cfr_pr.copy()
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
write_schema = 'prod2-generico'
write_table_name = 'cfr-patient-request'
table_info = helper.get_table_info(db=rs_db_write, table_name=write_table_name, schema=write_schema)
# table_info_clean = table_info[~table_info['column_name'].isin(['id', 'created-at', 'updated-at'])]
# Mandatory lines
data_export['etl-created-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['etl-created-by'] = 'etl-automation'
data_export['etl-updated-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['etl-updated-by'] = 'etl-automation'
s3.write_df_to_db(df=data_export[table_info['column_name']], table_name=write_table_name, db=rs_db_write,
schema=write_schema)
logger.info("Uploading successful with length: {}".format(len(data_export)))
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection()
logger.info("File ends") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/cfr-patient-request/cfr-patient-request.py | cfr-patient-request.py |
""
import argparse
import os
import sys
import datetime
import dateutil
from dateutil.tz import gettz
from datetime import datetime as dt
from datetime import date, timedelta
import pandas as pd
import numpy as np
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.db.db import DB
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"info message")
rs_db = DB()
rs_db_write = DB(read_only=False)
rs_db.open_connection()
rs_db_write.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'franchise-patient-one-view'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
# creating summary data using bills info
pat_store_q = """
select
s."patient-id",
s."store-id" ,
count(distinct "bill-id") "total-bills-to-store",
sum(s."revenue-value") "total-value-to-store",
max(date("created-at")) "last-transacted-at",
max("bill-id") "last-bill-id"
from
"prod2-generico".sales s
where
"franchisee-id" != 1
and "bill-flag" = 'gross'
group by
1,
2
having
"last-transacted-at"> DATEADD('month',
-6,
DATE_TRUNC('month', CURRENT_DATE))
"""
pat_store = rs_db.get_df(pat_store_q)
# taking refill date
refill_q = f"""
select
"patient-id",
"store-id",
min("refill-date") as "expected-next-date"
from
"prod2-generico"."retention-refill"
where
"bill-id" in {tuple(pat_store['last-bill-id'].unique())}
group by
1,
2
"""
refill = rs_db.get_df(refill_q)
pat_store_refill = pd.merge(pat_store, refill, how='left', on=['patient-id', 'store-id'])
pat_store_refill[(pd.isnull(pat_store_refill['expected-next-date']) == True)].head()
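# Patients whose last bill has no refill prediction default to an expected next visit 30 days after the last transaction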
pat_store_refill['expected-next-date'] = np.where(pd.isnull(pat_store_refill['expected-next-date']) == True,
pat_store_refill['last-transacted-at'] +
timedelta(days=30), pat_store_refill['expected-next-date'])
#segment category
seg_q = """
select
id as "patient-id",
"patient-category"
from
"prod2-generico".patients
where
id in {}
""".format(tuple(pat_store_refill['patient-id'].unique()))
seg = rs_db.get_df(seg_q)
pat_store_refill_seg = pd.merge(pat_store_refill, seg, how='left', on=['patient-id'])
#etl
pat_store_refill_seg['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
pat_store_refill_seg['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
pat_store_refill_seg['created-by'] = 'etl-automation'
pat_store_refill_seg['updated-by'] = 'etl-automation'
if pat_store_refill_seg.empty:
print('DataFrame is empty!')
exit()
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}"'''
print(truncate_query)
rs_db_write.execute(truncate_query)
s3.write_df_to_db(df=pat_store_refill_seg[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/franchise-patient-one-view/franchise-patient-one-view.py | franchise-patient-one-view.py |
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from datetime import datetime as dt
from datetime import timedelta
import pandas as pd
from dateutil.tz import gettz
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-fr', '--full_run', default=0, type=int, required=False)
parser.add_argument('-ar', '--alternate_range', default=0, type=int, required=False)
parser.add_argument('-st', '--start', default="2017-01-01", type=str, required=False)
parser.add_argument('-ed', '--end', default=str(dt.now().date()), type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
full_run = args.full_run
alternate_range = args.alternate_range
start = args.start
end = args.end
logger = get_logger()
logger.info(f"env: {env}")
# params
if full_run:
start = '2017-05-13'
end = str(dt.today().date() - timedelta(days=1))
elif alternate_range:
start = start
end = end
else:
start = str(dt.today().date() - timedelta(days=2))
end = str(dt.today().date() - timedelta(days=1))
read_schema = 'prod2-generico'
table_name = 'daywise-marketing-spend'
rs_db = DB()
rs_db.open_connection()
s3 = S3()
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=read_schema)
if isinstance(table_info, type(None)):
logger.info(f"table: {table_name} do not exist")
else:
truncate_query = f"""
DELETE
FROM
"{read_schema}"."{table_name}"
WHERE
"date" BETWEEN '{start}' AND '{end}';
"""
logger.info(f"truncate query : \n {truncate_query}")
rs_db.execute(truncate_query)
# Date_wise_spend_and_sale_calculation
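# T1 aggregates bills, unique patients, sales and profit per date/store/promo/new-patient grain;
# T2 aggregates promo discounts (marketing spend) on the same grain; the two are joined on all grain columns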
market_spend_q = f"""
SELECT
T1.date_ as date ,
T1.store_id,
T1.`promo-code`,
T1.`code-type`,
T1.new_patient,
T1.unique_patients_count,
T1.total_bills_count,
T1.sales,
T1.profit,
T2.marketing_spend
FROM
(
SELECT
date(b.`created-at`) AS date_,
b.`store-id` AS store_id,
coalesce(pc."promo-code", '0') AS "promo-code",
coalesce(pc."code-type", '0') AS "code-type",
(CASE
WHEN date(b.`created-at`)= date(pm.`first-bill-date`) THEN 1
ELSE 0
END) AS new_patient,
COUNT(DISTINCT b.`patient-id`) AS unique_patients_count,
COUNT(DISTINCT b.id) AS total_bills_count,
SUM(bi.quantity * bi.rate) AS sales,
(SUM(bi.quantity * bi.rate)-SUM(i.`purchase-rate` * bi.quantity)) AS profit
FROM
"{read_schema}".`bills-1` b
LEFT JOIN "{read_schema}".`promo-codes` pc ON
b.`promo-code-id` = pc.id
LEFT JOIN "{read_schema}".`bill-items-1` bi ON
b.id = bi.`bill-id`
LEFT JOIN "{read_schema}".`patients-metadata-2` pm ON
b.`patient-id` = pm.`id`
LEFT JOIN "{read_schema}".`inventory-1` i ON
bi.`inventory-id` = i.id
WHERE
date(b.`created-at`) BETWEEN '{start}' AND '{end}'
GROUP BY
date(b.`created-at`),
b.`store-id`,
new_patient,
pc.`promo-code`,
pc.`code-type` ) T1
LEFT JOIN
(
SELECT
date(b.`created-at`) AS date_,
b.`store-id` AS store_id,
coalesce(pc."promo-code", '0') AS "promo-code",
coalesce(pc."code-type", '0') AS "code-type",
(CASE
WHEN date(b.`created-at`)= date(pm.`first-bill-date`) THEN 1
ELSE 0
END) AS new_patient,
COUNT(DISTINCT b.`patient-id`) AS unique_patients_count,
COUNT(DISTINCT b.id) AS total_bills_count,
SUM(b.`promo-discount`) AS marketing_spend
FROM
"{read_schema}".`bills-1` b
LEFT JOIN "{read_schema}".`promo-codes` pc ON
b.`promo-code-id` = pc.id
LEFT JOIN "{read_schema}".`patients-metadata-2` pm ON
b.`patient-id` = pm.`id`
WHERE
date(b.`created-at`) BETWEEN '{start}' AND '{end}'
GROUP BY
date(b.`created-at`),
b.`store-id`,
new_patient,
pc.`promo-code`,
pc.`code-type` ) T2 ON
T1.date_ = T2.date_
AND T1.store_id = T2.store_id
AND T1.new_patient = T2.new_patient
AND T1.`promo-code` = T2.`promo-code`
AND T1.`code-type` = T2.`code-type`;"""
market_spend_q = market_spend_q.replace('`', '"')
logger.info(f"data fetching query : \n {market_spend_q}")
marketing_spend_sales = rs_db.get_df(market_spend_q)
marketing_spend_sales.columns = [c.replace('-', '_') for c in marketing_spend_sales.columns]
logger.info(f"raw data length data : {len(marketing_spend_sales)}")
# If the customer is not using any promo, tag the bill as 'Organic'
# Temporary: will resolve in the query later
marketing_spend_sales['pr_tag'] = 0
marketing_spend_sales['hd_tag'] = 0
# Filling null value as '0'
fill_value = {'promo_code': 'Organic',
'code_type': 'Organic'}
marketing_spend_sales = marketing_spend_sales.fillna(value=fill_value)
# store attributes
store_q = f"""
select
id as store_id ,
store,
abo,
"store-manager",
"line-manager",
"opened-at" as store_open_at
from
"{read_schema}"."stores-master" sm;
"""
store_q = store_q.replace('`', '"')
logger.info(store_q)
store_attr = rs_db.get_df(store_q)
store_attr.columns = [c.replace('-', '_') for c in store_attr.columns]
logger.info(f"stores table length : {len(store_attr)}")
store_attr.head()
marketing_spend_sales = pd.merge(marketing_spend_sales,
store_attr,
left_on='store_id',
right_on='store_id',
how='left')
marketing_spend_sales['redeemed_point'] = None
marketing_spend_sales.columns = [c.replace('_', '-') for c in marketing_spend_sales.columns]
# etl
marketing_spend_sales['created-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
marketing_spend_sales['created-by'] = 'etl-automation'
marketing_spend_sales['updated-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
marketing_spend_sales['updated-by'] = 'etl-automation'
# Write to csv
s3.save_df_to_s3(df=marketing_spend_sales[table_info['column_name']],
file_name='day_wise_marketing_spend/marketing_spend_sales.csv')
s3.write_df_to_db(df=marketing_spend_sales[table_info['column_name']], table_name=table_name, db=rs_db,
schema=read_schema)
# closing the connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/daywise_marketing_spend/daywise_marketing_spend.py | daywise_marketing_spend.py |
# !/usr/bin/env python
# coding: utf-8
import argparse
import sys
import os
sys.path.append('../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from datetime import datetime
from datetime import timedelta
from dateutil.tz import gettz
import pandas as pd
import numpy as np
# Custom library imports
from zeno_etl_libs.utils.general_funcs import month_diff
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-rd', '--runtime_date_exp', default="0101-01-01", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
runtime_date_exp = args.runtime_date_exp
email_to = args.email_to
# env = 'stage'
# limit = 10
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
# Connections
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
# Run date
if runtime_date_exp != '0101-01-01':
run_date = runtime_date_exp
else:
run_date = datetime.today().strftime('%Y-%m-%d')
# runtime_date = '2021-09-01'
logger.info("Running for {}".format(run_date))
# Period end date
# Paramatrize it
period_end_d = (pd.to_datetime(run_date) - timedelta(days=1)).strftime('%Y-%m-%d')
logger.info("Run date minus 1 is {}".format(period_end_d))
period_end_d_minus180 = (pd.to_datetime(period_end_d) - timedelta(days=180)).strftime('%Y-%m-%d')
logger.info("Period end date minus 180 is {}".format(period_end_d_minus180))
# Data to be fetched
#########################################################
# Bill data
########################################################
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
bills_q = """
SELECT
a."patient-id",
a."store-id",
a."id" as bill_id,
a."created-at" AS bill_created_at,
a."bill-date",
a."total-spend",
a."bill-year" as year_bill,
a."bill-month" as month_bill,
a."cum-nob" as nob_till_bill,
a."cum-spend" as spend_till_bill,
a."cum-abv" as average_bill_value,
a."normalized-date",
a."value-segment",
a."value-segment-calculation-date" as value_segment_calc_date,
a."behaviour-segment",
b."behaviour-segment-calculation-date" as behaviour_segment_calc_date,
b."first-bill-date" as overall_min_bill_date,
b."primary-disease",
(case when b."is-repeatable" is True then 1 else 0 end) as latest_is_repeatable,
(case when b."is-generic" is True then 1 else 0 end) as latest_is_generic,
(case when b."hd-flag" is True then 1 else 0 end) as latest_hd_flag,
s."store" as store_name,
s."opened-at" as store_opened_at,
s."abo"
FROM
"retention-master" a
LEFT JOIN
"patients-metadata-2" b
on a."patient-id" = b."id"
LEFT JOIN
"stores-master" s
on a."store-id" = s."id"
WHERE
a."bill-date" > '{0}'
AND a."bill-date" <= '{1}'
""".format(period_end_d_minus180, period_end_d)
# AND a."store-id" = 2
bills_q = bills_q.replace('`', '"')
logger.info(bills_q)
data_bill = rs_db.get_df(query=bills_q)
data_bill.columns = [c.replace('-', '_') for c in data_bill.columns]
logger.info(len(data_bill))
for i in ['bill_created_at', 'bill_date', 'overall_min_bill_date', 'normalized_date',
'value_segment_calc_date', 'behaviour_segment_calc_date']:
data_bill[i] = pd.to_datetime(data_bill[i])
logger.info("Data for bills fetched with length {}".format(len(data_bill)))
# Sort on patient_id, bill_date
data_bill = data_bill.sort_values(by=['patient_id', 'bill_created_at'])
################################
# Calculated columns
################################
# Find next bill date
data_bill['next_bill_date'] = data_bill.groupby(['patient_id'])['bill_date'].shift(-1)
# Difference between next bill date and current bill date
data_bill['day_diff_next_bill'] = (data_bill['next_bill_date'] - data_bill['bill_date']).dt.days
# But what's the difference between run_date and bill date
data_bill['day_diff_today'] = (pd.to_datetime(run_date) - data_bill['bill_date']).dt.days
# Define lost event
# Next bill diff >90 days OR next bill date NULL
# AND
# Date diff with run_date, should also be >90
data_bill['lost_event_flag'] = np.where(((data_bill['day_diff_next_bill'] > 90) |
(data_bill['day_diff_next_bill'].isnull()))
& (data_bill['day_diff_today'] > 90), 1, 0)
# But what's the lost attribution date
data_bill['bill_date_plus90'] = pd.to_datetime(data_bill['bill_date'] + timedelta(days=90))
data_bill['lost_attribution_date'] = np.where(data_bill['lost_event_flag'] == 1,
data_bill['bill_date_plus90'].dt.strftime('%Y-%m-%d'),
"")
data_bill['lost_attribution_date'] = pd.to_datetime(data_bill['lost_attribution_date'], errors='coerce')
# Month diff
data_bill['month_diff_acq'] = month_diff(data_bill['bill_date'], data_bill['overall_min_bill_date'])
################################
# Calculated columns
################################
# Round to 2 decimals
for i in ['spend_till_bill', 'average_bill_value']:
data_bill[i] = data_bill[i].astype(float).round(2)
#################################
# Data lost
#################################
data_lost = data_bill[data_bill['lost_event_flag'] == 1].copy()
logger.info("Lost data length {}".format(len(data_lost)))
########################################################
# Churn reasons
#######################################################
############################
# PR Lost or Delayed
############################
# Todo change order-number to patient-request-number or the aligned source of truth
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
pr_q = """
SELECT
`patient-id`,
`order-number`,
`bill-id`,
MIN(`pr-created-at`) AS min_created_at,
MAX(`completed-at`) AS max_completed_at
FROM
`patient-requests-metadata`
WHERE
`pso-requested-quantity` > 0
AND date(`pr-created-at`) > '{0}'
AND date(`pr-created-at`) <= '{1}'
GROUP BY
`patient-id`,
`order-number`,
`bill-id`
""".format(period_end_d_minus180, period_end_d)
pr_q = pr_q.replace('`', '"')
logger.info(pr_q)
data_pr = rs_db.get_df(query=pr_q)
data_pr.columns = [c.replace('-', '_') for c in data_pr.columns]
logger.info(len(data_pr))
logger.info("PR data fetched with length {}".format(len(data_pr)))
# PR Delay
# Delay can only happen for those who billed
data_pr_b = data_pr[data_pr['bill_id'] >= 0].copy()
for i in ['min_created_at', 'max_completed_at']:
data_pr_b[i] = pd.to_datetime(data_pr_b[i], errors='coerce')
data_pr_b['hour_diff'] = (data_pr_b['max_completed_at'] - data_pr_b['min_created_at']) / np.timedelta64(1, 'h')
data_pr_b['pr_72hrs_delay'] = np.where(data_pr_b['hour_diff'] > 72, 1, 0)
# Take unique on bills
data_pr_b_unique = data_pr_b.drop_duplicates(subset=['patient_id', 'bill_id']).copy()
data_pr_b_unique['bill_id'] = data_pr_b_unique['bill_id'].astype(int)
# Merge with main data
data_lost = data_lost.merge(data_pr_b_unique[['patient_id', 'bill_id', 'pr_72hrs_delay']],
how='left', on=['patient_id', 'bill_id'])
data_lost['pr_72hrs_delay'] = data_lost['pr_72hrs_delay'].fillna(0)
logger.info("Lost data length after joining with PR delay status {}".format(len(data_lost)))
# PR Loss
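# A PR is treated as lost if it was never completed and was raised at least 7 days before the run date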
data_pr_lost = data_pr_b[(data_pr_b['max_completed_at'] == '0000-00-00 00:00:00') |
(data_pr_b['max_completed_at'].isnull())].copy()
for i in ['min_created_at', 'max_completed_at']:
data_pr_lost[i] = pd.to_datetime(data_pr_lost[i], errors='coerce')
run_date_minus_7 = pd.to_datetime(run_date) - timedelta(days=7)
data_pr_lost = data_pr_lost[data_pr_lost['min_created_at'] <= run_date_minus_7]
logger.info("PR Loss data length {}".format(len(data_pr_lost)))
# Merge with main data
data_lost_tmp = data_lost.merge(
data_pr_lost[['patient_id', 'order_number', 'min_created_at', 'max_completed_at']],
how='left', on=['patient_id'])
# Because merged on patient id only, so date diff with bill date to be taken
data_lost_tmp['lost_pr_date_diff'] = (data_lost_tmp['min_created_at'] - data_lost_tmp['bill_date']).dt.days
# For a customer to be lost due to PR, the PR event should happen after that bill date
data_lost_tmp = data_lost_tmp[data_lost_tmp['lost_pr_date_diff'] > 0]
# But should be less than or equal to loss attributed day
data_lost_tmp = data_lost_tmp[data_lost_tmp['min_created_at'] <= data_lost_tmp['lost_attribution_date']]
# Drop any duplicate mappings
data_lost_tmp = data_lost_tmp.drop_duplicates(subset=['patient_id', 'bill_id'])
data_lost_tmp = data_lost_tmp[
['patient_id', 'bill_id', 'min_created_at', 'max_completed_at', 'lost_pr_date_diff']].copy()
data_lost_tmp['pr_lost'] = 1
logger.info("PR loss final data length is {}".format(len(data_lost_tmp)))
# Merge with main data
data_lost = data_lost.merge(data_lost_tmp, how='left', on=['patient_id', 'bill_id'])
data_lost['pr_lost'] = data_lost['pr_lost'].fillna(0)
logger.info("Lost data length after joining with PR Lost status {}".format(len(data_lost)))
############################
# HD delayed
############################
# Todo: clarify whether bill-id > 0 is compulsory for fetching HD orders
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
hd_q = """
SELECT
`patient-id`,
`bill-id`,
MIN(`pso-created-at`) AS min_created_at,
MAX(`delivered-at`) AS max_delivered_at
FROM
`home-delivery-metadata`
WHERE
`bill-id` > 0
AND date(`pso-created-at`) > '{0}'
AND date(`pso-created-at`) <= '{1}'
GROUP BY
`patient-id`,
`bill-id`
""".format(period_end_d_minus180, period_end_d)
hd_q = hd_q.replace('`', '"')
logger.info(hd_q)
data_hd = rs_db.get_df(query=hd_q)
data_hd.columns = [c.replace('-', '_') for c in data_hd.columns]
logger.info(len(data_hd))
logger.info("HD data fetched with length {}".format(len(data_hd)))
for i in ['min_created_at', 'max_delivered_at']:
data_hd[i] = pd.to_datetime(data_hd[i], errors='coerce')
data_hd['hour_diff'] = (data_hd['max_delivered_at'] - data_hd['min_created_at']) / np.timedelta64(1, 'h')
data_hd['hd_24hrs_delay'] = np.where(data_hd['hour_diff'] > 24, 1, 0)
# Take unique on bills
data_hd_unique = data_hd.drop_duplicates(subset=['patient_id', 'bill_id']).copy()
data_hd_unique['bill_id'] = data_hd_unique['bill_id'].astype(int)
data_lost = data_lost.merge(data_hd_unique[['patient_id', 'bill_id', 'hd_24hrs_delay']],
how='left', on=['patient_id', 'bill_id'])
data_lost['hd_24hrs_delay'] = data_lost['hd_24hrs_delay'].fillna(0)
logger.info("Lost data length after joining with HD delay status {}".format(len(data_lost)))
############################
# NPS
############################
# Todo change with nps-bill-mapping later
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
nps_q = """
SELECT
b.`id` AS patient_id,
a.`rating`,
DATE(a.`created-at`) AS feedback_date
FROM
feedback a
INNER JOIN patients b on
a.phone = b.phone
WHERE date(a.`created-at`) > '{0}'
AND date(a.`created-at`) <= '{1}'
GROUP BY
b.`id`,
a.`rating`,
DATE(a.`created-at`)
""".format(period_end_d_minus180, period_end_d)
nps_q = nps_q.replace('`', '"')
logger.info(nps_q)
data_nps = rs_db.get_df(query=nps_q)
data_nps.columns = [c.replace('-', '_') for c in data_nps.columns]
logger.info(len(data_nps))
logger.info("NPS data fetched with length {}".format(len(data_nps)))
data_nps['feedback_date'] = pd.to_datetime(data_nps['feedback_date'])
data_nps['detractor_flag'] = np.where(((data_nps['feedback_date'] <= '2019-10-23') & (data_nps['rating'] <= 6))
| (data_nps['rating'] <= 3), 1, 0)
# NPS detractors only
data_nps_d = data_nps[data_nps['detractor_flag'] == 1]
data_lost_nps_tmp = data_lost.merge(data_nps_d[['patient_id', 'rating', 'feedback_date', 'detractor_flag']],
how='left', on=['patient_id'])
data_lost_nps_tmp['nps_date_diff'] = (data_lost_nps_tmp['feedback_date'] - data_lost_nps_tmp['bill_date']).dt.days
# To be lost, NPS should be on or after churn event bill date
data_lost_nps_tmp = data_lost_nps_tmp[data_lost_nps_tmp['nps_date_diff'] >= 0]
# But should be less than or equal to loss attributed day
data_lost_nps_tmp = data_lost_nps_tmp[data_lost_nps_tmp['feedback_date']
<= data_lost_nps_tmp['lost_attribution_date']]
data_lost_nps = data_lost_nps_tmp.drop_duplicates(subset=['patient_id', 'bill_id'])
data_lost_nps = data_lost_nps[['patient_id', 'bill_id', 'rating', 'feedback_date', 'nps_date_diff']].copy()
data_lost_nps['nps_detractor_lost'] = 1
# Merge with main data
data_lost = data_lost.merge(data_lost_nps, how='left', on=['patient_id', 'bill_id'])
data_lost['nps_detractor_lost'] = data_lost['nps_detractor_lost'].fillna(0)
logger.info("Lost data length after joining with NPS Lost status {}".format(len(data_lost)))
##################################
# Customer returns
##################################
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
return_q = """
SELECT
a.`bill-id`
FROM
`customer-return-items-1` a
INNER JOIN `bills-1` b
ON a.`bill-id` = b.`id`
WHERE
date(b.`created-at`) > '{0}'
AND date(b.`created-at`) <= '{1}'
GROUP BY
a.`bill-id`
""".format(period_end_d_minus180, period_end_d)
return_q = return_q.replace('`', '"')
logger.info(return_q)
data_return = rs_db.get_df(query=return_q)
data_return.columns = [c.replace('-', '_') for c in data_return.columns]
logger.info(len(data_return))
data_return['return_flag'] = 1
# Merge with main data
data_lost = data_lost.merge(data_return, how='left', on=['bill_id'])
data_lost['return_flag'] = data_lost['return_flag'].fillna(0)
logger.info("Lost data length after joining with Customer returns data {}".format(len(data_lost)))
#############################
# Expiry items
############################
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
expiry_q = """
SELECT
a.`bill-id`,
a.`inventory-id`,
b.`expiry`
FROM
`bill-items-1` a
LEFT JOIN
`inventory-1` b
on a.`inventory-id` = b.`id`
WHERE
date(a.`created-at`) > '{0}'
AND date(a.`created-at`) <= '{1}'
""".format(period_end_d_minus180, period_end_d)
expiry_q = expiry_q.replace('`', '"')
# logger.info(expiry_q)
data_expiry = rs_db.get_df(query=expiry_q)
data_expiry.columns = [c.replace('-', '_') for c in data_expiry.columns]
logger.info(len(data_expiry))
logger.info("Bill item data with inventory id - fetched with length {}".format(len(data_expiry)))
data_expiry['expiry'] = pd.to_datetime(data_expiry['expiry'], errors='coerce')
# Merge and calculate
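# A bill is flagged as near-expiry when at least 50% of its items had under 6 months (180 days) of shelf life left on the bill date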
data_lost_inv = data_lost[['bill_id', 'bill_date']].merge(data_expiry, how='left', on=['bill_id'])
data_lost_inv['expiry_days'] = (data_lost_inv['expiry'] - data_lost_inv['bill_date']).dt.days
data_lost_inv['expiry_less_6m'] = np.where(data_lost_inv['expiry_days'] < 180, 1, 0)
data_lost_inv_grp = data_lost_inv.groupby(['bill_id']).agg(
{'inventory_id': 'count', 'expiry_less_6m': 'sum'}).reset_index()
data_lost_inv_grp = data_lost_inv_grp.rename(columns={'inventory_id': 'items'})
data_lost_inv_grp['expiry_less_6m_pc'] = data_lost_inv_grp['expiry_less_6m'] / data_lost_inv_grp['items']
data_lost_inv_grp['near_expiry_flag'] = np.where(data_lost_inv_grp['expiry_less_6m_pc'] >= 0.5, 1, 0)
##############################
# Merge with main data
##############################
data_lost = data_lost.merge(data_lost_inv_grp, how='left', on=['bill_id'])
data_lost['near_expiry_flag'] = data_lost['near_expiry_flag'].fillna(0)
logger.info("Lost data length after joining with Expiry items data {}".format(len(data_lost)))
# Churn event exact
def churn_event(row):
if row['nps_detractor_lost'] == 1:
return 'NPS Lost'
elif row['pr_lost'] == 1:
return 'PR Lost'
elif row['pr_72hrs_delay'] == 1:
return 'PR Delayed'
elif row['hd_24hrs_delay'] == 1:
return 'HD Delayed'
elif row['return_flag'] == 1:
return 'Items returned'
elif row['near_expiry_flag'] == 1:
return 'Near expiry items'
else:
return 'Not known'
data_lost['churn_event'] = data_lost.apply(lambda row: churn_event(row), axis=1)
# DB upload columns
final_cols = ['patient_id', 'store_id', 'bill_id', 'bill_created_at', 'bill_date',
'year_bill', 'month_bill', 'nob_till_bill', 'next_bill_date', 'day_diff_next_bill',
'day_diff_today', 'lost_event_flag',
'bill_date_plus90', 'lost_attribution_date', 'overall_min_bill_date', 'normalized_date',
'month_diff_acq', 'total_spend', 'spend_till_bill', 'average_bill_value',
'pr_72hrs_delay', 'min_created_at', 'max_completed_at', 'lost_pr_date_diff', 'pr_lost',
'hd_24hrs_delay', 'rating', 'feedback_date', 'nps_date_diff', 'nps_detractor_lost',
'return_flag', 'items', 'expiry_less_6m', 'expiry_less_6m_pc', 'near_expiry_flag',
'churn_event', 'latest_is_repeatable', 'latest_is_generic', 'latest_hd_flag',
'primary_disease', 'value_segment_calc_date', 'value_segment', 'behaviour_segment_calc_date',
'behaviour_segment', 'store_name', 'store_opened_at', 'abo']
data_export = data_lost[final_cols]
# For redshift specific
# Convert int columns to int
for i in ['bill_id', 'lost_event_flag', 'pr_72hrs_delay', 'pr_lost',
'hd_24hrs_delay', 'nps_detractor_lost', 'return_flag',
'expiry_less_6m', 'near_expiry_flag']:
data_export[i] = data_export[i].fillna(0).astype(int)
# Impute for Nulls
# Impute 99999 instead of null, for now
# Todo change dtype to float in DDL
# month_diff_acq was added because of float vs integer mismatch in database writing
for i in ['day_diff_next_bill', 'lost_pr_date_diff', 'nps_date_diff', 'month_diff_acq',
'rating']:
data_export[i] = data_export[i].fillna(99999).astype(int)
for i in ['bill_date', 'overall_min_bill_date', 'normalized_date',
'next_bill_date', 'bill_date_plus90', 'lost_attribution_date',
'value_segment_calc_date', 'behaviour_segment_calc_date',
'feedback_date']:
data_export[i] = pd.to_datetime(data_export[i]).dt.date
logger.info(data_export.columns)
################################
# DB WRITE
###############################
write_schema = 'prod2-generico'
write_table_name = 'car-churn'
table_info = helper.get_table_info(db=rs_db_write, table_name=write_table_name, schema=write_schema)
# table_info_clean = table_info[~table_info['column_name'].isin(['id', 'created-at', 'updated-at'])]
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
# Mandatory lines
data_export['created-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['created-by'] = 'etl-automation'
data_export['updated-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['updated-by'] = 'etl-automation'
# Truncate and append
rs_db_write.execute(f"set search_path to '{write_schema}'", params=None)
truncate_q = f"""
DELETE FROM
"{write_table_name}"
WHERE
"bill-date" > '{period_end_d_minus180}'
AND "bill-date" <= '{period_end_d}'
"""
rs_db_write.execute(truncate_q)
# Write to DB
s3.write_df_to_db(df=data_export[table_info['column_name']], table_name=write_table_name,
db=rs_db_write, schema=write_schema)
logger.info("Uploading successful with length: {}".format(len(data_export)))
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection()
logger.info("File ends") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/car-churn/car-churn.py | car-churn.py |
import argparse
import os
import sys
from zeno_etl_libs.db.db import DB
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
db = DB(read_only=False)
db.open_connection()
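# Sync configuration per WMS table:
# "pk" - composite key used to delete overlapping rows from the target before inserting the fresh extract
# "full_dump" - when set, the target table is fully cleared and reloaded instead of a PK-matched delete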
tables_meta = {
"acm": {
"pk": ["code", "Slcd"],
"full_dump": 1
},
"item": {
"pk": ["code"],
"full_dump": 1
},
"fifo": {
"pk": ["psrlno"],
"full_dump": 1
},
"salepurchase1": {
"pk": ["vtyp", "vdt", "vno", "subvno"],
"full_dump": 1
},
"salepurchase2": {
"pk": ["Vtype", "Vdt", "Vno", "Itemc", "Psrlno", "srlno"],
"full_dump": 1
},
"master": {
"pk": ["code"],
"full_dump": 1
},
"billtrackmst": {
"pk": ["srl"],
"full_dump": 1
},
"company": {
"pk": ["code"],
"full_dump": 1
},
"acmextra": {
"pk": ["Code", "slcd"],
"full_dump": 1
},
"dispatchstmt": {
"pk": ["TagNo", "TagDt", "Vdt", "Vtype", "Vno"],
"full_dump": 1
},
"app-sp2upd": {
"pk": ["Vtype", "Vdt", "Vno", "Itemc", "Psrlno", "NewPsrlno", "PorderNo"]
},
"acknow": {
"pk": ["vtype", "Vno", "Srl"],
"full_dump": 1
},
"proofsp1": {
"pk": ["Vtype", "Vdt", "Vno"]
},
"proofsp2": {
"pk": ["Vtype", "Vdt", "Vno", "Ordno", "Itemc"]
},
"porder": {
"pk": ["Ordno", "acno", "itemc"],
"full_dump": 1
},
"porderupd": {
"pk": ["Ordno", "acno", "itemc"]
},
"salt": {
"pk": ["code"],
"full_dump": 1
},
"billost": {
"pk": ["Vtype", "vdt", "Vno", "SubVno"],
"full_dump": 1
},
"rcptpymt": {
"pk": ["vtype", "vdt", "vno"],
"full_dump": 1
},
"app-sp2": {
"pk": ["Vtype", "Vdt", "Vno", "Itemc", "Psrlno", "NewPsrlno", "PorderNo"],
"full_dump": 1
},
"adjstmnt": {
"pk": ["vtype", "vdt", "vno", "avtype", "avdt", "avno", "amount", "Srlno", "SubVno", "RefNo"],
"full_dump": 1
}
}
""" Since s3 lists everything in asc order so processing on First Come basic """
# list all the date folder on s3
bucket_name = 'generico-node-internal'
schema = "prod2-generico"
s3 = S3(bucket_name=bucket_name)
env_folder = "production" if env == "prod" else "staging"
date_list_response = s3.s3_client.list_objects_v2(
Bucket=bucket_name,
Delimiter='/',
MaxKeys=100,
Prefix=f"wms/data-sync-to-s3/non-processed/{env_folder}/"
)
for date_data in date_list_response['CommonPrefixes']:
file_list_response = s3.s3_client.list_objects_v2(
Bucket='generico-node-internal',
Delimiter='/',
MaxKeys=100,
Prefix=date_data['Prefix']
)
for file in file_list_response['Contents']:
key = file['Key']
file_name = key.split("/")[-1]
table_name = file_name.split("_")[0].lower()
temp_table_name = f"temp-{table_name}"
file_s3_uri = f"s3://{bucket_name}/{key}"
if table_name in [t.lower() for t in tables_meta.keys()]:
logger.info(f"Syncing key: {key}")
try:
""" create temp table """
create_temp_table_query = f"""
create temp table IF NOT EXISTS "{temp_table_name}" (like "{schema}"."{table_name}");
"""
db.execute(query=create_temp_table_query)
logger.info(f"Created temp table: {temp_table_name}")
""" insert data into the temp table """
s3.write_to_db_from_s3_csv(table_name=temp_table_name, file_s3_uri=file_s3_uri, db=db)
logger.info(f"Inserted data in temp table: {temp_table_name}")
""" delete the common data between the temp and original table """
if tables_meta[table_name].get("full_dump"):
delete_common_data_query = f""" DELETE FROM "{schema}"."{table_name}" ; """
else:
filter_list = []
for pk in tables_meta[table_name]['pk']:
_pk = pk.lower()
filter_list.append(f""" "{schema}"."{table_name}"."{_pk}" = source."{_pk}" """)
filter_str = " and ".join(filter_list)
delete_common_data_query = f"""
DELETE FROM
"{schema}"."{table_name}"
USING
"{temp_table_name}" source
WHERE
{filter_str};
"""
db.execute(query=delete_common_data_query)
logger.info(f"Deleted old data from target table: {table_name}")
""" Insert the new data """
insert_query = f"""
insert into "{schema}"."{table_name}" select * from "{temp_table_name}"
"""
db.execute(query=insert_query)
logger.info(f"Inserted new data in target table: {table_name}")
""" clear the temp table """
clear_temp_table_query = f"""
delete from "{temp_table_name}"
"""
db.execute(query=clear_temp_table_query)
logger.info(f"Clearing temp table for next round: {temp_table_name}")
""" move the file to processed folder from non-processed """
target_key = key.replace("non-processed", "processed")
s3.move_s3_obj(source=f"/{bucket_name}/{key}", target_key=target_key)
logger.info(f"Moved file to processed folder: {target_key}")
except Exception as e:
logger.exception(e) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/sync-warehouse-s3-data-to-redshift/sync-warehouse-s3-data-to-redshift.py | sync-warehouse-s3-data-to-redshift.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import pandas as pd
import dateutil
import datetime
from dateutil.tz import gettz
from zeno_etl_libs.helper.email.email import Email
import numpy as np
import Levenshtein as lev
from datetime import timedelta
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-d', '--full_run', default=0, type=int, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
email_to = args.email_to
env = args.env
full_run = args.full_run
os.environ['env'] = env
logger = get_logger()
logger.info(f"full_run: {full_run}")
rs_db = DB()
rs_db_write = DB(read_only=False)
rs_db.open_connection()
rs_db_write.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'ecomm-playstore-patients'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
# max of data
playstore_q = """
select
max("review-created-at") max_exp
from
"prod2-generico"."ecomm-playstore-reviews"
"""
max_exp_date = rs_db.get_df(playstore_q)
max_exp_date['max_exp'].fillna(np.nan, inplace=True)
print(max_exp_date.info())
max_exp_date = max_exp_date['max_exp'].to_string(index=False)
print(max_exp_date)
# params
if full_run or max_exp_date == 'NaN':
start = '2017-05-13'
else:
start = max_exp_date
start = dateutil.parser.parse(start)
print(start)
startminus7 = start - timedelta(days=7)
startminus14 = start - timedelta(days=14)
q = f"""
select
"review-id",
"review",
"author-name",
"review-created-at",
"star-rating"
from
"prod2-generico"."ecomm-playstore-reviews"
where
date("review-created-at")> '{startminus7}'
"""
reviews = rs_db.get_df(q)
print(reviews)
reviews.columns = [c.replace('-', '_') for c in reviews.columns]
reviews['review_created_at'] = pd.to_datetime(reviews['review_created_at'])
reviews['review_day_pre7'] = reviews['review_created_at'] - pd.DateOffset(days=7)
zeno_q = f"""
select
zo.id as zeno_order_id_before_review ,
zo."patient-id" ,
zo."created-at" as order_created_at,
p.phone,
p."name" as "matched-name"
from
"prod2-generico"."zeno-order" zo
left join "prod2-generico".patients p on
zo."patient-id" = p.id
where
date(zo."created-at") > '{startminus14}'
and p."name" is not null
"""
zeno_orders = rs_db.get_df(zeno_q)
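# Fuzzy-match Playstore reviewers to patients: cross join reviews with recent zeno orders (dummy key 'i'),
# score author vs patient name similarity with Levenshtein ratio, keep the latest order per matched name
# and retain the top 3 name matches per review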
reviews['i'] = 1
zeno_orders['i'] = 1
merged_df = pd.merge(reviews, zeno_orders, how='outer', on='i')
merged_df['author_name'] = merged_df['author_name'].str.lower()
merged_df['matched-name'] = merged_df['matched-name'].str.lower()
merged_df['lev_ratio'] = merged_df.apply(lambda row: lev.ratio(row['author_name'], row['matched-name']), 1)
merged_df['rank_order'] = merged_df.sort_values(['zeno_order_id_before_review'], ascending=[False]) \
.groupby(['review_id', 'matched-name']) \
.cumcount() + 1
latest_order = merged_df[(merged_df['rank_order'] == 1)]
latest_order.columns
latest_order['top_3_matches'] = latest_order.sort_values(['lev_ratio'], ascending=[False]).groupby(['review_id']) \
.cumcount() + 1
latest_order = latest_order[(latest_order['top_3_matches'] <= 3)]
latest_order = latest_order.sort_values(['star_rating', 'review_id', 'top_3_matches']
, ascending=[True, True, True])
latest_order.columns = [c.replace('_', '-') for c in latest_order.columns]
latest_order_data = latest_order[['review-id', 'review', 'star-rating', 'review-created-at', 'author-name',
'matched-name', 'zeno-order-id-before-review', 'patient-id'
, 'order-created-at', 'phone', 'lev-ratio']]
latest_order_mail = latest_order[['review-id', 'review', 'star-rating', 'review-created-at', 'author-name',
'matched-name', 'zeno-order-id-before-review', 'patient-id'
, 'order-created-at']]
# etl
latest_order_data['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
latest_order_data['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
latest_order_data['created-by'] = 'etl-automation'
latest_order_data['updated-by'] = 'etl-automation'
latest_order_data=latest_order_data[(pd.to_datetime(latest_order_data['review-created-at']) > start)]
latest_order_mail=latest_order_mail[(pd.to_datetime(latest_order_mail['review-created-at']) > start)]
if latest_order_mail.empty:
print('DataFrame is empty!')
exit()
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
print(start)
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where DATE("review-created-at") >'{start}' '''
print(truncate_query)
rs_db_write.execute(truncate_query)
s3.write_df_to_db(df=latest_order_data[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
file_name = 'Zeno_playstore.xlsx'
file_path = s3.write_df_to_excel(data={'Zeno Playstore': latest_order_mail}, file_name=file_name)
email = Email()
# file_path ='/Users/Lenovo/Downloads/utter.csv'
email.send_email_file(subject="Zeno Playstore",
mail_body='Zeno Playstore',
to_emails=email_to, file_uris=[], file_paths=[file_path])
# Closing the DB Connections
rs_db.close_connection()
rs_db_write.close_connection()
import os
import sys
import argparse
import numpy as np
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB, MySQL
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-d', '--default_size', default=1024, type=int, required=False)
parser.add_argument('-m', '--mail_list', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
default_size = args.default_size
mail_list = args.mail_list
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
mysql = MySQL(read_only=False)
mysql.open_connection()
email = Email()
SCHEMA = "prod2-generico"
TABLE_NAME = "table-size-config"
try:
QUERY = """SELECT TRIM(pgdb.datname) AS Database,
TRIM(a.name) AS Table,
((b.mbytes/part.total::decimal)*100)::decimal(5,2) AS pct_of_total,
b.mbytes,
b.unsorted_mbytes
FROM stv_tbl_perm a
JOIN pg_database AS pgdb
ON pgdb.oid = a.db_id
JOIN ( SELECT tbl,
SUM( DECODE(unsorted, 1, 1, 0)) AS unsorted_mbytes,
COUNT(*) AS mbytes
FROM stv_blocklist
GROUP BY tbl ) AS b
ON a.id = b.tbl
JOIN ( SELECT SUM(capacity) AS total
FROM stv_partitions
WHERE part_begin = 0 ) AS part
ON 1 = 1
WHERE a.slice = 0
ORDER BY 4 desc, db_id, name
"""
df_rs_size = rs_db.get_df(query=QUERY)
table_size_conf = f"""select * from "{SCHEMA}"."{TABLE_NAME}";"""
df_table_size_conf = rs_db.get_df(query=table_size_conf)
df_imp_tables = pd.merge(df_table_size_conf, df_rs_size,
left_on="table-name", right_on="table", how="inner")
df_imp_tables = df_imp_tables.loc[df_imp_tables['mbytes'] >
df_imp_tables['default-size'].astype(int)]
df_imp_tables = df_imp_tables.drop(['id', 'schema', 'table-name',
'database', 'unsorted_mbytes'], axis=1)
df_imp_tables = df_imp_tables[['table', 'pct_of_total', 'mbytes', 'default-size']]
df_other_tables = pd.merge(df_table_size_conf,
df_rs_size, left_on="table-name", right_on="table", how="right")
df_other_tables_filtered = df_other_tables.loc[df_other_tables[
'table-name'].isin([np.NaN, None])]
df_other_tables_filtered = df_other_tables_filtered[
df_other_tables_filtered['mbytes'] > 1024]
df_other_tables_filtered = df_other_tables_filtered.drop([
'id', 'schema', 'table-name', 'database',
'unsorted_mbytes'], axis=1)
df_other_tables_filtered['default-size'] = default_size
df_other_tables_filtered = df_other_tables_filtered[
['table', 'pct_of_total', 'mbytes', 'default-size']]
final_df = pd.concat([df_imp_tables, df_other_tables_filtered])
final_df.columns = ['table_name', '%_of_total_RS',
'Actual_size_MB', 'default_size_configured_MB']
QUERY = """
show tables;
"""
df_mysql_source = pd.read_sql_query(con=mysql.connection, sql=QUERY)
final_df = pd.merge(final_df, df_mysql_source, left_on='table_name',
right_on='Tables_in_prod2-generico', how='outer', indicator=True)\
.query("_merge != 'both'").drop('_merge', axis=1).reset_index(drop=True)
final_df = final_df.drop(['Tables_in_prod2-generico'], axis=1).dropna().sort_values('Actual_size_MB', ascending=False)
file_path = '/tmp/output.csv'
final_df.to_csv(file_path)
email.send_email_file(subject='[Alert] List of ETL tables exceeding size limit',
mail_body="list of tables exceeding"
" default size defined are as attached \n",
to_emails=mail_list,
file_paths=[file_path])
except Exception as error:
raise Exception from error
finally:
rs_db.close_connection()
mysql.close() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/table_size_config/table-size-config.py | table-size-config.py |
import argparse
import os
import sys
sys.path.append('../../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from dateutil.relativedelta import relativedelta
import pandas as pd
import numpy as np
import datetime
from dateutil.tz import gettz
from zeno_etl_libs.db.db import DB, PostGreWrite
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'sc-metrics'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
date1= (datetime.datetime.today() + relativedelta(months=-1)).replace(day=1).strftime('%Y-%m-%d')
date2= (datetime.datetime.today() + relativedelta(days=-1)).strftime('%Y-%m-%d')
# =============================================================================
# Importing all stores with opening date
# =============================================================================
sm = """
select
sm.id as "store-id"
from
"prod2-generico"."stores-master" sm
inner join "prod2-generico"."stores" s on
s.id = sm.id
where
date(sm."opened-at") != '0101-01-01'
and s."is-active" = 1
group by
sm.id;
"""
sm_data = rs_db.get_df(sm)
sm_data.columns = [c.replace('-', '_') for c in sm_data.columns]
sm_data['join']='A'
# =============================================================================
# Date range explode
# =============================================================================
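# Build a store x day scaffold covering the reporting window (start of previous month to yesterday);
# every metric below is left-joined onto this frame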
d_date = pd.DataFrame({'join':['A']})
#d_date['join']='A'
d_date['start_date']= date1
d_date['end_date']= date2
d_date['date'] = [pd.date_range(s, e, freq='d') for s, e in
zip(pd.to_datetime(d_date['start_date']),
pd.to_datetime(d_date['end_date']))]
#d_date = d_date.explode('date')
d_date = pd.DataFrame({'date': np.concatenate(d_date.date.values)})
d_date['join']='A'
#d_date.drop(['start_date','end_date'],axis=1,inplace=True)
d_date['date'] = d_date['date'].astype('str')
m_data = pd.merge(left=sm_data,right=d_date,on=['join'],how='inner')
m_data.drop('join',axis=1,inplace=True)
# =============================================================================
# AS PR received TAT
# =============================================================================
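# Received TAT (hours) = store-delivered-at to received-at on short-book entries,
# split into PR (manual patient requests) vs AS (auto-short) lines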
as_pr = f"""
select
"store-id" ,
date("received-at") as "date",
avg( case when (sb."auto-short" = 0 AND sb."auto-generated" = 0 AND sb."status" NOT IN ('deleted')) then (datediff('hour', sd."store-delivered-at", sb."received-at")) end) as "pr_received_tat",
avg( case when (sb."auto-short" = 1 and sb."home-delivery" = 0 and sb."patient-id" = 4480 and sb."status" NOT IN ('deleted')) then (datediff('hour', sd."store-delivered-at", sb."received-at")) end) as "as_received_tat"
from
"prod2-generico"."short-book-1" sb
left join "prod2-generico"."store-delivered" sd on
sd.id = sb.id
where
date("received-at")>= '{date1}'
and date("received-at")<= '{date2}'
group by
"store-id" ,
date("received-at");
"""
as_pr_tat = rs_db.get_df(as_pr)
as_pr_tat.columns = [c.replace('-', '_') for c in as_pr_tat.columns]
as_pr_tat['date'] = as_pr_tat['date'].astype('str')
m_data = pd.merge(left=m_data,right=as_pr_tat,how='left',on=['store_id','date'])
# =============================================================================
# Audit Loss
# =============================================================================
a_loss = f"""
select
date(a."created-at") as "date",
a."store-id",
sum(aps."actual-quantity" * aps."final-ptr") as "actual-value",
sum((aps."actual-quantity"-(case when aps."accounted-quantity">aps."actual-quantity" then aps."actual-quantity" else aps."accounted-quantity" end )-aps."corrected-qty")* aps."final-ptr") as "accounted-value",
sum((aps."actual-quantity"-(aps."accounted-quantity")-aps."corrected-qty")* aps."final-ptr") as "billing-page-value",
sum(case when (aps."actual-quantity"-aps."accounted-quantity")<0 and aps."correction-requested-qty">0 then 1 else 0 end) as "merchandizing-issue"
from
"prod2-generico"."audits" a
left join "prod2-generico"."audit-process-sku" aps
on
a.id = aps."audit-id"
where
date(a."created-at") >= '{date1}'
and date(a."created-at") <= '{date2}'
group by 1,2
;
"""
audit_loss = rs_db.get_df(a_loss)
audit_loss.columns = [c.replace('-', '_') for c in audit_loss.columns]
audit_loss['date'] = audit_loss['date'].astype('str')
m_data = pd.merge(left=m_data,right=audit_loss,on=['store_id','date'],how='left')
# =============================================================================
# LP Liquidation + LP PR PCT
# =============================================================================
lp = f"""
select
lp."store-id" ,
lp."received-date" as "date",
sum(lp."lp-sales-sum") as "lp-sales-sum",
sum(lp."lp-value") as "lp-value",
sum(s."lp_pr_sales") as "lp_pr_sales"
from
(
select
lp."store-id" ,
lp."received-date",
sum(lp."lp-sales-sum") as "lp-sales-sum",
sum(lp."lp-value-sum") as "lp-value"
from
"prod2-generico"."lp-liquidation" lp
where
date(lp."received-date")>= '{date1}'
and date(lp."received-date")<= '{date2}'
group by
lp."store-id" ,
date(lp."received-date")) lp
inner join (
select
"store-id" ,
"created-date",
sum(case when "pr-flag" = true then "revenue-value" end) as "lp_pr_sales"
from
"prod2-generico"."sales"
where
date("created-at")>= '{date1}'
and date("created-at")<= '{date2}'
group by
1,
2) s on
s."store-id" = lp."store-id"
and s."created-date" = lp."received-date"
where
date(lp."received-date")>= '{date1}'
and date(lp."received-date")<= '{date2}'
group by
lp."store-id" ,
date(lp."received-date");
"""
lp_liq = rs_db.get_df(lp)
lp_liq.columns = [c.replace('-', '_') for c in lp_liq.columns]
lp_liq['date'] = lp_liq['date'].astype('str')
m_data = pd.merge(left=m_data,right=lp_liq,on=['store_id','date'],how='left')
# =============================================================================
# OOS less than min + STore level OOS
# =============================================================================
oos = f"""
select
oos."closing-date" as "date",
oos."store-id" ,
sum( case when oos."bucket" in ('AW', 'AX', 'AY') and oos."oos-min-count" = 0 then oos."drug-count" end) as min_count_oos_ax,
sum(case when oos."bucket" in ('AW', 'AX', 'AY') then oos."drug-count" end) as "total_drug_count_oos_ax",
sum(case when oos."oos-min-count" = 0 and d."company-id" = 6984 then oos."drug-count" end) as "goodaid_min_count",
sum(case when d."company-id" = 6984 then oos."drug-count" end) as "goodaid_total_count",
sum(oos."drug-count") as "total_drug_count_oos",
sum(oos."oos-count") as "total_oos_drug_count_oos"
from
"prod2-generico"."out-of-shelf-drug-level" oos
inner join "prod2-generico"."drugs" d on
oos."drug-id" = d."id"
where
oos."max-set" = 'Y'
and oos."mature-flag" = 'Y'
and date(oos."closing-date") >='{date1}'
and date(oos."closing-date") <='{date2}'
group by
1,
2;
"""
oos_data = rs_db.get_df(oos)
oos_data.columns = [c.replace('-', '_') for c in oos_data.columns]
oos_data['date'] = oos_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=oos_data,on=['store_id','date'],how='left')
# =============================================================================
# Feedback rating and bill pct
# =============================================================================
fb = f"""
select
date(b."created-at") as "date",
b."store-id",
count(distinct case when f.rating is not null then b.id end)* 1.0 / count(distinct b.id)* 1.0 as "feedback-bills-pct",
NVL(count(distinct case when f.rating in (1, 2) then b.id end),
0) as "flag-rating",
count(distinct case when f.rating is not null then b.id end) as "feedback_bills"
from
"prod2-generico"."bills-1" b
left join "prod2-generico"."feedback" f on
f."bill-id" = b.id
where date(b."created-at") >= '{date1}'
and date(b."created-at") <= '{date2}'
group by
date(b."created-at") ,
b."store-id";
"""
fb_data = rs_db.get_df(fb)
fb_data.columns = [c.replace('-', '_') for c in fb_data.columns]
fb_data['date'] = fb_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=fb_data,on=['store_id','date'],how='left')
# =============================================================================
# Sales related Metric
# =============================================================================
sd = f"""
select
"store-id",
"created-date" as "date",
sum(case when "bill-flag" = 'gross' and "substitution-status" = 'substituted' then quantity end) as "subs_num",
NVL(sum(case when "bill-flag" = 'gross' and "substitution-status" in ('substituted', 'not-substituted') then quantity end),1) as "subs_den",
sum(case when "bill-flag" = 'gross' and "substitution-status" = 'substituted' and "hd-flag" = True then quantity end) as "hd_subs_num",
NVL(sum(case when "bill-flag" = 'gross' and "substitution-status" in ('substituted', 'not-substituted') and "hd-flag" = True then quantity end),1) as "hd_subs_den",
sum(case when "bill-flag" = 'gross' and "substitution-status-g" = 'ga-substituted' and "goodaid-availablity-flag"='available' then quantity end) as "ga_subs_num",
NVL(sum(case when "bill-flag" = 'gross' and "goodaid-availablity-flag"='available' and "substitution-status" in ('ga-substituted', 'substituted', 'not-substituted') then quantity end),1) as "ga_subs_den",
sum(case when "bill-flag" = 'return' then "revenue-value" end) as "return-value",
sum(case when "bill-flag" = 'gross' then "revenue-value" end) as "gross-revennue",
count(distinct case when "promo-code" = 'BOGO' and "bill-flag" = 'gross' then "bill-id" end) as "bogo-bills",
sum("revenue-value") as revenue,
sum(case when "pr-flag" =True then "revenue-value" end) as "pr_sales",
sum(case when "hd-flag" =True then "revenue-value" end) as "hd_sales",
sum(case when "company-id" =6984 then "revenue-value" end) as "goodaid_sales",
sum(case when "ecom-flag" =True then "revenue-value" end) as "ecomm_sales",
sum(case when "type" ='generic' then "revenue-value" end) as "generic_sales",
count(DISTINCT case when "hd-flag" =True and "bill-flag" = 'gross' then "bill-id" end) as "hd_bills",
count(distinct case when "bill-flag" = 'gross' then "bill-id" end) as "NOB",
sum(case when "bill-flag" = 'gross' then "revenue-value" end)*1.0/NVL(count(distinct case when "bill-flag" = 'gross' then "bill-id" end),1)*1.0 as "ABV"
from
"prod2-generico"."sales"
where "created-date">='{date1}'
and "created-date"<='{date2}'
group by
"store-id" ,
"created-date";
"""
sales_data = rs_db.get_df(sd)
sales_data.columns = [c.replace('-', '_') for c in sales_data.columns]
sales_data['date'] = sales_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=sales_data,on=['store_id','date'],how='left')
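# subs_den, hd_subs_den and ga_subs_den are NVL'd to 1 in the SQL above, presumably so
# that downstream substitution-percentage calculations never divide by zero on
# store-days with no eligible quantity.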
# =============================================================================
# Missed Call info
# =============================================================================
msc = f"""
SELECT
scle."store-id",
date(scle."date-time") as "date",
count(case when scle."call-type" = 'MISSED' then scle."call-type" end) as "missed_calls",
count(scle."call-type") as "total_received_calls"
FROM
"prod2-generico"."store-call-logs-entries" scle
where
scle."call-type" in ('INCOMING',
'MISSED')
and date(scle."date-time") >= '{date1}'
and date(scle."date-time") <= '{date2}'
group by
scle."store-id",
date(scle."date-time");
"""
missed_call = rs_db.get_df(msc)
missed_call.columns = [c.replace('-', '_') for c in missed_call.columns]
missed_call['date'] = missed_call['date'].astype('str')
m_data = pd.merge(left=m_data,right=missed_call,on=['store_id','date'],how='left')
# =============================================================================
# Calling dashboard
# =============================================================================
call = f"""
select
cd."store-id" ,
date(cd."created-at") as "date",
count(distinct cd.id) as "target_calls",
count(distinct case when ch.id is not null then cd.id end) as "actual_calls",
count(distinct case when cd."backlog-days-count">0 then cd.id end) as "backlog_days_flag"
from
"prod2-generico"."calling-dashboard" cd
left join "prod2-generico"."calling-history" ch on
cd.id = ch."calling-dashboard-id"
where
date(cd."created-at")>= '{date1}'
and date(cd."created-at")<= '{date2}'
group by
cd."store-id" ,
date(cd."created-at");
"""
calling_data = rs_db.get_df(call)
calling_data.columns = [c.replace('-', '_') for c in calling_data.columns]
calling_data['date'] = calling_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=calling_data,on=['store_id','date'],how='left')
# =============================================================================
# NPI
# =============================================================================
npi = f"""
select
nrt."store-id" ,
date(nrt."store-return-created-at") as "date",
avg(DATEDIFF ('h', nrt."npi-added-in-store-at", nrt."check-created-at" )) as "hours-to-start-scanning",
avg(DATEDIFF ('h', nrt."npi-added-in-store-at", nrt."store-return-created-at" )) as "hours-to-mark-store-return"
from
"prod2-generico"."npi-returns-tracking" nrt
where date(nrt."store-return-created-at")>='{date1}'
and date(nrt."store-return-created-at")<= '{date2}'
group by
nrt."store-id",
date(nrt."store-return-created-at");
"""
npi_data = rs_db.get_df(npi)
npi_data.columns = [c.replace('-', '_') for c in npi_data.columns]
npi_data['date'] = npi_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=npi_data,on=['store_id','date'],how='left')
# =============================================================================
# Cluster FF
# =============================================================================
cff = f"""
Select
date(pso."created-at") AS "date",
-- PSO Created at
pstm."from-store-id" AS "store-id" ,
--pstm."to-store-id" AS "destination-store-id",
-- max(c.id) as "cluster-id" ,
-- max(sos."name") AS "source_store",
-- max(des."name") AS "destination_store",
-- max(pstm."item-quantity") AS "to-be-transferred-qty",
-- SUM(sti."quantity") as "actual-transferred-qty",
-- pso."status" as "pso-status",
-- pstm."status" AS "tn-status",
-- st."status" AS "st-status",
-- pso."drug-id" ,
-- pso."drug-name" ,
-- max(pstm.id) AS "pstm-id",
-- max(pstm."is-active") as "is-active",
avg(DATEDIFF ('h', pstm."created-at", st."initiated-at" )) as "hrs_cluster_order_ready_for_pickup",
avg(DATEDIFF ('h', pstm."created-at", st."transferred-at" )) as "hrs_cluster_biker_picked_up_order",
avg(DATEDIFF ('h', pstm."created-at", st."received-at" )) as "hrs_cluster_store_received_order"
-- PSO Created at
FROM "prod2-generico"."pso-stock-transfer-mapping" pstm
LEFT JOIN "prod2-generico"."stock-transfers-1" st on
pstm."stock-transfer-id" = st.id
Left JOIN "prod2-generico"."pso-stock-transfer-inventory-mapping" pstim ON
pstm.id = pstim."pso-stock-transfer-mapping-id"
LEFT JOIN "prod2-generico"."stock-transfer-items-1" sti ON
pstim."inventory-id" = sti."inventory-id"
AND st.id = sti."transfer-id"
Left join "prod2-generico"."patients-store-orders" pso ON
pstm."patient-store-order-id" = pso.id
left join "prod2-generico"."store-clusters" sc on
pstm."from-store-id" = sc."store-id"
left join "prod2-generico".stores sos on
pstm."from-store-id" = sos.id
left join "prod2-generico".stores des on
pstm."to-store-id" = des.id
inner join "prod2-generico".clusters c on
sc."cluster-id" = c.id
and sc."is-active" = 1
WHERE
sc."cluster-id" is not null
AND date(pso."created-at") >= '{date1}'
and date(pso."created-at") <= '{date2}'
GROUP BY pstm."from-store-id",
date(pso."created-at");
"""
cluster_data = rs_db.get_df(cff)
cluster_data.columns = [c.replace('-', '_') for c in cluster_data.columns]
cluster_data['date'] = cluster_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=cluster_data,on=['store_id','date'],how='left')
# =============================================================================
# as - pr otif
# =============================================================================
aspr = f"""
select
"store-id" ,
date("created-at") as "date",
sum(case
when "as-ms-pr" = 'as' then "requested-quantity"
end) as "as_requested_qty",
sum(case
when "as-ms-pr" = 'pr' then "requested-quantity"
end) as "pr_requested_qty",
sum(case
when "as-ms-pr" = 'as'
and date("store-delivered-at")!= '0101-01-01'
and "store-delivered-at" < "delivery-tat" then "requested-quantity"
end) as "as_otif_qty",
sum(case
when "as-ms-pr" = 'pr'
and date("store-delivered-at")!= '0101-01-01'
and "store-delivered-at" < "delivery-tat" then "requested-quantity"
end) as "pr_otif_qty"
from
"prod2-generico"."sb-sla"
where date("created-at")>='{date1}'
and date("created-at")<= '{date2}'
group by "store-id" ,date("created-at");
"""
aspr_data = rs_db.get_df(aspr)
aspr_data.columns = [c.replace('-', '_') for c in aspr_data.columns]
aspr_data['date'] = aspr_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=aspr_data,on=['store_id','date'],how='left')
# =============================================================================
# store opening closing
# =============================================================================
s_date = f"""
select
date("created-at") as "date",
"store-id" ,
min("created-at") as "first_search",
max("created-at") as "last_search"
from
"prod2-generico"."searches"
group by
date("created-at"),
"store-id";
"""
opening_data = rs_db.get_df(s_date)
opening_data.columns = [c.replace('-', '_') for c in opening_data.columns]
opening_data['date'] = opening_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=opening_data,on=['store_id','date'],how='left')
# =============================================================================
# store info
# =============================================================================
s_info = f"""
select
id as "store-id",
store ,
"line-manager" ,
abo ,
city ,
"franchisee-name",
acquired ,
"cluster-name" ,
"old-new-static"
from
"prod2-generico"."stores-master";
"""
store_info = rs_db.get_df(s_info)
store_info.columns = [c.replace('-', '_') for c in store_info.columns]
m_data = pd.merge(left=m_data,right=store_info,on=['store_id'],how='left')
# =============================================================================
# PR wholeness
# =============================================================================
pro = f"""
select
pr."store-id" ,
date(pr."turnaround-time") as "date",
sum(case when pr."pso-status" != 'pso-draft' then pr."selling-rate" end) as "pr_created_value",
sum(case when pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end)<= pr."turnaround-time" then pr."selling-rate" else 0 end) as "within_slot_delivered_pr_value",
sum(case when pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end) is not null then pr."selling-rate" end) as "total_delivered_pr_value",
count(distinct case when pr."pso-status" != 'pso-draft' then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0) )::text end) as "pr_created_count",
count(distinct case when pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end)<= pr."turnaround-time" then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0)::text ) else null end) as "within_slot_delivered_count",
count(distinct case when pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end) is not null then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0) )::text end) as "total_delivered_pr_count",
sum(case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' then pr."selling-rate" end) as "pr_created_value_delivery",
sum(case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end)<= pr."turnaround-time" then pr."selling-rate" else 0 end) as "within_slot_delivered_pr_value_delivery",
sum(case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end) is not null then pr."selling-rate" end) as "total_delivered_pr_value_delivery",
count(distinct case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0) )::text end) as "pr_created_count_delivery",
count(distinct case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end)<= pr."turnaround-time" then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0)::text ) else null end) as "within_slot_delivered_count_delivery",
count(distinct case when pr."order-type" = 'delivery' and pr."pso-status" != 'pso-draft' and (case when pr."order-type" = 'pickup' then pr."completed-at" else hdm."delivered-at" end) is not null then (pr."order-number" || pr."store-id" || pr."patient-id" || pr."created-at" || nvl(pr."bill-id", 0) )::text end) as "total_delivered_pr_count_delivery"
from
"prod2-generico"."patient-requests-metadata" pr
left join "prod2-generico"."home-delivery-metadata" hdm
on
hdm.id = pr.id
where
date(pr."created-at") >= '{date1}'
and date(pr."created-at") <= '{date2}'
group by
1,
2;
"""
pro_data = rs_db.get_df(pro)
pro_data.columns = [c.replace('-', '_') for c in pro_data.columns]
pro_data['date'] = pro_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=pro_data,on=['store_id','date'],how='left')
# =============================================================================
# Search to pr conversion
# =============================================================================
search = f"""
select date("search-date") as "date", "store-id",
sum(case when "pr-opportunity-converted-flag"=1 then "lost-sales" end) as "pr_achieved_sales",
sum(case when "pr-opportunity-converted-flag"=1 then "loss-quantity" end) as "pr_achieved_qty",
sum(case when "pr-opportunity-flag" =1 then "lost-sales" end) as "search_loss_sales",
sum(case when "pr-opportunity-flag" =1 then "loss-quantity" end) as "search_loss_qty"
from "prod2-generico"."cfr-searches-v2"
where
date("search-date") >= '{date1}'
and date("search-date") <= '{date2}'
group by
date("search-date"),"store-id" ;
"""
search_data = rs_db.get_df(search)
search_data.columns = [c.replace('-', '_') for c in search_data.columns]
search_data['date'] = search_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=search_data,on=['store_id','date'],how='left')
# =============================================================================
# cash tally data
# =============================================================================
ctally = f"""
select
"store-id" ,
date,
max("created-at") as max_cash_tally_date
from
"prod2-generico"."cash-tally" where
date("date") >= '{date1}'
and date("date") <= '{date2}'
group by
"store-id" ,
date;
"""
ctally_data = rs_db.get_df(ctally)
ctally_data.columns = [c.replace('-', '_') for c in ctally_data.columns]
ctally_data['date'] = ctally_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=ctally_data,on=['store_id','date'],how='left')
# =============================================================================
# Expiry Sales value
# =============================================================================
exp = f"""
select
si."snapshot-date" as "date",
si."entity-id" as "store-id" ,
SUM(case when si."inventory-sub-type-1" = 'expired'
then si."value-with-tax" end ) as "expired-value",
SUM(case when si."inventory-sub-type-1" = 'near-expiry'
then si."value-with-tax" end ) as "near-expiry-value"
from
"prod2-generico"."system-inventory" si
where
si."entity-type" = 'store'
and date(si."snapshot-date") >= '{date1}'
and date(si."snapshot-date") <= '{date2}'
group by
si."snapshot-date" ,
si."entity-id" ;
"""
exp_data = rs_db.get_df(exp)
exp_data.columns = [c.replace('-', '_') for c in exp_data.columns]
exp_data['date'] = exp_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=exp_data,on=['store_id','date'],how='left')
# =============================================================================
# PSO draft conversion %
# =============================================================================
draft = f"""
select
date("created-at") as "date",
"store-id" ,
sum(case when "pso-parent-id" is not null then 1 else 0 end) as "pso-draft-count",
sum(case when "pso-parent-id" is not null and status != 'pso-draft' then 1 else 0 end) as "pso-draft-converted-count"
from
"prod2-generico"."patients-store-orders" pso
where
date("created-at")>= '{date1}'
and date("created-at")<= '{date2}'
group by
1,
2 ;
"""
draft_data = rs_db.get_df(draft)
draft_data.columns = [c.replace('-', '_') for c in draft_data.columns]
draft_data['date'] = draft_data['date'].astype('str')
m_data = pd.merge(left=m_data,right=draft_data,on=['store_id','date'],how='left')
m_data.info()
# Write to Redshift Also
m_data.columns = [c.replace('_', '-') for c in m_data.columns]
m_data.columns
m_data = m_data[[
'store-id' ,'date' ,'pr-received-tat' ,'as-received-tat'
,'lp-sales-sum' ,'lp-value'
,'lp-pr-sales' ,'min-count-oos-ax' ,'total-drug-count-oos-ax' ,'goodaid-min-count'
,'goodaid-total-count' ,'total-drug-count-oos' ,'total-oos-drug-count-oos' ,'feedback-bills-pct'
,'flag-rating' ,'subs-num' ,'subs-den' ,'hd-subs-num' ,'hd-subs-den'
,'ga-subs-num' ,'ga-subs-den' ,'return-value' ,'gross-revennue'
,'bogo-bills' ,'revenue' ,'pr-sales' ,'hd-sales'
,'goodaid-sales' ,'ecomm-sales' ,'hd-bills' ,'nob'
,'abv' ,'missed-calls' ,'total-received-calls' ,'target-calls'
,'actual-calls' ,'hours-to-start-scanning' ,'hours-to-mark-store-return' ,'hrs-cluster-order-ready-for-pickup'
,'hrs-cluster-biker-picked-up-order' ,'hrs-cluster-store-received-order' ,'as-requested-qty' ,'pr-requested-qty'
,'as-otif-qty' ,'pr-otif-qty' ,'first-search' ,'last-search'
,'store' ,'line-manager' ,'abo' ,'city' ,'franchisee-name'
,'acquired' ,'cluster-name' ,'old-new-static' ,'pr-created-value'
,'within-slot-delivered-pr-value' ,'total-delivered-pr-value' ,'pr-created-count' ,'within-slot-delivered-count'
,'total-delivered-pr-count','pr-achieved-sales' ,'pr-achieved-qty'
,'search-loss-sales' ,'search-loss-qty','feedback-bills' ,'max-cash-tally-date', 'backlog-days-flag',
'pr-created-value-delivery'
, 'within-slot-delivered-pr-value-delivery', 'total-delivered-pr-value-delivery', 'pr-created-count-delivery', 'within-slot-delivered-count-delivery'
, 'total-delivered-pr-count-delivery', 'generic-sales','expired-value', 'actual-value',
'accounted-value', 'billing-page-value', 'merchandizing-issue', 'pso-draft-count', 'pso-draft-converted-count', 'near-expiry-value'
]]
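# Full-refresh load: the target table is emptied with an unfiltered DELETE and then
# re-populated with this run's m_data.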
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=m_data[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/sc-metrics/sc-metrics.py | sc-metrics.py |
import argparse
import os
import sys
from datetime import datetime as dt
from datetime import timedelta
import numpy as np
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.parameter.job_parameter import parameter
job_params = parameter.get_params(job_id=64)
env = job_params['env']
os.environ['env'] = env
email_to = job_params['email_to']
day_wise_sale = job_params['day_wise_sale']
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
read_schema = 'prod2-generico'
# Fetching data from google sheet
gs = GoogleSheet()
cfr_data = gs.download({
"spreadsheet_id": "18SUPchsLqNAl0m7xSu09jz51taZO5JgZtQQAliahddg",
"sheet_name": "CFR",
"listedFields": []})
generic_activity_data = gs.download({
"spreadsheet_id": "18SUPchsLqNAl0m7xSu09jz51taZO5JgZtQQAliahddg",
"sheet_name": "Generic Activity",
"listedFields": []})
data_cfr = pd.DataFrame(cfr_data)
data_generic = pd.DataFrame(generic_activity_data)
data_cfr['Project'] = 'CFR Search'
data_generic['Project'] = 'Generic Assrt Extention'
data = pd.concat([data_cfr, data_generic])
end = dt.now().date() - timedelta(days=1)
start_m = dt.now().date() - timedelta(days=30)
start_w1 = dt.now().date() - timedelta(days=7)
start_w2 = dt.now().date() - timedelta(days=14)
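# Reporting windows: trailing 30 / 14 / 7 days, all ending yesterday.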
data = data[['store_id', 'drug_id', 'drug name', 'drug grp', 'cat', 'Project']]
data.columns = [c.replace('_', '-') for c in data.columns]
data = data.drop_duplicates()
data['drug grp'] = data['drug grp'].apply(lambda x: "others" if x not in ('ethical', 'generic') else x)
drugs = tuple(data['drug-id'].drop_duplicates())
limit_str = " "
doi_q = f"""
SELECT
"store-id",
"drug-id",
"min",
"safe-stock",
"max",
"as-active",
"ms-active",
"pr-active"
FROM
"{read_schema}"."drug-order-info"
WHERE
"drug-id" in {drugs}
{limit_str}"""
inv_q = f"""
SELECT
"store-id",
"drug-id",
SUM("locked-quantity" + "quantity" + "locked-for-audit" + "locked-for-transfer"
+ "locked-for-check" + "locked-for-return") AS "current-inventory",
SUM(("locked-quantity" + "quantity" + "locked-for-audit" + "locked-for-transfer"
+ "locked-for-check" + "locked-for-return")* ptr) as "inv-value"
FROM
"{read_schema}"."inventory-1"
WHERE
"drug-id" in {drugs}
GROUP BY
"store-id",
"drug-id" {limit_str};"""
sales_q = f"""
select
"store-id",
"drug-id",
DATE("created-at") as "sales-date",
sum("net-quantity") as "net-sales-quantity",
sum("revenue-value") as "net-sales-value"
from
"{read_schema}".sales s
where
DATE("created-at") between '{start_m}' and '{end}'
and "drug-id" in {drugs}
group by
"store-id",
"drug-id",
DATE("created-at") {limit_str};
"""
doi = rs_db.get_df(doi_q)
inv = rs_db.get_df(inv_q)
sales = rs_db.get_df(sales_q)
data['store-id'] = data['store-id'].astype(int)
data['drug-id'] = data['drug-id'].astype(int)
data = pd.merge(data, doi, how='left', on=['store-id', 'drug-id'])
inv['current-inventory'] = inv['current-inventory'].apply(lambda x: 0 if x < 1 else x)
inv['inv-value'] = inv['inv-value'].apply(lambda x: 0 if x < 1 else x)
data = pd.merge(data, inv, how='left', on=['store-id', 'drug-id'])
D_30 = sales[sales['sales-date'].between(start_m, end)].groupby(['store-id',
'drug-id'],
as_index=False).agg({'net-sales-quantity': 'sum',
'net-sales-value': 'sum'})
D_14 = sales[sales['sales-date'].between(start_w2,
end)].groupby(['store-id',
'drug-id'],
as_index=True).agg(
sales_quantiy_14=('net-sales-quantity', 'sum'),
sales_value_14=('net-sales-value', 'sum')).reset_index()
D_07 = sales[sales['sales-date'].between(start_w1,
end)].groupby(['store-id',
'drug-id'],
as_index=True).agg(
sales_quantiy_07=('net-sales-quantity', 'sum'),
sales_value_07=('net-sales-value', 'sum')).reset_index()
D_30['net-sales-quantity'] = D_30['net-sales-quantity'].apply(lambda x: 0 if x < 1 else x)
D_30['net-sales-value'] = D_30['net-sales-value'].apply(lambda x: 0 if x < 1 else x)
if day_wise_sale:
sales_day_wise = pd.pivot_table(data=sales,
index=['store-id', 'drug-id'],
columns='sales-date', values='net-sales-quantity',
aggfunc='sum').reset_index().fillna(0)
data = pd.merge(data, sales_day_wise, how='left', on=['store-id', 'drug-id'])
data = pd.merge(data, D_30, how='left', on=['store-id', 'drug-id'])
data = pd.merge(data, D_14, how='left', on=['store-id', 'drug-id'])
data = pd.merge(data, D_07, how='left', on=['store-id', 'drug-id'])
data['Max>0 Count'] = data['max'].apply(lambda x: 1 if x > 0 else 0)
data['Total Str-Drg Combinations'] = 1
data['Inv > 0 Count'] = data['current-inventory'].apply(lambda x: 1 if x > 0 else 0)
data = data.fillna(0)
availability_summary = data.groupby(['Project', 'drug grp'],
as_index=False).agg({'Total Str-Drg Combinations': 'sum',
'max': 'sum',
'Max>0 Count': 'sum',
'as-active': 'sum',
'current-inventory': 'sum',
'Inv > 0 Count': 'sum'})
availability_summary['Availability %'] = availability_summary['Inv > 0 Count'] / availability_summary[
'Total Str-Drg Combinations']
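# Availability % = share of store-drug combinations with at least one unit in stock.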
availability_summary.columns = ['Project',
'Type',
'Total Str-Drg Combinations',
'Max',
'Max>0 Count',
'AS Active Count',
'Inv QTY',
'Inv > 0 Count',
'Availability %']
inventory_doh_summary = data.groupby(['Project', 'drug grp'],
as_index=False).agg({'Total Str-Drg Combinations': 'sum',
'current-inventory': 'sum',
'inv-value': 'sum',
'net-sales-quantity': 'sum',
'net-sales-value': 'sum'
})
inventory_doh_summary['Avg DOH'] = inventory_doh_summary['inv-value'] * 30 / inventory_doh_summary[
'net-sales-value']
inventory_doh_summary.columns = ['Project',
'Type',
'Total Str-Drg Combinations',
'Inv QTY',
'Inv Value',
'Last 30 days sales qty',
'Last 30 days sales Value',
'Avg DOH']
data['inv-value'] = data['inv-value'].astype(float)
data['net-sales-value'] = data['net-sales-value'].astype(float)
data['DOH'] = data['inv-value'] * 30 / data['net-sales-value']
data['DOH'] = data['DOH'].fillna(0)
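# DOH (days on hand) = inventory value * 30 / last-30-day sales value. Stocked drugs
# with zero sales become inf and land in the 'No sales' bucket below; rows with zero
# current inventory are overridden to 'No Stock' after the cut.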
data['DOH Buckets'] = pd.cut(data['DOH'], bins=[-1, 7, 15, 30, 90, 10000, np.inf], labels=['less than 7',
'7 - 15 days',
'15-30 days',
'30-90 days',
'greater than 90 days',
'No sales'])
data['DOH Buckets'] = np.where(data['current-inventory'] < 1, 'No Stock', data['DOH Buckets'])
doh_buckets = data.groupby(['Project', 'DOH Buckets'],
as_index=True).agg({'Total Str-Drg Combinations': 'sum',
'inv-value': 'sum',
'net-sales-value': 'sum',
'DOH': 'mean'}).reset_index()
doh_buckets.columns = ['Project',
'DOH Buckets',
'Count Of Str-Drug',
'Inv Val',
'Sales Val ( Last 30 days )',
'Avg DOH']
str_drg = data[['store-id', 'drug-id', 'Project']].drop_duplicates()
str_drg = str_drg.rename(columns={'Project': 'project'})
file_name = 'Summary_Report.xlsx'
file_path = s3.write_df_to_excel(data={'Presence & Availability': availability_summary,
'Inventory DOH': inventory_doh_summary,
'DOH Buckets': doh_buckets,
'base file': data}, file_name=file_name)
email = Email()
email.send_email_file(subject="Weekly CFR Visibility Report",
mail_body=f'Weekly CFR Visibility Report from {start_m} to {end}',
to_emails=email_to, file_uris=[], file_paths=[file_path]) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/cfr_auto_mailer/cfr_auto_mailer.py | cfr_auto_mailer.py |
import os
import sys
import pandas as pd
import datetime as dt
import numpy as np
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper import helper
import argparse
def main(debug_mode, as_inactive_exclude_types, mature_delta_days,
exclude_stores, db, rs_db_write, read_schema, write_schema, logger):
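    """Compute day-end out-of-shelf metrics for every active store.

    For each store, current inventory, last-30-day sales, IPC buckets and
    drug-order-info are combined into drug-level and store-level OOS snapshots,
    which are written to Redshift only when debug_mode is 'N'. Returns the run
    status, run date, store count and headline OOS percentages used in the
    notification e-mail.
    """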
s3 = S3()
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
run_date = dt.date.today()
mature_delta_start_date = run_date - dt.timedelta(days=mature_delta_days)
logger.info("Getting all store IDs")
stores = get_stores(exclude_stores, db, read_schema)
store_id_list = list(stores['id'])
logger.info("All stores ID's fetched!")
out_of_shelf = pd.DataFrame()
try:
# getting oos iteratively
for store_id in store_id_list:
logger.info(f"Getting required data for store: {store_id}")
# getting current inventory
inventory = get_inventory(store_id, db, read_schema)
# getting past 30d sales values of drugs in store
df_30d_sales = get_past_30d_sales(store_id, db, run_date,
read_schema)
# getting ipc buckets
df_buckets = get_buckets(store_id, db, read_schema)
# getting doi info for drugs whose max is set
doi_class_1 = get_drug_list(store_id, db, read_schema, max_set='Y',
drugs=[])
doi_class_1["max_set"] = 'Y'
# getting doi info for drugs whose max is not set,
# but sold in past 3 months
drug_list = list(df_30d_sales.drug_id.unique())
doi_class_2 = get_drug_list(store_id, db, read_schema, max_set='N',
drugs=drug_list)
doi_class_2["max_set"] = 'N'
# combine both doi_class tables
doi_class = doi_class_1.append(doi_class_2)
# getting std_qty of all drugs in system
drug_std_qty = get_std_qty(db, read_schema)
# get mature drug flags
df_mature_flag = mature_drugs_flag(
store_id, run_date, mature_delta_start_date, mature_delta_days,
                db, read_schema)
# ==========================================
# Out of shelf calculation starts
# ==========================================
store_name = inventory["store_name"].unique()[0]
out_of_shelf_store = doi_class.merge(inventory[['store_id',
'store_name',
'drug_id',
'quantity']],
on=['store_id', 'drug_id'],
how='left')
out_of_shelf_store['quantity'].fillna(0, inplace=True)
out_of_shelf_store['store_name'].fillna(store_name, inplace=True)
            # set OUP = 0 for all drugs whose max is not set (max_set = 'N'), e.g. NPI drugs
out_of_shelf_store["order_upto_point"] = np.where(
out_of_shelf_store["max_set"] == 'N',
0, out_of_shelf_store["order_upto_point"])
# merging buckets of drugs
out_of_shelf_store = out_of_shelf_store.merge(df_buckets,
on=["store_id",
"drug_id"],
how="left")
out_of_shelf_store["bucket"].fillna('NA', inplace=True)
# merging std_qty of drugs
out_of_shelf_store = out_of_shelf_store.merge(drug_std_qty,
on="drug_id",
how="left")
out_of_shelf_store["std_qty"].fillna(1, inplace=True)
# merging past 30d sales value of store-drugs
out_of_shelf_store = out_of_shelf_store.merge(df_30d_sales,
on="drug_id",
how="left")
out_of_shelf_store["gross_sales_val"].fillna(0, inplace=True)
# add mature drug_flag
out_of_shelf_store = out_of_shelf_store.merge(
df_mature_flag, how='left', on=['store_id', 'drug_id'])
out_of_shelf_store['mature_flag'].fillna('N', inplace=True)
out_of_shelf = out_of_shelf.append(out_of_shelf_store)
out_of_shelf["closing_date"] = run_date
logger.info("All stores required data fetched!")
# calculating store wise OOS percent high-value-ethical
logger.info("Creating OOS report on store-drug level")
out_of_shelf["type"] = np.where(
out_of_shelf["type"] == 'high-value-ethical', 'ethical',
out_of_shelf["type"])
out_of_shelf["type"] = np.where(
out_of_shelf["type"].isin(['generic', 'ethical']),
out_of_shelf["type"], 'others')
out_of_shelf["oos_flag"] = np.where(out_of_shelf["quantity"] == 0, 1, 0)
        # adding inv <= min (safety-stock) flag
out_of_shelf["oos_min_flag"] = np.where(
out_of_shelf["quantity"] <= out_of_shelf["safety_stock"], 1, 0)
# adding inv<std_qty flag
out_of_shelf["oos_std_qty_flag"] = np.where(
out_of_shelf["quantity"] < out_of_shelf["std_qty"], 1, 0)
# adding 30d sales value
out_of_shelf["oos_sales_loss_30d"] = out_of_shelf["gross_sales_val"] * \
out_of_shelf["oos_flag"]
out_of_shelf["sales_30d"] = out_of_shelf["gross_sales_val"]
# exclude as-active = 0 based on specified drug types
out_of_shelf = out_of_shelf.loc[~((out_of_shelf["as_active"] == 0) &
(out_of_shelf["type"].isin(
as_inactive_exclude_types)))]
# OOS group store drug level
out_of_shelf_group = pd.DataFrame(out_of_shelf.groupby(
['store_id', 'store_name', 'drug_id', 'drug_name', 'type', 'bucket',
'drug_grade', 'max_set', 'mature_flag']).agg(
{'oos_flag': ['sum', 'count'], 'oos_min_flag': ['sum', 'count'],
'oos_std_qty_flag': ['sum', 'count'],
'oos_sales_loss_30d': 'sum', 'sales_30d': 'sum'})).reset_index()
out_of_shelf_group.columns = [
'store_id', 'store_name', 'drug_id', 'drug_name', 'type', 'bucket',
'drug_grade', 'max_set', 'mature_flag', 'oos_count', 'drug_count',
'oos_min_count', 'oos_min_drug_count', 'oos_std_qty_count',
'oos_std_qty_drug_count', 'oos_sales_loss_30d', 'sales_30d']
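        # Three OOS definitions are tracked per store-drug:
        #   oos_count         -> quantity == 0
        #   oos_min_count     -> quantity <= min (safety stock)
        #   oos_std_qty_count -> quantity < standard quantity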
# add customer segment info
customer_segment = get_customer_segment(db, read_schema, interval=90)
out_of_shelf_group = out_of_shelf_group.merge(
customer_segment, how='left', on=['store_id', 'drug_id'])
out_of_shelf_group['customer_type'].fillna('Non-premium', inplace=True)
# add min, safe-stock, max and current-inventory
out_of_shelf_group = out_of_shelf_group.merge(
out_of_shelf[["store_id", "drug_id", "as_active", "safety_stock",
"reorder_point", "order_upto_point", "quantity"]],
on=["store_id", "drug_id"], how="left")
out_of_shelf_group.rename(
{"safety_stock": "min", "reorder_point": "safe-stock",
"order_upto_point": "max", "quantity": "inventory_quantity"},
axis=1, inplace=True)
out_of_shelf_group['closing_date'] = run_date
# OOS group store type grade level
out_of_shelf_store_group = pd.DataFrame(out_of_shelf.groupby(
['store_id', 'store_name', 'type', 'bucket', 'max_set']).agg(
{'oos_flag': ['sum', 'count'], 'oos_min_flag': ['sum', 'count'],
'oos_std_qty_flag': ['sum', 'count'], 'oos_sales_loss_30d': 'sum',
'sales_30d': 'sum'})).reset_index()
out_of_shelf_store_group.columns = [
'store_id', 'store_name', 'type', 'bucket', 'max_set', 'oos_count',
'drug_count', 'oos_min_count', 'oos_min_drug_count',
'oos_std_qty_count', 'oos_std_qty_drug_count',
'oos_sales_loss_30d', 'sales_30d']
out_of_shelf_store_group['closing_date'] = run_date
# required format for RS write
logger.info("Formatting table for RS-DB write")
out_of_shelf_group.columns = [c.replace('_', '-') for c in
out_of_shelf_group.columns]
out_of_shelf_store_group.columns = [c.replace('_', '-') for c in
out_of_shelf_store_group.columns]
out_of_shelf_group['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
out_of_shelf_group['created-by'] = 'etl-automation'
out_of_shelf_group['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
out_of_shelf_group['updated-by'] = 'etl-automation'
out_of_shelf_store_group['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
out_of_shelf_store_group['created-by'] = 'etl-automation'
out_of_shelf_store_group['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
out_of_shelf_store_group['updated-by'] = 'etl-automation'
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
logger.info("Writing to table: out-of-shelf-drug-level")
table_info = helper.get_table_info(db=rs_db_write,
table_name='out-of-shelf-drug-level',
schema=write_schema)
columns = list(table_info['column_name'])
out_of_shelf_group = out_of_shelf_group[columns] # required column order
s3.write_df_to_db(df=out_of_shelf_group,
table_name='out-of-shelf-drug-level',
db=rs_db_write, schema=write_schema)
logger.info("Writing to table: out-of-shelf-store-level")
table_info = helper.get_table_info(db=rs_db_write,
table_name='out-of-shelf-store-level',
schema=write_schema)
columns = list(table_info['column_name'])
out_of_shelf_store_group = out_of_shelf_store_group[columns] # required column order
s3.write_df_to_db(df=out_of_shelf_store_group,
table_name='out-of-shelf-store-level',
db=rs_db_write, schema=write_schema)
logger.info("Writing table to RS-DB completed!")
else:
logger.info("Writing to RS-DB skipped")
# get metrics for email notification
df_max_set = out_of_shelf_store_group.loc[
out_of_shelf_store_group["max-set"] == 'Y']
df_ethical = df_max_set.loc[df_max_set["type"] == 'ethical']
df_generic = df_max_set.loc[df_max_set["type"] == 'generic']
df_others = df_max_set.loc[df_max_set["type"] == 'others']
store_count = len(df_max_set["store-id"].unique())
oos_sys = round(
(100 * df_max_set["oos-count"].sum() / df_max_set["drug-count"].sum()), 2)
oos_ethical = round(
(100 * df_ethical["oos-count"].sum() / df_ethical["drug-count"].sum()), 2)
oos_generic = round(
(100 * df_generic["oos-count"].sum() / df_generic["drug-count"].sum()), 2)
oos_others = round(
(100 * df_others["oos-count"].sum() / df_others["drug-count"].sum()), 2)
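        # Headline numbers for the notification mail are restricted to max-set = 'Y'
        # drugs across all maturity flags, matching the footnote in the mail body.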
status = 'Success'
logger.info(f"OOS code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"OOS code execution status: {status}")
store_count, oos_sys, oos_ethical, oos_generic, oos_others = 0, 0, 0, 0, 0
return status, run_date, store_count, oos_sys, oos_ethical, oos_generic, oos_others
def get_stores(exclude_stores, db, schema):
if not exclude_stores:
exclude_stores = "(0)"
else:
exclude_stores = tuple(exclude_stores)
q_store = """
select id
from "{schema}".stores
where name <> 'Zippin Central'
and name <> 'Marketing'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {exclude_stores}
""".format(schema=schema, exclude_stores=exclude_stores)
df_stores = db.get_df(q_store)
return df_stores
def get_inventory(store_id, db, schema):
q_inventory = """
select "store-id" , s.name as "store-name", "drug-id" ,
sum(quantity+"locked-quantity"+"locked-for-audit"+
"locked-for-check") as quantity,
sum(quantity+"locked-quantity"+"locked-for-audit"+
"locked-for-check"+"locked-for-transfer"+"locked-for-return")
as "total-system-quantity"
from "{schema}"."inventory-1" i
left join "{schema}".stores s
on i."store-id" = s.id
where "store-id" = {0}
group by "store-id" , s.name, "drug-id"
""".format(store_id, schema=schema)
df_inventory = db.get_df(q_inventory)
df_inventory.columns = [c.replace('-', '_') for c in df_inventory.columns]
return df_inventory
def get_past_30d_sales(store_id, db, run_date, schema):
start_date = run_date - dt.timedelta(days=30)
end_date = run_date
q_sales = """
select "drug-id", sum("revenue-value") as "gross-sales-val"
from "{schema}".sales s
where "store-id" = {0}
and "bill-flag" = 'gross'
and date("created-at") between '{1}' and '{2}'
group by "drug-id"
""".format(store_id, start_date.strftime("%Y-%m-%d"),
end_date.strftime("%Y-%m-%d"), schema=schema)
df_30d_sales = db.get_df(q_sales)
df_30d_sales.columns = [c.replace('-', '_') for c in df_30d_sales.columns]
df_30d_sales = df_30d_sales.dropna()
df_30d_sales["drug_id"] = df_30d_sales["drug_id"].astype(int)
return df_30d_sales
def get_std_qty(db, schema):
q_std_qty = f"""
select "drug-id" , "std-qty"
from "{schema}"."drug-std-info" dsi
"""
df_drug_std_qty = db.get_df(q_std_qty)
df_drug_std_qty.columns = [c.replace('-', '_') for c in df_drug_std_qty.columns]
return df_drug_std_qty
def get_buckets(store_id, db, schema):
q_latest_buckets = f"""
select "store-id" , "drug-id" , bucket
from "{schema}"."ipc2-segmentation" is2
where "store-id" = {store_id}
and "reset-date" = (select max("reset-date")
from "{schema}"."ipc2-segmentation" where "store-id" = {store_id})
"""
df_latest_buckets = db.get_df(q_latest_buckets)
df_latest_buckets['bucket'] = np.where(df_latest_buckets['bucket'] == '',
'NA', df_latest_buckets['bucket'])
df_latest_buckets.columns = [c.replace('-', '_') for c in
df_latest_buckets.columns]
return df_latest_buckets
def get_customer_segment(db, schema, interval=90):
q_customer_seg = """
select distinct "store-id" , "drug-id" , seg."segment-calculation-date" ,
case
when seg."value-segment" in ('platinum', 'gold', 'silver') then 'Premium'
else 'Non-premium'
end as "customer-type"
from "{schema}".sales s
left join (
select "patient-id" , "segment-calculation-date", "value-segment"
from "{schema}"."customer-value-segment"
where "segment-calculation-date" = (select max("segment-calculation-date")
from "{schema}"."customer-value-segment")
) as seg on s."patient-id" = seg."patient-id"
where DATEDIFF(day, date(s."created-at"), current_date) <= {0}
and "customer-type" = 'Premium'
order by "store-id" , "drug-id"
""".format(interval, schema=schema)
df_customer_seg = db.get_df(q_customer_seg)
df_customer_seg.columns = [c.replace('-', '_') for c in
df_customer_seg.columns]
return df_customer_seg
def get_drug_list(store_id, db, schema, max_set='Y', drugs=None):
if max_set == 'Y':
max_condition = "max > 0"
drugs_condition = ""
elif max_set == 'N' and drugs != []:
max_condition = "max = 0"
drugs_condition = """and "drug-id" in {0}""".format(
str(drugs).replace('[', '(').replace(']', ')'))
else:
max_condition = "max = 0"
drugs_condition = """and "drug-id" in (0)"""
# getting max from drug-order-info
doi_query = """
select "store-id", "drug-id", "drug-name", type, category, "drug-grade",
min, doi."safe-stock", max, "as-active"
from "{schema}"."drug-order-info" doi
join "{schema}".drugs d on d.id = doi."drug-id"
where {1} and d.type not in ('discontinued-products', 'banned')
and "store-id" = {0}
{2}
""".format(store_id, max_condition, drugs_condition,
schema=schema)
df_doi = db.get_df(doi_query)
df_doi.columns = [c.replace('-', '_') for c in df_doi.columns]
df_doi.columns = ['store_id', 'drug_id', 'drug_name', 'type', 'category',
'drug_grade', 'safety_stock', 'reorder_point',
'order_upto_point', 'as_active']
return df_doi
def mature_drugs_flag(store_id, run_date, mature_delta_start_date,
mature_delta_days, db, schema):
q_mature_days = """
select "store-id" , "drug-id" , sum("drug-count") as mature_days
from "{schema}"."out-of-shelf-drug-level" oosdl
where "closing-date" < '{0}'
and "closing-date" >= '{1}'
and "max-set" = 'Y'
and "store-id" = {2}
group by "store-id" , "drug-id"
""".format(run_date.strftime("%Y-%m-%d"),
mature_delta_start_date.strftime("%Y-%m-%d"),
store_id, schema=schema)
df_mature_flag = db.get_df(q_mature_days)
df_mature_flag.columns = [c.replace('-', '_') for c in df_mature_flag.columns]
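    # A store-drug is flagged mature only if it appeared in the drug-level OOS table
    # (max-set = 'Y') on every one of the last mature_delta_days days, i.e. its summed
    # drug-count equals mature_delta_days (one row per day is assumed here).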
df_mature_flag["mature_flag"] = np.where(
df_mature_flag["mature_days"] == mature_delta_days, 'Y', 'N')
df_mature_flag.drop("mature_days", axis=1, inplace=True)
return df_mature_flag
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-d', '--debug_mode', default="Y", type=str,
required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
parser.add_argument('-exlist', '--as_inact_ex_types',
default="generic,others", type=str,
required=True)
parser.add_argument('-mdd', '--mature_delta_days',
default=7, type=int, required=False)
parser.add_argument('-exs', '--exclude_stores',
default="52,60,92,243,281", type=str,
required=True)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
debug_mode = args.debug_mode
email_to = args.email_to
as_inactive_exclude_types = args.as_inact_ex_types.replace(" ", "").split(",")
mature_delta_days = args.mature_delta_days
exclude_stores = args.exclude_stores.replace(" ", "").split(",")
# ensure input is correct
if not all([i in ['generic', 'ethical', 'others'] for i in
as_inactive_exclude_types]):
as_inactive_exclude_types = ['generic', 'others'] # default types
#convert string store_ids to int
exclude_stores = [int(i) for i in exclude_stores]
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
logger = get_logger()
rs_db = DB()
rs_db_write = DB(read_only=False)
# open RS connection
rs_db.open_connection()
rs_db_write.open_connection()
""" calling the main function """
status, run_date, store_count, oos_sys, oos_ethical, \
oos_generic, oos_others = main(
debug_mode, as_inactive_exclude_types, mature_delta_days, exclude_stores,
rs_db, rs_db_write, read_schema, write_schema, logger)
# close RS connection
rs_db.close_connection()
rs_db_write.close_connection()
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"OOS Job (GLUE-{env}) {run_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Job Params: {args}
===SYSTEM LEVEL===
TOTAL : {oos_sys}%
GENERIC : {oos_generic}%
ETHICAL : {oos_ethical}%
OTHERS : {oos_others}%
STORES : {store_count}
(flags: max_set='Y', mature_flag='All')
""",
to_emails=email_to)
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/out_of_shelf/out_of_shelf.py | out_of_shelf.py |
import argparse
# this is to include zeno_etl_libs in the python search path on the run time
import sys
import os
sys.path.append('../../../..')
import pandas as pd
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
source_pg_table = "customer_behaviour_segment"
target_rs_table = "customer-behaviour-segment"
def main(rs_db, pg_db, s3, limit, batch_size):
table_info = helper.get_table_info(db=rs_db, table_name=target_rs_table, schema='prod2-generico')
columns = list(table_info['column_name'])
# columns.remove('id')
rs_db.execute(query=f""" delete from "prod2-generico"."{target_rs_table}"; """)
incomplete = True
last_id = None
df = pd.DataFrame()
total_pushed = 0
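    # Keyset-paginated copy: read batch_size rows at a time from Postgres ordered by id,
    # remember the last id seen, and push each batch to Redshift via S3 until the source
    # is exhausted (or the optional limit is crossed).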
while incomplete:
limit_str = f" limit {batch_size} " if batch_size else ""
filter_str = f" where id > {last_id} " if last_id else ""
query = f"""
select
id,
patient_id as "patient-id",
segment_calculation_date as "segment-calculation-date",
segment_name as "behaviour-segment"
from
{source_pg_table} cvs
{filter_str}
order by id asc
{limit_str} ;
"""
df = pd.read_sql_query(query, pg_db.connection)
# df['updated-at'] = dt.now()
# df['created-at'] = dt.now()
# df['created-by'] = 'etl-automation'
# df['updated-by'] = 'etl-automation'
if df.empty:
incomplete = False
else:
last_id = int(df['id'].values[-1])
df = df[columns]
# df['segment-calculation-date'] = pd.to_datetime(df['segment-calculation-date'])
# df['segment-calculation-date'] = df['segment-calculation-date'].dt.date.apply(str)
# print(df)
s3.write_df_to_db(df=df, table_name=target_rs_table, db=rs_db, schema='prod2-generico')
total_pushed += batch_size
if limit and limit < total_pushed:
incomplete = False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-l', '--limit', default=None, type=int, required=False, help="Total patients to process")
parser.add_argument('-b', '--batch_size', default=500000, type=int, required=False, help="batch size")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
limit = args.limit
batch_size = args.batch_size
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
_s3 = S3()
pg_db = PostGre()
pg_db.open_connection()
""" calling the main function """
main(rs_db=rs_db, pg_db=pg_db, s3=_s3, limit=limit, batch_size=batch_size)
# Closing the DB Connection
rs_db.close_connection()
pg_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/behaviour-segnment/behaviour_segment.py | behaviour_segment.py |
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
sys.path.append('../../../..')
# sys.path.insert(0,'/Users/tusharuike/ETL')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.django.api import Django
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper import helper
from zeno_etl_libs.utils.ipc2.forecast_main import ipc_forecast
from zeno_etl_libs.utils.ipc2.safety_stock import safety_stock_calc
from zeno_etl_libs.utils.ipc2.portfolio_consolidation import wh_consolidation, \
goodaid_consolidation, D_class_consolidation
from zeno_etl_libs.utils.ipc.store_portfolio_additions import generic_portfolio
from zeno_etl_libs.utils.ipc.npi_exclusion import omit_npi_drugs
from zeno_etl_libs.utils.ipc2.post_processing import post_processing
from zeno_etl_libs.utils.ipc2.helpers.correction_flag import compare_df, \
add_correction_flag
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.utils.ipc2.helpers.outlier_check import check_oup_outlier
def main(debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
v3_active_flag, v4_active_flag, v5_active_flag, v6_active_flag,
d_class_consolidation, wh_gen_consolidation, goodaid_ss_flag,
keep_all_generic_comp, omit_npi, ga_inv_weight, rest_inv_weight,
top_inv_weight, v6_type_list, v6_ptr_cut_off, open_po_turbhe_active,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
outlier_check, rs_db_read, rs_db_write, read_schema, write_schema,
s3, django, logger):
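    """Run the IPC 2.0 forecast and safety-stock reset for each store in reset_stores.

    Applies the configured portfolio corrections (warehouse generic, GoodAid,
    D-class, all-generic-composition, NPI omission), post-processes the safety
    stock and, when debug_mode is 'N', writes the outputs to Redshift, updates
    drug-order-info and reschedules the store in the ops-oracle tool. Returns
    the run status plus the dataframes used for e-mail reporting.
    """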
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
    # Define empty return variables up front in case of failure
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
df_outliers_all = pd.DataFrame()
manual_doid_upd_all = pd.DataFrame()
try:
for store_id in reset_stores:
logger.info(f"Running for store id: {store_id} and reset date: {reset_date}")
if not type_list:
type_list = str(
list(reset_store_ops.loc[reset_store_ops['store_id'] ==
store_id, 'type'].unique()))
type_list = type_list.replace('[', '(').replace(']', ')')
# RUNNING IPC2.0 FORECAST PIPELINE
logger.info("Forecast Pipeline starts...")
agg_fcst, cal_sales, weekly_fcst, seg_df, drug_class = ipc_forecast(
store_id, reset_date, type_list, read_schema, rs_db_read,
logger)
# SAFETY STOCK CALCULATIONS
logger.info("Safety Stock Calculations starts...")
safety_stock_df = safety_stock_calc(
agg_fcst, cal_sales, store_id, reset_date, v3_active_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
v4_active_flag, drug_type_list_v4, v5_active_flag,
open_po_turbhe_active, read_schema, rs_db_read, logger)
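            # Each correction step below follows the same pattern: snapshot the SS frame,
            # apply the adjustment, log the total order_upto_point before and after, and
            # tag every changed drug with a correction flag (WH / GA / DCC / AG / NPI).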
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
logger.info("WH Generic Consolidation starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = wh_consolidation(
safety_stock_df, rs_db_read, read_schema, logger)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'WH')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
logger.info("GA SS Modification starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = goodaid_consolidation(
safety_stock_df, rs_db_read, read_schema, logger)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'GA')
# D-CLASS SKU CONSOLIDATION
if d_class_consolidation == 'Y':
logger.info("D Class Consolidation starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = D_class_consolidation(
safety_stock_df, store_id, rs_db_read, read_schema, logger)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'DCC')
# KEEP ALL GENERIC COMPOSITIONS IN STORE
if keep_all_generic_comp == 'Y':
logger.info("All Generic Composition starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = generic_portfolio(safety_stock_df, rs_db_read,
read_schema, logger)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'AG')
# OMIT NPI DRUGS
if omit_npi == 'Y':
logger.info("Omit NPI starts")
df_pre_corr = safety_stock_df.copy()
safety_stock_df = omit_npi_drugs(safety_stock_df, store_id,
reset_date, rs_db_read,
read_schema, logger)
df_post_corr = safety_stock_df.copy()
logger.info(f"Sum OUP before: {df_pre_corr['order_upto_point'].sum()}")
logger.info(f"Sum OUP after: {df_post_corr['order_upto_point'].sum()}")
corr_drug_lst = compare_df(df_pre_corr, df_post_corr, logger)
safety_stock_df = add_correction_flag(safety_stock_df, corr_drug_lst, 'NPI')
# POST PROCESSING AND ORDER VALUE CALCULATIONS
logger.info("Post Processing starts")
safety_stock_df, order_value, weekly_fcst, \
seg_df = post_processing(safety_stock_df, weekly_fcst, seg_df,
store_id, read_schema, rs_db_read,
logger)
order_value_all = order_value_all.append(order_value, ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc2-weekly-forecast
weekly_fcst['store_id'] = weekly_fcst['store_id'].astype(int)
weekly_fcst['drug_id'] = weekly_fcst['drug_id'].astype(int)
weekly_fcst['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
weekly_fcst['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['created-by'] = 'etl-automation'
weekly_fcst['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
weekly_fcst['updated-by'] = 'etl-automation'
weekly_fcst.columns = [c.replace('_', '-') for c in
weekly_fcst.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc2-weekly-forecast',
schema=write_schema)
columns = list(table_info['column_name'])
weekly_fcst = weekly_fcst[columns] # required column order
logger.info("Writing to table: ipc2-weekly-forecast")
s3.write_df_to_db(df=weekly_fcst,
table_name='ipc2-weekly-forecast',
db=rs_db_write, schema=write_schema)
# writing table ipc2-safety-stock
safety_stock_df['store_id'] = safety_stock_df['store_id'].astype(int)
safety_stock_df['drug_id'] = safety_stock_df['drug_id'].astype(int)
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in
safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc2-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: ipc2-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='ipc2-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table ipc2-segmentation
seg_df['store_id'] = seg_df['store_id'].astype(int)
seg_df['drug_id'] = seg_df['drug_id'].astype(int)
seg_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
seg_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df['created-by'] = 'etl-automation'
seg_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
seg_df['updated-by'] = 'etl-automation'
seg_df.columns = [c.replace('_', '-') for c in seg_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='ipc2-segmentation',
schema=write_schema)
columns = list(table_info['column_name'])
seg_df = seg_df[columns] # required column order
logger.info("Writing to table: ipc2-segmentation")
s3.write_df_to_db(df=seg_df,
table_name='ipc2-segmentation',
db=rs_db_write, schema=write_schema)
logger.info("All writes to RS-DB completed!")
# OUP OUTLIER CHECK
if outlier_check == 'Y':
logger.info("Outlier detection starts")
outlier_drugs, df_outliers, \
manual_doid_upd_df = check_oup_outlier(
safety_stock_df, store_id, reset_date, rs_db_read,
read_schema)
df_outliers_all = df_outliers_all.append(df_outliers)
manual_doid_upd_all = manual_doid_upd_all.append(manual_doid_upd_df)
else:
outlier_drugs = []
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in safety_stock_df.columns]
ss_data_upload = safety_stock_df.loc[
(safety_stock_df["order_upto_point"] > 0) &
(~safety_stock_df["drug_id"].isin(outlier_drugs))]
ss_data_upload = ss_data_upload[['store_id', 'drug_id',
'safety_stock', 'reorder_point', 'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
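                # Only non-outlier drugs with a positive order-upto-point are pushed;
                # doid_update writes the corrected min/ss/max into drug-order-info and
                # returns new and missed entries for reporting.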
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema,
logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
# INTERNAL TABLE SCHEDULE UPDATE - OPS ORACLE
logger.info(f"Rescheduling SID:{store_id} in OPS ORACLE")
if isinstance(reset_store_ops, pd.DataFrame):
content_type = 74
object_id = reset_store_ops.loc[
reset_store_ops[
'store_id'] == store_id, 'object_id'].unique()
for obj in object_id:
request_body = {"object_id": int(obj),
"content_type": content_type}
api_response, _ = django.django_model_execution_log_create_api(
request_body)
reset_store_ops.loc[
reset_store_ops['object_id'] == obj,
'api_call_response'] = api_response
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"IPC code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"IPC code execution status: {status}")
return status, order_value_all, new_drug_entries, missed_entries,\
df_outliers_all, manual_doid_upd_all
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected],[email protected]",
type=str, required=False)
parser.add_argument('-d', '--debug_mode', default="N", type=str,
required=False)
parser.add_argument('-exsto', '--exclude_stores',
default=[52, 60, 92, 243, 281], nargs='+', type=int,
required=False)
parser.add_argument('-gad', '--goodaid_ss_flag', default="Y", type=str,
required=False)
parser.add_argument('-giw', '--ga_inv_weight', default=0.5, type=float,
required=False)
parser.add_argument('-riw', '--rest_inv_weight', default=0.0, type=float,
required=False)
parser.add_argument('-tiw', '--top_inv_weight', default=1, type=float,
required=False)
parser.add_argument('-dcc', '--d_class_consolidation', default="Y", type=str,
required=False)
parser.add_argument('-wgc', '--wh_gen_consolidation', default="Y", type=str,
required=False)
parser.add_argument('-v4', '--v4_active_flag', default="Y", type=str,
required=False)
parser.add_argument('-v5', '--v5_active_flag', default="N", type=str,
required=False)
parser.add_argument('-v6', '--v6_active_flag', default="N", type=str,
required=False)
parser.add_argument('-v6lst', '--v6_type_list',
default=['ethical', 'generic', 'others'], nargs='+',
type=str, required=False)
parser.add_argument('-v6ptr', '--v6_ptr_cut_off', default=400, type=int,
required=False)
parser.add_argument('-rd', '--reset_date', default="YYYY-MM-DD", type=str,
required=False)
parser.add_argument('-rs', '--reset_stores',
default=[0], nargs='+', type=int,
required=False)
parser.add_argument('-v3', '--v3_active_flag', default="Y", type=str,
required=False)
parser.add_argument('-v3sp', '--corrections_selling_probability_cutoff',
default="{'ma_less_than_2': 0.40, 'ma_more_than_2' : 0.40}",
type=str, required=False)
parser.add_argument('-v3cp', '--corrections_cumulative_probability_cutoff',
default="{'ma_less_than_2':0.50,'ma_more_than_2':0.63}",
type=str, required=False)
parser.add_argument('-v4tl', '--drug_type_list_v4',
default="{'generic':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',"
"'ethical':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',"
"'others':'{0:[0,0,0], 1:[0,1,2], 2:[0,1,2],3:[1,2,3]}'}",
type=str, required=False)
parser.add_argument('-npi', '--omit_npi', default='Y', type=str,
required=False)
parser.add_argument('-kagc', '--keep_all_generic_comp', default='Y',
type=str, required=False)
parser.add_argument('-oc', '--outlier_check', default='Y',
type=str, required=False)
parser.add_argument('-opta', '--open_po_turbhe_active', default='N',
type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
debug_mode = args.debug_mode
# JOB EXCLUSIVE PARAMS
exclude_stores = args.exclude_stores
goodaid_ss_flag = args.goodaid_ss_flag
ga_inv_weight = args.ga_inv_weight
rest_inv_weight = args.rest_inv_weight
top_inv_weight = args.top_inv_weight
d_class_consolidation = args.d_class_consolidation
wh_gen_consolidation = args.wh_gen_consolidation
v5_active_flag = args.v5_active_flag
v6_active_flag = args.v6_active_flag
v6_type_list = args.v6_type_list
v6_ptr_cut_off = args.v6_ptr_cut_off
reset_date = args.reset_date
reset_stores = args.reset_stores
v3_active_flag = args.v3_active_flag
v4_active_flag = args.v4_active_flag
corrections_selling_probability_cutoff = args.corrections_selling_probability_cutoff
corrections_cumulative_probability_cutoff = args.corrections_cumulative_probability_cutoff
drug_type_list_v4 = args.drug_type_list_v4
omit_npi = args.omit_npi
keep_all_generic_comp = args.keep_all_generic_comp
outlier_check = args.outlier_check
open_po_turbhe_active = args.open_po_turbhe_active
# EVALUATE REQUIRED JSON PARAMS
corrections_selling_probability_cutoff = literal_eval(
corrections_selling_probability_cutoff)
corrections_cumulative_probability_cutoff = literal_eval(
corrections_cumulative_probability_cutoff)
drug_type_list_v4 = literal_eval(drug_type_list_v4)
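# initialize logger, S3/Django helpers and Redshift read/write connections used by main()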
logger = get_logger()
s3 = S3()
django = Django()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
if reset_stores == [0]: # Fetch scheduled IPC stores from OPS ORACLE
store_query = """
select "id", name, "opened-at" as opened_at
from "{read_schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {0}
""".format(str(exclude_stores).replace('[', '(').replace(']', ')'),
read_schema=read_schema)
stores = rs_db_read.get_df(store_query)
# only stores aged > 3 months are eligible
store_id = stores.loc[dt.datetime.now() -
stores['opened_at'] >
dt.timedelta(days=90), 'id'].values
# QUERY TO GET SCHEDULED STORES AND TYPE FROM OPS ORACLE
pg_internal = PostGre(is_internal=True)
pg_internal.open_connection()
reset_store_query = """
SELECT
"ssr"."id" as object_id,
"s"."bpos_store_id" as store_id,
"dc"."slug" as type,
"ssr"."drug_grade"
FROM
"safety_stock_reset_drug_category_mapping" ssr
INNER JOIN "ops_store_manifest" osm
ON ( "ssr"."ops_store_manifest_id" = "osm"."id" )
INNER JOIN "retail_store" s
ON ( "osm"."store_id" = "s"."id" )
INNER JOIN "drug_category" dc
ON ( "ssr"."drug_category_id" = "dc"."id")
WHERE
(
( "ssr"."should_run_daily" = TRUE OR
"ssr"."trigger_dates" && ARRAY[ date('{reset_date}')] )
AND "ssr"."is_auto_generate" = TRUE
AND "osm"."is_active" = TRUE
AND "osm"."is_generate_safety_stock_reset" = TRUE
AND "dc"."is_safety_stock_reset_enabled" = TRUE
AND "dc"."is_active" = TRUE
AND s.bpos_store_id in {store_list}
)
""".format(
store_list=str(list(store_id)).replace('[', '(').replace(']', ')'),
reset_date=reset_date)
reset_store_ops = pd.read_sql_query(reset_store_query,
pg_internal.connection)
pg_internal.close_connection()
reset_store_ops['api_call_response'] = False
reset_stores = reset_store_ops['store_id'].unique()
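# in scheduled mode the eligible drug types come per-store from reset_store_ops, so no global type_list is needed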
type_list = None
else:
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
reset_store_ops = None
""" calling the main function """
status, order_value_all, new_drug_entries, missed_entries, \
df_outliers_all, manual_doid_upd_all = main(
debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
v3_active_flag, v4_active_flag, v5_active_flag, v6_active_flag,
d_class_consolidation, wh_gen_consolidation, goodaid_ss_flag,
keep_all_generic_comp, omit_npi, ga_inv_weight, rest_inv_weight,
top_inv_weight, v6_type_list, v6_ptr_cut_off, open_po_turbhe_active,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
outlier_check, rs_db_read, rs_db_write, read_schema, write_schema,
s3, django, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# save email attachments to s3
order_value_all_uri = s3.save_df_to_s3(order_value_all,
file_name=f"order_value_all_{reset_date}.csv")
new_drug_entries_uri = s3.save_df_to_s3(new_drug_entries,
file_name=f"new_drug_entries_{reset_date}.csv")
missed_entries_uri = s3.save_df_to_s3(missed_entries,
file_name=f"missed_entries_{reset_date}.csv")
df_outliers_all_uri = s3.save_df_to_s3(df_outliers_all,
file_name=f"df_outliers_all_{reset_date}.csv")
manual_doid_upd_all_uri = s3.save_df_to_s3(manual_doid_upd_all,
file_name=f"manual_doid_upd_all_{reset_date}.csv")
# SEND EMAIL ATTACHMENTS (IPC-RUN STATUS)
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"IPC2.0 SS Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Reset Stores: {reset_stores}
Job Params: {args}
""",
to_emails=email_to, file_uris=[order_value_all_uri,
new_drug_entries_uri,
missed_entries_uri])
# SEND EMAIL ATTACHMENTS (OUTLIER WARNING)
outlier_count = df_outliers_all.shape[0]
if outlier_count > 0:
outlier_order_qty = df_outliers_all["to_order_quantity"].sum()
outlier_order_val = round(df_outliers_all["to_order_value"].sum(), 2)
outlier_stores = list(df_outliers_all["store_id"].unique())
email.send_email_file(
subject=f"IPC2.0 OUTLIER WARNING (SM-{env}) {reset_date}: "
f"Cases {outlier_count}",
mail_body=f"""
Stores: {outlier_stores}
Cases: {outlier_count}
Order Quantity: {outlier_order_qty}
Order Value: {outlier_order_val}
Note: For the detected cases SS, ROP & OUP is set to 0.
Please verify and upload attached file using DOID-GLUE JOB.
""",
to_emails=email_to, file_uris=[df_outliers_all_uri,
manual_doid_upd_all_uri])
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ipc2/ipc2_ss_main.py | ipc2_ss_main.py |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
from zeno_etl_libs.logger import get_logger
from dateutil.tz import gettz
import numpy as np
import pandas as pd
import datetime as dt
import argparse
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
table_name = 'goodaid-atc-sr'
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# fetching data from Gsheet
gs = GoogleSheet()
data = gs.download(data={
"spreadsheet_id": "1JMt8oICcodWbzHqQg3DckHKFrJAR0vXmFVXOunW-38Q",
"sheet_name": "Sheet1",
"listedFields": []
})
data = pd.DataFrame(data)
data['drug_id'] = data['drug_id'].astype(str).astype(int)
logger.info("Data: G-sheet data fetched successfully")
logger.info(len(data))
data.drop(['drug_name', 'composition'], axis=1, inplace=True)
drug_id_list = data.drug_id.unique()
drug_id_list = tuple(drug_id_list)
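# fetch drug name and composition for the listed GoodAid drugs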
query = '''
select id as "drug_id", "drug-name", composition from "prod2-generico".drugs d where id in {} '''
data_name = rs_db.get_df(query.format(drug_id_list))
data = pd.merge(left=data, right=data_name, how='inner', on='drug_id')
# providing start-date for all the drugs
query = '''
select
d.id as "drug_id",
MIN(bi."created-at") as "start-date",
d."composition-master-id"
from
"prod2-generico"."bill-items-1" bi
left join "prod2-generico"."inventory-1" i on
bi."inventory-id" = i.id
left join "prod2-generico".drugs d on
i."drug-id" = d.id
where
d."company-id" = 6984
and d.id in {}
and bi."created-at" is not null
group by
d.id,
d."composition-master-id" '''
min_date = rs_db.get_df(query.format(drug_id_list))
logger.info("Data: min-composition start date fetched successfully")
logger.info(len(min_date))
merged = pd.merge(left=data, right=min_date, how='inner', on='drug_id')
merged['start-date'] = pd.to_datetime(merged['start-date']).dt.date
merged['start-date'] = pd.to_datetime(merged['start-date'])
logger.info(len(merged))
# providing composition wise lot and rank
gaid_comp_min_date = f'''
select
MIN(bi."created-at") as "min-bill-date",
d."composition-master-id"
from
"prod2-generico"."bill-items-1" bi
left join "prod2-generico"."inventory-1" i on
bi."inventory-id" = i.id
left join "prod2-generico".drugs d on
i."drug-id" = d.id
where
d."company-id" = 6984
group by
d."composition-master-id"
'''
min_date_comp = rs_db.get_df(gaid_comp_min_date)
min_date_comp['rank'] = min_date_comp['min-bill-date'].rank().astype(int)
min_date_comp['lot'] = (min_date_comp['rank'] / 25).apply(np.ceil).astype(int)
logger.info("Data: min-composition start date, lot and rank fetched successfully")
logger.info(len(min_date_comp))
goodaid_tagging = pd.merge(left=merged, right=min_date_comp, how='left', on='composition-master-id')
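# standardise column names to hyphen-case and stamp audit columns before the Redshift write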
goodaid_tagging.columns = goodaid_tagging.columns.str.replace(" ", "-")
goodaid_tagging.columns = goodaid_tagging.columns.str.replace("_", "-")
goodaid_tagging['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
goodaid_tagging['created-by'] = 'etl-automation'
goodaid_tagging['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
goodaid_tagging['updated-by'] = 'etl-automation'
logger.info(len(goodaid_tagging))
# =========================================================================
# Writing table in Redshift
# =========================================================================
schema = 'prod2-generico'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=goodaid_tagging[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/goodaid-atc-sr-update.py | goodaid-atc-sr-update.py |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from dateutil.tz import gettz
import numpy as np
import pandas as pd
import datetime as dt
import argparse
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
table_name = 'ethical-generic-rank'
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# removing old goodaid drugs from base data
ga_query = f'''
select
distinct ("drug-id") as "drug_id"
from
"prod2-generico"."prod2-generico"."goodaid-atc-sr" gas
where
"old-new-drug" = 'old' '''
goodaid_old_drugs = rs_db.get_df(ga_query)
drug_id_list = goodaid_old_drugs.drug_id.unique()
drug_id_list = tuple(drug_id_list)
logger.info("the number of old goodaid drugs is: " +str(len(goodaid_old_drugs)))
# fetching sales data.
query = '''
select
"drug-id" ,
"drug-name" ,
"type" ,
company,
composition ,
"company-id" ,
"goodaid-availablity-flag",
sum("revenue-value") as "revenue-value",
sum(quantity) as "quantity"
from
"prod2-generico".sales s
where
"type" in ('ethical', 'generic')
and "drug-id" not in {}
group by 1,2,3,4,5,6,7'''
base_data = rs_db.get_df(query.format(drug_id_list))
logger.info("Data: base fetched successfully: " +str(len(base_data)))
# Getting new goodaid compositions from goodaid-atc-sr table
ga_query = f'''
select
distinct ("drug-id")
from
"prod2-generico"."prod2-generico"."goodaid-atc-sr" gas
where
"old-new-drug" = 'new' '''
goodaid_drugs = rs_db.get_df(ga_query)
logger.info("Data: goodaid_drugs fetched successfully: " +str(len(goodaid_drugs)))
# identifying goodaid drugs in base data
base_data['is-goodaid'] = np.where(base_data['drug-id'].isin(goodaid_drugs['drug-id'].unique().tolist()), 1, 0)
logger.info("Data: base_data with goodaid flag fetched successfully: " +str(len(base_data)))
# rank drugs within each composition and type: GoodAid first, then by quantity sold
ethical_generic = base_data.groupby(
['composition', 'drug-id', 'drug-name', 'type', 'company', 'is-goodaid', 'goodaid-availablity-flag']).agg(
{'quantity': 'sum', 'revenue-value': 'sum'}).reset_index()
logger.info("Data: ethical_generic fetched successfully")
ethical_generic['rank'] = ethical_generic.sort_values(['is-goodaid','type','quantity'],
ascending=[False, False, False])\
.groupby(['composition', 'type']).cumcount() + 1
logger.info("Data: ethical_generic with rank fetched successfully")
# compositions with >1 good aid drug
# 'is-goodaid' is an integer flag (set via np.where above), so compare with 1, not the string '1'
ethical_generic_ex = ethical_generic[(ethical_generic['is-goodaid'] == 1)
& (ethical_generic['rank'] > 1)]
ethical_generic['exclusion'] = np.where(ethical_generic['drug-id']
.isin(ethical_generic_ex['drug-id'].unique()
.tolist()), 1, 0)
logger.info("Data: ethical_generic exclusion fetched successfully")
# excluding compositions with >1 good aid drug
ethical_generic_final = ethical_generic[(ethical_generic['exclusion'] == 0)]
logger.info("Data: ethical_generic exclusion fetched successfully")
# rank data set after exclusion
ethical_generic_final = ethical_generic_final[['composition', 'drug-id', 'drug-name', 'type', 'company',
'is-goodaid', 'goodaid-availablity-flag', 'quantity', 'revenue-value']]
ethical_generic_final['rank'] = ethical_generic_final.sort_values(['is-goodaid', 'type', 'quantity'],
ascending=[False, False, False]) \
.groupby(['composition', 'type']) \
.cumcount() + 1
ethical_generic_rank = ethical_generic_final[ethical_generic_final['composition'] != '']
logger.info("Data: ethical_generic_rank fetched successfully")
ethical_generic_rank['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
ethical_generic_rank['created-by'] = 'etl-automation'
ethical_generic_rank['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
ethical_generic_rank['updated-by'] = 'etl-automation'
# =========================================================================
# Writing table in Redshift
# =========================================================================
schema = 'prod2-generico'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=ethical_generic_rank[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/ethical-generic-rank.py | ethical-generic-rank.py |
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
import argparse
import datetime
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected],[email protected],"
"[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
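# MTD vs yesterday composition share, margins and quantities for the new diabetes (Sitagliptin) compositions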
query = '''
select
mtd."start-date",
mtd.composition,
round(mtd."MTD composition share",2) as "MTD composition share",
round(y."Yesterdays composition share",2) as "Yesterdays composition share",
round(mtd."Goodaid Margin",2) as "Goodaid Margin",
round(mtd."ethical margin",2) as "Ethical Margin",
mtd."MTD Goodaid Qty",
mtd."MTD Non GA Generic Qty",
mtd."MTD Ethical Qty",
round(mtd."MTD Goodaid Sales", 0) as "MTD Goodaid Sales",
round(mtd."MTD Non GA Generic Sales",0) as "MTD Non GA Generic Sales",
round(mtd."MTD Ethical Sales",0) as "MTD Ethical Sales",
y."Yesterdays Goodaid Qty",
y."Yesterdays Non GA Generic Qty",
y."Yesterdays Ethical Qty",
round(y."Yesterdays Goodaid Sales",0) as "Yesterdays Goodaid Sales",
round(y."Yesterdays Non GA Generic Sales",0) as "Yesterdays Non GA Generic Sales",
round(y."Yesterdays Ethical Sales", 0) as "Yesterdays Ethical Sales"
from
(select
date(g."min-bill-date") as "start-date",
s.composition ,
sum(case when s."type"= 'generic' and s."company-id"= 6984 then (s."net-quantity") else 0 end) as "MTD Goodaid Qty",
sum(case when s."type"= 'generic' and "company-id" <> 6984 then (s."net-quantity") else 0 end) as "MTD Non GA Generic Qty",
sum(case when s."type"= 'ethical' then (s."net-quantity") else 0 end) as "MTD Ethical Qty",
sum(case when s."type"= 'generic' and s."company-id"= 6984 then ("revenue-value") else 0 end) as "MTD Goodaid Sales",
sum(case when s."type"= 'generic' and s."company-id" <> 6984 then (s."revenue-value") else 0 end) as "MTD Non GA Generic Sales",
sum(case when s."type"= 'ethical' then (s."revenue-value") else 0 end) as "MTD Ethical Sales",
(("MTD Goodaid Qty"*1.0)/("MTD Goodaid Qty"*1.0+"MTD Non GA Generic Qty"*1.0+"MTD Ethical Qty"*1.0))*100 as "MTD composition share",
sum(case when s."type"= 'generic' and s."company-id"= 6984 then (s.quantity*s."purchase-rate") else 0 end) as "MTD Goodaid Cogs",
sum(case when s."type"= 'ethical' then (s.quantity *s."purchase-rate") else 0 end) as "MTD Ethical Cogs",
(("MTD Ethical Sales"-"MTD Ethical Cogs")/"MTD Ethical Sales")*100 as "ethical margin",
(("MTD Goodaid Sales"-"MTD Goodaid Cogs")/"MTD Goodaid Sales")*100 as "Goodaid Margin"
from
"prod2-generico"."prod2-generico".sales s
left join "prod2-generico"."prod2-generico"."goodaid-atc-sr" g
on s.composition = g.composition
where
s."bill-flag" = 'gross'
and s.composition in ('Sitagliptin(100mg)', 'Sitagliptin(50mg)', 'Metformin(1000mg),Sitagliptin(50mg)', 'Metformin(500mg),Sitagliptin(50mg)')
and (s."created-at") > DATE_TRUNC('day', dateadd(day, -(extract(day from current_date)), current_date))
group by 1,2) mtd
left join
(select
composition ,
sum(case when "type"= 'generic' and "company-id"= 6984 then ("net-quantity") else 0 end) as "Yesterdays Goodaid Qty",
sum(case when "type"= 'generic' and "company-id" <> 6984 then ("net-quantity") else 0 end) as "Yesterdays Non GA Generic Qty",
sum(case when "type"= 'ethical' then ("net-quantity") else 0 end) as "Yesterdays Ethical Qty",
sum(case when "type"= 'generic' and "company-id"= 6984 then ("revenue-value") else 0 end) as "Yesterdays Goodaid Sales",
sum(case when "type"= 'generic' and "company-id" <> 6984 then ("revenue-value") else 0 end) as "Yesterdays Non GA Generic Sales",
sum(case when "type"= 'ethical' then ("revenue-value") else 0 end) as "Yesterdays Ethical Sales",
(("Yesterdays Goodaid Qty"*1.0)/("Yesterdays Goodaid Qty"*1.0+"Yesterdays Non GA Generic Qty"*1.0+"Yesterdays Ethical Qty"*1.0))*100 as "Yesterdays composition share",
sum(case when s."type"= 'generic' and s."company-id"= 6984 then (s.quantity*s."purchase-rate") else 0 end) as "Yest Goodaid Cogs",
sum(case when s."type"= 'ethical' then (s.quantity *s."purchase-rate") else 0 end) as "Yest Ethical Cogs"
from
"prod2-generico"."prod2-generico".sales s
where
s."bill-flag" = 'gross'
and composition in ('Sitagliptin(100mg)', 'Sitagliptin(50mg)', 'Metformin(1000mg),Sitagliptin(50mg)', 'Metformin(500mg),Sitagliptin(50mg)')
and date("created-at") = dateadd(day, -1, current_date)
group by 1) y
on mtd.composition = y.composition '''
data = rs_db.get_df(query)
logger.info("data for the 4 composition successfully loaded")
run_date = str(datetime.datetime.now().date())
file_name = 'Goodaid New Diabetes Drug Data-{}.csv'.format(str(run_date))
# Uploading the file to s3
new_drugs = s3.save_df_to_s3(df=data, file_name=file_name)
# Sending email
subject = '''New Diabetes Drug Data '''
mail_body = '''Please find the attached file containing the data till-{}
'''.format(run_date)
file_uris = [new_drugs]
email = Email()
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=file_uris)
# deleting the old files
for uri in file_uris:
s3.delete_s3_obj(uri=uri)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/ga-drug-automailer.py | ga-drug-automailer.py |
import os
import sys
import pandas as pd
import numpy as np
import datetime
import argparse
sys.path.append('../../../..')
from datetime import date
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
# email_to = args.email_to
env = args.env
os.environ['env'] = env
email = Email()
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# incentive-lost automailer: runs every Monday; covers last 7 days, MTD and last month
if date.today().weekday() == 0:
query = '''
select
c.abo,
c."store-id" as "store_id",
sm.store as "store_name",
sum(c."total-opportunity"-c."achived") as "missed_opportunity_yesterday",
sum((c."total-opportunity"-c."achived")*gi.incentive ) as "missed_incentive_value_yesterday",
s."franchisee-email" as "store_email"
from
(
select
a.abo,
a."composition",
a."store-id",
a."date",
a."achived",
b."total-opportunity"
from
(
select
abo,
composition ,
"store-id" ,
"date",
count(distinct ("patient-id" || "composition-master-id" || "group-molecule")) as "achived"
from
"prod2-generico"."prod2-generico"."goodaid-incentive-v3" a
where
"date" >= dateadd(day, -7, current_date)
group by
abo,
composition,
"store-id",
"date"
) a
left join (
select
abo,
composition,
"store-id" ,
"date",
sum("actaul-total-opp") as "total-opportunity"
from
"prod2-generico"."prod2-generico"."goodaid-daily-store-opportunity" g
where
"date" >= dateadd(day, -7, current_date)
group by
abo,
composition,
"store-id" ,
"date") b
on
a."store-id" = b."store-id"
and a."composition" = b."composition"
and a."date" = b."date") c
left join "prod2-generico"."prod2-generico"."goodaid-incentive-rate-card" gi on
c.composition= gi.composition
left join "prod2-generico"."prod2-generico"."stores-master" sm on
c."store-id"=sm.id
left join "prod2-generico"."prod2-generico".stores s on
c."store-id" = s.id
where gi.status = 'live'
and sm."franchisee-id" = 1
and c.abo is not null
group by
c.abo, c."store-id", store, s."franchisee-email"
having missed_opportunity_yesterday >=0
order by abo; '''
incentive_data = rs_db.get_df(query)
# MTD incentive miss data
query = '''
select
c.abo,
c."store-id" as "store_id",
sm.store as "store_name",
sum(c."total-opportunity"-c."achived") as "missed_opportunity_MTD",
sum((c."total-opportunity"-c."achived")*gi.incentive ) as "missed_incentive_value_MTD",
s."franchisee-email" as "store_email"
from
(
select
a.abo,
a."composition",
a."store-id",
a."date",
a."achived",
b."total-opportunity"
from
(
select
abo,
composition ,
"store-id" ,
"date",
count(distinct ("patient-id" || "composition-master-id" || "group-molecule")) as "achived"
from
"prod2-generico"."prod2-generico"."goodaid-incentive-v3" a
where
"date" > DATE_TRUNC('day', dateadd(day, -(extract(day from current_date)), current_date))
group by
abo,
composition,
"store-id",
"date"
) a
left join (
select
abo,
composition,
"store-id" ,
"date",
sum("actaul-total-opp") as "total-opportunity"
from
"prod2-generico"."prod2-generico"."goodaid-daily-store-opportunity" g
where
"date" > DATE_TRUNC('day', dateadd(day, -(extract(day from current_date)), current_date))
group by
abo,
composition,
"store-id" ,
"date") b
on
a."store-id" = b."store-id"
and a."composition" = b."composition"
and a."date" = b."date") c
left join "prod2-generico"."prod2-generico"."goodaid-incentive-rate-card" gi on
c.composition= gi.composition
left join "prod2-generico"."prod2-generico"."stores-master" sm on
c."store-id"=sm.id
left join "prod2-generico"."prod2-generico".stores s on
c."store-id" = s.id
where gi.status = 'live'
and sm."franchisee-id" = 1
and c.abo is not null
group by
c.abo, c."store-id", store, s."franchisee-email"
having missed_opportunity_MTD >=0
order by abo; '''
incentive_data_mtd = rs_db.get_df(query)
incentive_data = incentive_data.merge(incentive_data_mtd, on=['abo', 'store_id', 'store_name',
'store_email'], how='left')
# last month incentive miss data
query = '''
select
c.abo,
c."store-id" as "store_id",
sm.store as "store_name",
sum(c."total-opportunity"-c."achived") as "missed_opportunity_lm",
sum((c."total-opportunity"-c."achived")*gi.incentive ) as "missed_incentive_value_lm",
s."franchisee-email" as "store_email"
from
(
select
a.abo,
a."composition",
a."store-id",
a."date",
a."achived",
b."total-opportunity"
from
(
select
abo,
composition ,
"store-id" ,
"date",
count(distinct ("patient-id" || "composition-master-id" || "group-molecule")) as "achived"
from
"prod2-generico"."prod2-generico"."goodaid-incentive-v3" a
where
"date" between date_trunc('month', current_date) - interval '1 month' and date_trunc('month', current_date) - interval '1 day'
group by
abo,
composition,
"store-id",
"date"
) a
left join (
select
abo,
composition,
"store-id" ,
"date",
sum("actaul-total-opp") as "total-opportunity"
from
"prod2-generico"."prod2-generico"."goodaid-daily-store-opportunity" g
where
"date" between date_trunc('month', current_date) - interval '1 month' and date_trunc('month', current_date) - interval '1 day'
group by
abo,
composition,
"store-id" ,
"date") b
on
a."store-id" = b."store-id"
and a."composition" = b."composition"
and a."date" = b."date") c
left join "prod2-generico"."prod2-generico"."goodaid-incentive-rate-card" gi on
c.composition= gi.composition
left join "prod2-generico"."prod2-generico"."stores-master" sm on
c."store-id"=sm.id
left join "prod2-generico"."prod2-generico".stores s on
c."store-id" = s.id
where gi.status = 'live'
and sm."franchisee-id" = 1
and c.abo is not null
group by
c.abo, c."store-id", store, s."franchisee-email"
having missed_opportunity_lm >=0
order by abo '''
incentive_data_lm = rs_db.get_df(query)
incentive_data = incentive_data.merge(incentive_data_lm, on=['abo', 'store_id', 'store_name', 'store_email'],
how='left')
query = '''
select
abo,
email
from
"prod2-generico"."prod2-generico"."stores-master" sm
left join "prod2-generico"."prod2-generico".users u on
sm.abo = u."name"
where abo is not null
and u."type" = 'area-business-owner'
group by
1,2
order by abo'''
abo_data = rs_db.get_df(query)
incentive_data = incentive_data.merge(abo_data, how='left', on='abo')
incentive_data = incentive_data[incentive_data['email'].notna()]
abo_list = incentive_data.abo.unique()
abo_list = tuple(abo_list)
store_list = incentive_data.store_name.unique()
store_list = tuple(store_list)
now = datetime.date.today()
then = now + datetime.timedelta(days=-7)
currentMonth = datetime.datetime.now().strftime('%m')
currentYear = datetime.datetime.now().year
datetime_object = datetime.datetime.strptime(currentMonth, "%m")
full_month_name = datetime_object.strftime("%B")
last_month = datetime.datetime.now() - pd.DateOffset(months=1)
last_month = last_month.strftime('%m')
datetime_object_lm = datetime.datetime.strptime(last_month, "%m")
full_month_name_lm = datetime_object_lm.strftime("%B")
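# send one summary email per ABO with store-wise lost-incentive numbers (last week, MTD, last month)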
for x in abo_list:
incentive_data_1 = incentive_data[incentive_data['abo'] == x]
abo_incentive_data = incentive_data_1.groupby(['abo', 'store_name', 'store_id']).agg(
{'missed_incentive_value_yesterday': 'sum', 'missed_incentive_value_mtd': 'sum', \
'missed_incentive_value_lm': 'sum'}).reset_index()
abo_incentive_data[
["missed_incentive_value_yesterday", "missed_incentive_value_mtd", "missed_incentive_value_lm"]] = \
abo_incentive_data[["missed_incentive_value_yesterday",\
"missed_incentive_value_mtd", "missed_incentive_value_lm"]].astype(np.int64)
total_incentive_missed = abo_incentive_data.missed_incentive_value_yesterday.sum()
abo_incentive_data.rename(columns={'missed_incentive_value_yesterday': f'Lost incentive last week(Rs)-{then}', \
'missed_incentive_value_mtd': f'Lost incentive (Rs) MTD {full_month_name}-{currentYear}', \
'missed_incentive_value_lm': f'Lost incentive (Rs) last Month {full_month_name_lm}-{currentYear}', \
'store_name': f'Store Name', 'store_id': f'Store ID'}, inplace=True)
email_to = str(incentive_data_1.email.iloc[0])
# Sending email
subject = f'Rs.{total_incentive_missed} Incentive Lost Last Week'
mail_body = f" Hi {(abo_incentive_data.abo.iloc[0])} your stores have lost Rs.{total_incentive_missed} last week " \
f"({str(then)}) by not substituting GoodAid drugs in your stores. \
Please try and substitute Goodaid as much as possible to earn maximum incentive."
file_uris = [s3.save_df_to_s3(df=abo_incentive_data,
file_name=f'{incentive_data_1.abo.iloc[0]} total incentive Loss on {now}.csv')]
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=file_uris)
# deleting the old files
for uri in file_uris:
s3.delete_s3_obj(uri=uri)
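# send one detailed email per store (franchisee email) with its own lost-incentive numbers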
for x in store_list:
incentive_data_2 = incentive_data[incentive_data['store_name'] == x]
store_incentive_data = incentive_data_2.groupby(['abo', 'store_name', 'store_id']).agg(
{'missed_incentive_value_yesterday': 'sum', 'missed_incentive_value_mtd': 'sum', \
'missed_incentive_value_lm': 'sum'}).reset_index()
store_incentive_data[
["missed_incentive_value_yesterday", "missed_incentive_value_mtd", "missed_incentive_value_lm"]] = \
store_incentive_data[["missed_incentive_value_yesterday", \
"missed_incentive_value_mtd", "missed_incentive_value_lm"]].astype(np.int64)
email_to = str(incentive_data_2.store_email.iloc[0])
# Sending email
subject = f'Rs.{(store_incentive_data.missed_incentive_value_yesterday.iloc[0])} Incentive Lost Last Week'
mail_body = f" Hi {(store_incentive_data.store_name.iloc[0])} store you have lost Rs.{(store_incentive_data.missed_incentive_value_yesterday.iloc[0])} last week ({str(then)})\
by not substituting GoodAid drugs in your store. Please try and substitute Goodaid as much as possible to earn maximum incentive."
store_incentive_data.rename(columns={'missed_incentive_value_yesterday': f'Lost incentive last week(Rs)-{then}',
'missed_incentive_value_mtd': f'Lost incentive (Rs) MTD '
f'{full_month_name}-{currentYear}', \
'missed_incentive_value_lm': f'Lost incentive (Rs) last Month '
f'{full_month_name_lm}-{currentYear}',
'store_name': f'Store Name', 'store_id': f'Store ID'}, inplace=True)
file_uris = [s3.save_df_to_s3(df=store_incentive_data,
file_name=f'{incentive_data_2.store_name.iloc[0]} total incentive Loss on {now}.csv')]
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=file_uris)
# deleting the old files
for uri in file_uris:
s3.delete_s3_obj(uri=uri)
# 2nd automailer: flags new GoodAid stock that arrived at stores; runs every day
query = '''
select
i."store-id" as "store_id" ,
s."name" as "store_name",
i."drug-id" as "drug_id",
d."drug-name" as "drug_name",
d.composition ,
d.company ,
s."franchisee-email" as "email",
sum(i.quantity+i."locked-quantity"+i."locked-for-check"+i."locked-for-return"+i."locked-for-audit"+
i."locked-for-transfer") as "quantity_available_at_store"
FROM
"prod2-generico"."prod2-generico"."inventory-1" i
inner join "prod2-generico"."prod2-generico".drugs d on d.id = i."drug-id"
left join "prod2-generico"."prod2-generico".stores s on i."store-id" = s.id
where d."company-id" = 6984
and s."franchisee-id" =1
GROUP by
1,2,3,4,5,6,7
HAVING
min(date(i."created-at")) = CURRENT_DATE-1 '''
new_drug_data = rs_db.get_df(query)
store_list = new_drug_data.store_name.unique()
store_list = tuple(store_list)
nl = '\n'
now = datetime.date.today()
if len(new_drug_data) > 0:
for x in store_list:
store_drug_data = new_drug_data[new_drug_data['store_name'] == x]
store_drug_data_1 = store_drug_data[['store_id', 'store_name', 'drug_id', 'drug_name', 'composition', 'company',
'quantity_available_at_store']].reset_index(drop = True)
email_to = str(store_drug_data.email.iloc[0])
# email_to = '[email protected]'
subject = f'{len(store_drug_data)} New Goodaid Drugs Arrived at your Store'
mail_body = f" Hi {(store_drug_data_1.store_name.iloc[0])} store {nl}{len(store_drug_data)} New Goodaid SKU/Drugs " \
f"have arrived at your store please start substituting."
file_uris = [s3.save_df_to_s3(df=store_drug_data_1, file_name=f'{store_drug_data_1.store_name.iloc[0]} '
f'drugs arrived at {now}.csv')]
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=file_uris, from_email='[email protected]')
# deleting the old files
for uri in file_uris:
s3.delete_s3_obj(uri=uri)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/goodaid-incentive-lost-automailer.py | goodaid-incentive-lost-automailer.py |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.logger import get_logger
import pandas as pd
import datetime
import argparse
import operator as op
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected],[email protected],"
"[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
email_to = args.email_to
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
query = '''
select
s."patient-id" ,
s."created-at" ,
s."drug-id",
s.composition ,
g.category
from
"prod2-generico"."prod2-generico".sales s
left join "prod2-generico"."prod2-generico"."goodaid-atc-sr" g on
s."drug-id" = g."drug-id"
where
s."company-id" = 6984
and
date(s."created-at") between '2021-03-01' and current_date-1 ;'''
data = rs_db.get_df(query)
data.columns = [c.replace('-', '_') for c in data.columns]
df = data
# this function returns a DataFrame containing the acquisition date and order date
def get_cohorts(df, period='M'):
df = df[['patient_id', 'created_at']].drop_duplicates()
df = df.assign(Month=df.groupby('patient_id') \
['created_at'].transform('min').dt.to_period(period))
df = df.assign(order_date=df['created_at'].dt.to_period(period))
return df
# calculates the retention of customers after their acquisition
def get_retention(df, period='M'):
df = get_cohorts(df, period).groupby(['Month', 'order_date']) \
.agg({'patient_id': 'nunique'}) \
.reset_index(drop=False).rename(columns={'patient_id': 'patients'})
df['periods'] = (df.order_date - df.Month).apply(op.attrgetter('n'))
return df
# Returns a cohort matrix
def get_cohort_matrix(df, period='M', percentage=False):
df = get_retention(df, period).pivot_table(index='Month',
columns='periods',
values='patients')
if percentage:
df = df.divide(df.iloc[:, 0], axis=0) * 100
return df
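# in the resulting matrix, rows are acquisition periods and columns are periods since acquisition;
# column 0 is the cohort size, which is copied back into the percentage views below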
# overall Cohort monthly
overall_mon = get_cohort_matrix(df, 'M', percentage=False).reset_index()
overall_mon_per = get_cohort_matrix(df, 'M', percentage=True).round(2).reset_index()
overall_mon_per[0] = overall_mon[0]
# overall Cohort quarter
overall_quat = get_cohort_matrix(df, 'Q', percentage=False).reset_index()
overall_quat_per = get_cohort_matrix(df, 'Q', percentage=True).round(2).reset_index()
overall_quat_per[0] = overall_quat[0]
# chronic cohort monthly
df = data[data['category'] == 'chronic']
chronic_mon = get_cohort_matrix(df, 'M', percentage=False).reset_index()
chronic_mon_per = get_cohort_matrix(df, 'M', percentage=True).round(2).reset_index()
chronic_mon_per[0] = chronic_mon[0]
# chronic cohort quarterly
chronic_quat = get_cohort_matrix(df, 'Q', percentage=False).reset_index()
chronic_quat_per = get_cohort_matrix(df, 'Q', percentage=True).round(2).reset_index()
chronic_quat_per[0] = chronic_quat[0]
# acute cohorts monthly
df = data[data['category'] == 'acute']
acute_mon = get_cohort_matrix(df, 'M', percentage=False).reset_index()
acute_mon_per = get_cohort_matrix(df, 'M', percentage=True).round(2).reset_index()
acute_mon_per[0] = acute_mon[0]
# acute cohort quarterly
acute_quat = get_cohort_matrix(df, 'Q', percentage=False).reset_index()
acute_quat_per = get_cohort_matrix(df, 'Q', percentage=True).round(2).reset_index()
acute_quat_per[0] = acute_quat[0]
# Formatting Excel
path = "/".join(os.getcwd().split("/")[:-2]) + "/tmp/"
if not os.path.exists(path):
os.mkdir(path, 0o777)
time_now = datetime.datetime.now().strftime('%Y-%m-%d')
currentMonth= datetime.datetime.now().strftime('%m')
currentDay = datetime.datetime.now().day
datetime_object = datetime.datetime.strptime(currentMonth, "%m")
full_month_name = datetime_object.strftime("%B")
file_name = "Cohorts_{}.xlsx".format(time_now)
local_file_full_path = path + file_name
# writing to an Excel file
with pd.ExcelWriter(local_file_full_path) as writer:
overall_mon.to_excel(writer, sheet_name='Overall Monthly', index=False)
overall_mon_per.to_excel(writer, sheet_name='Overall Monthly', index=False, startrow=len(overall_mon) + 4)
overall_quat.to_excel(writer, sheet_name='Overall Quarterly', index=False)
overall_quat_per.to_excel(writer, sheet_name='Overall Quarterly', index=False, startrow=len(overall_quat) + 4)
chronic_mon.to_excel(writer, sheet_name='Chronic Monthly', index=False)
chronic_mon_per.to_excel(writer, sheet_name='Chronic Monthly', index=False, startrow=len(chronic_mon) + 4)
chronic_quat.to_excel(writer, sheet_name='Chronic Quarterly', index=False)
chronic_quat_per.to_excel(writer, sheet_name='Chronic Quarterly', index=False, startrow=len(chronic_quat) + 4)
acute_mon.to_excel(writer, sheet_name='Acute Monthly', index=False)
acute_mon_per.to_excel(writer, sheet_name='Acute Monthly', index=False, startrow=len(acute_mon) + 4)
acute_quat.to_excel(writer, sheet_name='Acute Quarterly', index=False)
acute_quat_per.to_excel(writer, sheet_name='Acute Quarterly', index=False, startrow=len(acute_quat) + 4)
email = Email()
email.send_email_file(subject="Cohorts for {}".format(full_month_name),
mail_body='Hi Rohan please find the attached Cohorts till {}-{}'.format(currentDay,full_month_name),
to_emails=email_to, file_paths=[local_file_full_path])
# closing the connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/goodaid-retention-cohort-automailer.py | goodaid-retention-cohort-automailer.py |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
from zeno_etl_libs.logger import get_logger
import argparse
import pandas as pd
import datetime as dt
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-cb', '--created_by', default="etl-automation", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected], [email protected], [email protected],"
"[email protected]",type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
created_by = args.created_by
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
logger.info(f"user:{created_by}")
# prod creds below
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# def main(rs_db, s3):
schema = 'prod2-generico'
table_name = 'goodaid-incentive-rate-card'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# getting new drugs incentive data from s3
df = pd.read_csv(s3.download_file_from_s3(file_name="goodaid-incentive-rate/incentive-rate-card.csv"))
logger.info("number of drugs whose incentive has to be updated is: " +str(len(df)))
# data of drugs in the incentive rate card table
query = f'''
select
*
from
"prod2-generico"."goodaid-incentive-rate-card" girc
where
status = 'live'
'''
data = rs_db.get_df(query)
data.columns = [c.replace('-','_') for c in data.columns]
logger.info("number of live drugs with incentive are: " +str(len(data)))
# default incentive window: starts today and runs for one year
now = dt.date.today()
then = now + dt.timedelta(days=365)
###########################################################################################
# for drugs already present in the table whose incentive has to be updated
# checking for the existing drugs
data_drug_id= data.drug_id.unique()
bool_series = df.drug_id.isin(data_drug_id)
fill_df = df[bool_series]
logger.info("number of drugs whose incentive has to be updated are: " +str(len(fill_df)))
number_of_drugs = len(fill_df)
if number_of_drugs>0:
fill_df['incentive_start_date'] = now
fill_df['incentive_end_date']= then
fill_df['status']= 'live'
drug_id_list = fill_df.drug_id.unique()
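# build a SQL-safe IN clause; a single-element tuple would leave a trailing comma, hence the string fallback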
if len(list(drug_id_list))<=1:
logger.info(drug_id_list)
drug_id_list = str(list(drug_id_list)).replace('[', '(').replace(']', ')')
logger.info(drug_id_list)
else:
drug_id_list = tuple(drug_id_list)
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} does not exist, create the table first")
else:
print(f"Table:{table_name} exists")
update_query = f'''
update "{schema}"."{table_name}" set
"incentive-end-date" = CURRENT_DATE-1
,status = 'not live',
"updated-at" = current_date
where
"drug-id" in {drug_id_list} and status = 'live' '''
rs_db.execute(update_query)
logger.info(f"Table:{table_name} table updated")
fill_df['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
fill_df['created-by'] = created_by
fill_df['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
fill_df['updated-by'] = 'etl-automation'
fill_df.columns = [c.replace('_','-') for c in fill_df.columns]
# =========================================================================
# append table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
s3.write_df_to_db(df=fill_df[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# Providing incentive for newly added drugs
bool_series = ~df.drug_id.isin(data_drug_id)
fill_df = df[bool_series]
logger.info("number of new drugs whose incentive has to be added are: " +str(len(fill_df)))
number_of_drugs = len(fill_df)
if number_of_drugs>0:
drug_id_list = fill_df.drug_id.unique()
if len(list(drug_id_list))<=1:
logger.info(drug_id_list)
drug_id_list = str(list(drug_id_list)).replace('[', '(').replace(']', ')')
logger.info(drug_id_list)
else:
drug_id_list = tuple(drug_id_list)
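# for brand-new drugs, derive the incentive start date from the first billing date (fallback to today below)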
query = f'''
select
i."drug-id" as "drug_id",
MIN(bi."created-at") as "incentive_start_date"
from
"prod2-generico"."bill-items-1" bi
left join "prod2-generico"."inventory-1" i on
bi."inventory-id" = i.id
where
i."drug-id" in {drug_id_list}
group by
i."drug-id" '''
new_data = rs_db.get_df(query)
new_data.incentive_start_date = pd.to_datetime(new_data.incentive_start_date)
new_data['incentive_start_date'] = new_data['incentive_start_date'].dt.date
new_data['incentive_end_date'] = new_data.incentive_start_date + dt.timedelta(days= 365)
merged_df = pd.merge(fill_df, new_data, how= 'left', on = 'drug_id')
merged_df['incentive_start_date'].fillna(value=pd.to_datetime(now).strftime('%Y-%m-%d'), inplace=True)
merged_df['incentive_end_date'].fillna(value=pd.to_datetime(then).strftime('%Y-%m-%d'), inplace=True)
merged_df['status'] = 'live'
merged_df.columns = [c.replace('_','-') for c in merged_df.columns]
merged_df['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
merged_df['created-by'] = created_by
merged_df['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
merged_df['updated-by'] = 'etl-automation'
# =========================================================================
# append table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
s3.write_df_to_db(df=merged_df[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# fetch today's inserted/updated incentive rows for the summary email
query= '''
select
*
from
"prod2-generico"."goodaid-incentive-rate-card" girc
where
date("created-at")= current_date
order by
"drug-id" asc '''
updated_incentives = rs_db.get_df(query)
number_of_incentives_updated = len(updated_incentives)
file_name= 'updated_goodaid_incentives_{}.csv'.format(dt.datetime.today().strftime('%Y-%m-%d'))
if number_of_incentives_updated > 0:
# Uploading the file to s3
updated_incentives = s3.save_df_to_s3(df=updated_incentives, file_name=file_name)
# Sending email
subject = ''' Goodaid Incentives Updated'''
mail_body = '''The {} drug_ids for which the incentive was added or updated are in the attached file; please review it.
'''.format(number_of_incentives_updated)
file_uris = [updated_incentives]
email = Email()
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=file_uris)
# deleting the old files
for uri in file_uris:
s3.delete_s3_obj(uri=uri)
# archive the user-uploaded (Retool) rate-card file to S3
f_name = 'goodaid-incentive-rate/archive/incentive-rate-card_{}.csv'.format(dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S'))
s3.save_df_to_s3(df=df, file_name=f_name)
# deleting the file which was uploaded by the user on retool
uri = 's3://aws-glue-temporary-921939243643-ap-south-1/goodaid-incentive-rate/incentive-rate-card.csv'
s3.delete_s3_obj(uri=uri)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/goodaid-incentive-update.py | goodaid-incentive-update.py |
import os
import sys
import argparse
import datetime as dt
import pandas as pd
import numpy as np
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]",
type=str, required=False)
args, unknown = parser.parse_known_args()
email_to = args.email_to
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# def main(rs_db, s3):
schema = 'prod2-generico'
table_name = 'goodaid-incentive-rate-day'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# Import data
query = '''
select
"drug-id" ,
incentive ,
"incentive-start-date" ,
"incentive-end-date"
from
"prod2-generico"."prod2-generico"."goodaid-incentive-rate-card"
'''
data = rs_db.get_df(query)
logger.info(f"Base ratecard read with length {len(data)}")
logger.info(f"Unique drug-id count {data['drug-id'].nunique()}")
# Explode
# This step splits every drug-id, incentive for each day
# between incentive start date and incentive end date
data['rate_date'] = [pd.date_range(s, e, freq='d') for s, e in
zip(pd.to_datetime(data['incentive-start-date']),
pd.to_datetime(data['incentive-end-date']))]
data.columns = [c.replace('-', '_') for c in data.columns]
data_export = pd.DataFrame({'drug_id': data.drug_id.repeat(data.rate_date.str.len()),
'incentive': data.incentive.repeat(data.rate_date.str.len()),
'rate_date': np.concatenate(data.rate_date.values)})
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
logger.info(f"Exploded ratecard read with length {len(data_export)}")
logger.info(f"Unique drug-id count {data_export['drug-id'].nunique()}")
data_export['rate-date'] = data_export['rate-date'].dt.date
data_export['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['created-by'] = 'etl-automation'
data_export['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['updated-by'] = 'etl-automation'
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=data_export[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
data['Check_1'] = (pd.to_datetime(data['incentive_end_date']) - pd.to_datetime(data['incentive_start_date'])).dt.days
x = sum(data.Check_1) + len(data)
diff = len(data_export) - x
if diff != 0:  # any mismatch (extra or missing rows) should trigger the alert
# Sending email
subject = ''' Error in Goodaid Incentive Explode '''
mail_body = '''There is a error in goodaid ratecard explode please review it.'''
email = Email()
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/goodaid-ratecard-explode.py | goodaid-ratecard-explode.py |
#!/usr/bin/env python
# coding: utf-8
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
from zeno_etl_libs.logger import get_logger
import argparse
import pandas as pd
import datetime as dt
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
# def main(rs_db, s3):
schema = 'prod2-generico'
table_name = 'goodaid-dfc'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
# =============================================================================
# Active Composition at warehouse
# =============================================================================
query = f'''
select
a."drug-id" , d.composition , d."composition-master-id"
from
"prod2-generico"."prod2-generico"."wh-sku-subs-master" a
inner join
"prod2-generico"."prod2-generico".drugs d on
d.id = a."drug-id"
where
d."company-id" = 6984
and a."add-wh" = 'Yes'
and d."type" != 'discontinued-products'
group by a."drug-id" , d.composition , d."composition-master-id" '''
ga_active_compositions= rs_db.get_df(query)
ga_active_compositions.columns = [c.replace('-', '_') for c in ga_active_compositions.columns]
g_drugs = tuple(map(int, list(ga_active_compositions['drug_id'].unique())))
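# keep only compositions that have a composition-master-id before building the SQL IN list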
ga_active_compositions = ga_active_compositions[~ga_active_compositions['composition_master_id'].isna()]
gaid_compositions = tuple(map(str, list(ga_active_compositions['composition_master_id'].apply(pd.to_numeric, errors='ignore').astype('Int64').unique())))
logger.info("Data: ga_active_compositions, and ga_active_drugs fetched successfully: " +str(len(ga_active_compositions)))
# =============================================================================
# Net sales of GAID Drugs
# =============================================================================
query_drug_level = '''
select
s."store-id" ,
s."drug-id" ,
d."drug-name" ,
max(gas."old-new-drug") as "goodaid-old-new-drugs",
d.composition ,
d."composition-master-id" ,
SUM(s."quantity") as "total_quantity",
SUM(s.rate * s."quantity") as "total_revenue",
MIN(b."opened-at") as "store_opened_at"
from
"prod2-generico"."prod2-generico".sales s
left join "prod2-generico"."prod2-generico".stores b
on
s."store-id" = b.id
left join "prod2-generico"."prod2-generico".drugs d
on
s."drug-id" = d.id
left join "prod2-generico"."prod2-generico"."goodaid-atc-sr" gas
on
s."drug-id" = gas."drug-id"
where
"bill-flag" = 'gross'
and s."drug-id" in {}
and s."company-id" = 6984
and DATE(s."created-at") >= CURRENT_DATE - interval '31days'
group by
s."store-id" ,
s."drug-id" ,
d."drug-name" ,
d."composition-master-id",
d.composition '''
drug_sales= rs_db.get_df(query_drug_level.format(g_drugs))
drug_sales.columns = [c.replace('-', '_') for c in drug_sales.columns]
logger.info("Data: drugs sales data fetched successfully: " + str(len(drug_sales)))
# =============================================================================
# Net sales of GAID compositions
# =============================================================================
query = '''
select
s."store-id" ,
d.composition ,
d."composition-master-id" ,
SUM(case
when s."company-id" = 6984 then (s."quantity")
else 0
end) as "goodaid_quantity",
SUM(case
when s."type" = 'ethical' then (s."quantity")
else 0
end) as "ethical_quantity",
SUM(case
when s."type" = 'generic' then (s."quantity")
else 0
end) as "total_generic_quantity",
SUM(case
when s."company-id" = 6984 then (s.rate * s."quantity")
else 0
end) as "goodaid_revenue_value",
SUM(case
when datediff(day, s."created-at", (current_date- 1)) <= 15 then s.quantity
else 0
end) as "total_quantity_15d",
SUM(case
when datediff(day, s."created-at", (current_date- 1)) <= 15 then (s.rate * s."quantity")
else 0
end) as "total_revenue_15d",
SUM(case
when
s."company-id" = 6984
and datediff(day, s."created-at", (current_date- 1)) <= 15
then
(s."quantity")
else 0
end) as "goodaid_quantity_15d",
SUM(case
when
s."type" = 'ethical'
and datediff(day, s."created-at", (current_date- 1)) <= 15
then
(s."quantity")
else 0
end) as "ethical_quantity_15d",
SUM(case
when
s."type" = 'generic'
and datediff(day, s."created-at", (current_date- 1)) <= 15
then
(s."quantity")
else 0
end) as "total_generic_quantity_15d",
SUM(case
when
s."company-id" = 6984
and datediff(day, s."created-at", (current_date- 1)) <= 15
then
(s.rate * s."quantity")
else 0
end) as "goodaid_revenue_value_15"
from
"prod2-generico"."prod2-generico".sales s
left join "prod2-generico"."prod2-generico".stores b
on
s."store-id" = b.id
left join "prod2-generico"."prod2-generico".drugs d
on
d.id = s."drug-id"
where
"bill-flag" = 'gross'
and d."composition-master-id" in {}
and DATE(s."created-at") >= CURRENT_DATE - interval '31days'
group by
s."store-id" ,
d."composition-master-id",
d.composition '''
first_purchase= rs_db.get_df(query.format(gaid_compositions))
first_purchase.columns = [c.replace('-', '_') for c in first_purchase.columns]
logger.info("Data: purchase data fetched successfully: " + str(len(first_purchase)))
first_purchase = drug_sales.merge(first_purchase,how = 'left', on = ['store_id','composition','composition_master_id'])
# =============================================================================
# Goodaid-comp-store first bill
# =============================================================================
query = """
select
"store-id" ,
s."drug-id" ,
min("created-at") as "ga_comp_first_bill"
from
"prod2-generico"."prod2-generico".sales s
where
s."drug-id" in {}
and "company-id" = 6984
and date("created-at")>= '2021-02-01'
group by
"store-id" ,
s."drug-id"
"""
comp_first_b= rs_db.get_df(query.format(g_drugs))
comp_first_b.columns= [c.replace('-','_') for c in comp_first_b.columns]
logger.info("Data: comp_first_b fetched successfully: " + str(len(comp_first_b)))
first_purchase = pd.merge(left=first_purchase, right=comp_first_b, on=['store_id', 'drug_id'], how='left')
def month_diff(a, b):
return 12 * (a.year - b.dt.year) + (a.month - b.dt.month)
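# Classify stores as new if opened within the last 3 months, and drugs as new if their
# first GoodAid bill at the store was within the last 60 days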
first_purchase['store_age_months'] = month_diff(dt.datetime.now(), first_purchase['store_opened_at'])
first_purchase['store_type'] = np.where(first_purchase['store_age_months'] <= 3, 'new_store', 'old_store')
first_purchase['drug_age_days'] = (dt.datetime.now() - first_purchase['ga_comp_first_bill']).dt.days
first_purchase['drug_age_days'].fillna(0,inplace=True)
first_purchase['drug_type'] = np.where(first_purchase['drug_age_days'] <= 60, 'new_drug', 'old_drug')
# DFC calculation to be written post sign-off from the GoodAid team
first_purchase['ga_share_30'] = first_purchase['total_quantity'] / (
first_purchase['total_generic_quantity']) * 100
first_purchase['ga_share_15'] = first_purchase['goodaid_quantity_15d'] / (
first_purchase['total_generic_quantity_15d']) * 100
first_purchase.replace([np.inf, -np.inf], 0, inplace=True)
first_purchase['ga_share_30'].fillna(0, inplace=True)
first_purchase['ga_share_15'].fillna(0, inplace=True)
# conditions = [
# (
# (first_purchase['store_type'] == 'old_store') &
# (first_purchase['drug_type'] == 'old_drug') &
# (first_purchase['ga_share_30'] >= 50)
# ),
# (
# (first_purchase['store_type'] == 'old_store') &
# (first_purchase['drug_type'] == 'old_drug') &
# (first_purchase['ga_share_30'] < 50)
# ),
# (
# (first_purchase['store_type'] == 'old_store') &
# (first_purchase['drug_type'] == 'new_drug') &
# (first_purchase['ga_share_15'] >= 50)
# ),
# (
# (first_purchase['store_type'] == 'old_store') &
# (first_purchase['drug_type'] == 'new_drug') &
# (first_purchase['ga_share_15'] < 50)
# ),
#
# (
# (first_purchase['store_type'] == 'new_store') &
# (first_purchase['drug_type'] == 'new_drug')
# ),
# (
# (first_purchase['store_type'] == 'new_store') &
# (first_purchase['drug_type'] == 'old_drug')
# )
# ]
# choices = [first_purchase['goodaid_quantity'] / 30,
# 0.5 * first_purchase['total_generic_quantity_15d'] / 15,
# first_purchase['goodaid_quantity_15d'] / 15,
# 0.5 * first_purchase['total_generic_quantity_15d'] / 15,
# first_purchase['total_generic_quantity_15d'] / 15,
# first_purchase['total_generic_quantity'] / 30]
conditions = [True]
choices = [first_purchase['goodaid_quantity'] / 30]
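# Simplified DFC for now: daily forecast = past ~30 days' GoodAid quantity / 30,
# floored at 0.01 below so downstream consumers never see a zero/NaN forecast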
first_purchase['dfc_val'] = np.select(conditions, choices, default=0.01)
first_purchase['dfc_val'].fillna(0.01, inplace=True)
first_purchase['dfc_val'] = np.where(first_purchase['dfc_val'] == 0, 0.01, first_purchase['dfc_val'])
logger.info("Data: first_purchase table fetched successfully: " +str(len(first_purchase)))
goodaid_dfc=first_purchase
goodaid_dfc.columns= [c.replace('_', '-') for c in goodaid_dfc.columns]
goodaid_dfc['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
goodaid_dfc['created-by'] = 'etl-automation'
goodaid_dfc['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
goodaid_dfc['updated-by'] = 'etl-automation'
goodaid_dfc = goodaid_dfc.astype({'drug-age-days':'int'})
goodaid_dfc['goodaid-quantity'] = goodaid_dfc['goodaid-quantity'].fillna(0)
goodaid_dfc['ethical-quantity'] = goodaid_dfc['ethical-quantity'].fillna(0)
goodaid_dfc['total-generic-quantity'] = goodaid_dfc['total-generic-quantity'].fillna(0)
goodaid_dfc['total-quantity-15d'] = goodaid_dfc['total-quantity-15d'].fillna(0)
goodaid_dfc['goodaid-quantity-15d'] = goodaid_dfc['goodaid-quantity-15d'].fillna(0)
goodaid_dfc['ethical-quantity-15d'] = goodaid_dfc['ethical-quantity-15d'].fillna(0)
goodaid_dfc['total-generic-quantity-15d'] = goodaid_dfc['total-generic-quantity-15d'].fillna(0)
goodaid_dfc = goodaid_dfc.astype({'goodaid-quantity':'int',
'ethical-quantity':'int',
'total-generic-quantity':'int',
'total-quantity-15d':'int',
'goodaid-quantity-15d':'int',
'ethical-quantity-15d':'int',
'total-generic-quantity-15d':'int'})
logger.info("Data: goodaid_dfc table fetched successfully: " +str(len(goodaid_dfc)))
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db_write.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=goodaid_dfc[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/goodaid-dfc.py | goodaid-dfc.py |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
from zeno_etl_libs.logger import get_logger
import argparse
import pandas as pd
import datetime as dt
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# def main(rs_db, s3):
schema = 'prod2-generico'
table_name = 'goodaid-incentive-v3'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# =============================================================================
# Stores Master
# =============================================================================
query = '''
select
id as "store_id",
"store-type" ,
city ,
store as "store_name",
"line-manager" ,
abo
from
"prod2-generico"."prod2-generico"."stores-master" sm '''
store_master = rs_db.get_df(query)
store_master.columns = [c.replace('-', '_') for c in store_master.columns]
logger.info('Data: store_master data fetched successfully: ' + str(len(store_master)))
# =============================================================================
# Existing Good-Aid composition
# =============================================================================
query = f'''
select
distinct(d.composition),d."composition-master-id"
from
"prod2-generico"."prod2-generico"."wh-sku-subs-master" a
inner join
"prod2-generico"."prod2-generico".drugs d on
d.id = a."drug-id"
where
d."company-id" = 6984
and a."add-wh" = 'Yes'
and d."type" = 'generic'
group by d.composition,d."composition-master-id" '''
ga_active_compositions = rs_db.get_df(query)
ga_active_compositions.columns = [c.replace('-', '_') for c in ga_active_compositions.columns]
g_composition = tuple(map(int, list(ga_active_compositions['composition_master_id'].unique())))
compositions = tuple([str(i) for i in ga_active_compositions['composition']])
logger.info('Data: ga_active_compositions fetched successfully: ' + str(len(ga_active_compositions)))
# =============================================================================
# Base data
# =============================================================================
query = '''
select
date(s."created-at") as "date",
s."created-at" as "date_time",
s."bill-id" ,
s."patient-id" ,
s."store-id" ,
s."drug-id" ,
d.company ,
d."company-id" ,
d.composition ,
d."composition-master-id" ,
(case when s."bill-flag"= 'gross' then 1 else -1 end) as "bill_flag",
sum(s.rate * s."net-quantity") as "sales",
sum(s."net-quantity") as "quantity",
sum(s."revenue-value") as "total revenue"
from
"prod2-generico"."prod2-generico".sales s
left join "prod2-generico"."prod2-generico".drugs d on s."drug-id" = d.id
where
d."company-id" = 6984
and d.composition is not null
and d."composition-master-id" is not null
group by
date(s."created-at") ,
(s."created-at") ,
s."bill-id" ,
s."patient-id" ,
s."store-id" ,
s."drug-id" ,
d.company ,
d."company-id" ,
d.composition ,
d."composition-master-id" ,
s."bill-flag" '''
base_data = rs_db.get_df(query)
base_data.columns = [c.replace('-', '_') for c in base_data.columns]
logger.info("Data: base data successfully fetched: " + str(base_data.shape))
base_data = pd.merge(left=base_data, right=store_master, on=['store_id'], how='left')
logger.info('Shape of base data is' + str(base_data.shape))
# =============================================================================
# Group Molecule
# =============================================================================
g_query = '''
select
cm."composition-master-id" as "composition-master-id",
LISTAGG( distinct cm."molecule-master-id" , '-' )
WITHIN GROUP (ORDER by cm."molecule-master-id" ) as "group_molecule",
listagg(distinct mm."name" ,'-' ) as "group_molecule_text"
from
"prod2-generico"."prod2-generico"."composition-master-molecules-master-mapping" cm
inner join "prod2-generico"."prod2-generico"."molecule-master" mm
on
cm."molecule-master-id" = mm.id
where
cm."composition-master-id" is not null
group by
cm."composition-master-id"
'''
group_molecule = rs_db.get_df(g_query)
logger.info("Data: group_molecule table fetched successfully" + str(group_molecule.shape))
group_molecule.columns = [c.replace('-', '_') for c in group_molecule.columns]
base_data = pd.merge(left=base_data, right=group_molecule, on=['composition_master_id'], how='left')
base_data_temp = base_data.copy()
logger.info('Shape of base data after joining with group molecule is :' + str(base_data.shape))
# =============================================================================
# Attributed Store, Order Mode, Attributed date
# =============================================================================
query = '''
select
"bill-id" ,
"store-id" as "attributed_store",
date("created-at") as "attributed_date",
(case
when "ecom-flag" = 1 then 'Ecom'
else 'Non-Ecom'
end ) as "order_source"
from
"prod2-generico"."prod2-generico".sales s
where
"company-id" = 6984
and "bill-flag" = 'gross'
group by
1,
2,
3,
4 '''
attributed_store = rs_db.get_df(query)
attributed_store.columns = [c.replace('-', '_') for c in attributed_store.columns]
logger.info('Shape of base_data for attributed_bill :' + str(attributed_store.shape))
logger.info('Number of unique bills :' + str(attributed_store['bill_id'].nunique()))
base_data = pd.merge(left=base_data, right=attributed_store, on=['bill_id'], how='left')
logger.info('Shape of base_data after joining attributed_bill :' + str(base_data.shape))
# =============================================================================
# Goodaid incentive day-wise rate card
# =============================================================================
query = '''
select
"drug-id" ,
"rate-date" ,
incentive
from
"prod2-generico"."prod2-generico"."goodaid-incentive-rate-day" '''
rate_card = rs_db.get_df(query)
rate_card.columns = [c.replace('-', '_') for c in rate_card.columns]
logger.info("Data: rate card data successfully fetched: " + str(rate_card.shape))
base_data = pd.merge(left=base_data, right=rate_card, left_on=['drug_id', 'attributed_date'],
right_on=['drug_id', 'rate_date'], how='left')
logger.info('Shape of base data after joining rate card is :' + str(base_data.shape))
base_data['incentive'].fillna(0, inplace=True)
# =============================================================================
# Condition to fetch max incentive amongst the same molecule purchased in a bill
# =============================================================================
base_data_agg = base_data.groupby(['attributed_store', 'patient_id', 'group_molecule', 'group_molecule_text',
'order_source',
'date', 'date_time', 'bill_id', 'bill_flag'],
as_index=False).agg({'quantity': ['sum'], 'sales': ['sum'],
'incentive': ['max']}
).reset_index(drop=True)
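# Flatten the MultiIndex columns created by the aggregation; note that rstrip('_x')
# trims trailing '_' and 'x' characters, so 'incentive_max' becomes 'incentive_ma'
# (this shortened name is referenced downstream)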
base_data_agg.columns = ["_".join(x) for x in base_data_agg.columns.ravel()]
base_data_agg.columns = base_data_agg.columns.str.rstrip('_x')
base_sort = base_data_agg.sort_values(by=['attributed_store', 'patient_id', 'group_molecule',
'group_molecule_text', 'date', 'date_time', 'bill_id', ], ascending=True)
# =============================================================================
# Condition to keep only 1st instance from patient-group_molecule-bill return history
# =============================================================================
return_bill = base_sort[base_sort['bill_flag'] == -1].drop_duplicates(
subset=['patient_id', 'group_molecule', 'bill_id'], keep='first')
logger.info('Shape of return_bill is :' + str(return_bill.shape))
gross_bill = base_sort[base_sort['bill_flag'] == 1]
logger.info('Shape of gross_bill is :' + str(gross_bill.shape))
inc_metadata = gross_bill.append(return_bill)
logger.info('Shape of inc_metadata after appending gross+return is :' + str(inc_metadata.shape))
inc_metadata = inc_metadata.sort_values(by=['patient_id',
'group_molecule', 'group_molecule_text', 'date', 'date_time', 'bill_id'],
ascending=True)
inc_metadata.drop_duplicates(keep='first', inplace=True)
logger.info('Shape of inc_meadata after dropping duplicates is :' + str(inc_metadata.shape))
# =============================================================================
# logic to calculate patient-group_molecule bill rank
# =============================================================================
inc_metadata['cum_sum'] = inc_metadata.groupby(['patient_id',
'group_molecule'])['bill_flag'].cumsum()
# To extract the previous cumulated sum instance
inc_metadata['prev_cum_sum'] = inc_metadata.groupby(['patient_id',
'group_molecule'])['cum_sum'].shift(1)
inc_metadata['prev_cum_sum'].fillna(0, inplace=True)
inc_metadata['cum_sum_old'] = 0  # Can be commented out once the job has run for a limited time period
inc_metadata['cum_sum_final'] = inc_metadata['cum_sum'] + inc_metadata['cum_sum_old']
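# bill_flag is +1 for gross bills and -1 for returns, so cum_sum acts as a net purchase
# counter per patient-molecule; cum_sum_final == 1 with prev_cum_sum == 0 marks the first
# net purchase ('achieved'), while a drop back to 0 marks a 'deduct'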
# =============================================================================
# Applying condition for eligible incentive
# =============================================================================
conditions = [
(
(inc_metadata['cum_sum_final'] == 1) &
(inc_metadata['prev_cum_sum'] == 0)
),
(
(inc_metadata['cum_sum_final'] == 0)
)
]
choices = ['achieved', 'deduct']
inc_metadata['incentive_flag'] = np.select(conditions, choices, default='no_opportunity')
inc_metadata = pd.merge(left=inc_metadata, right=store_master, right_on=['store_id'], left_on=['attributed_store'],
how='left')
logger.info('Shape of inc_meadata after joining stores_master :' + str(inc_metadata.shape))
# Fetch the cases where incentive is not tagged
zero_incentive = inc_metadata[(inc_metadata['incentive_flag'] == 'achieved') & (inc_metadata['incentive_ma'] == 0)]
logger.info('Shape of zero_incentive data :' + str(zero_incentive.shape))
# =============================================================================
# Adding composition in goodaid_incentive_v3
# =============================================================================
base_data_comp = base_data_temp[['bill_id', 'patient_id', 'group_molecule', 'composition', 'composition_master_id']]
base_data_comp = base_data_comp.sort_values(by=['bill_id', 'patient_id', 'group_molecule',
'composition_master_id'], ascending=True)
logger.info('Shape of base_data_comp :' + str(base_data_comp.shape))
# Extracting the first composition from patient-bill-group_molecule
base_data_comp = base_data_comp.groupby(['bill_id', 'patient_id', 'group_molecule']).first().reset_index()
logger.info('Shape of base_data_comp after extracting unique composition :' + str(base_data_comp.shape))
inc_metadata = pd.merge(left=inc_metadata, right=base_data_comp, on=['bill_id', 'patient_id', 'group_molecule'],
how='left')
logger.info('Shape of inc_metadata data after merging with base_data_comp :' + str(inc_metadata.shape))
# Creating a copy of inc_metadata to sync with output table name
goodaid_incentive_v3 = inc_metadata.copy()
logger.info('Shape of goodaid_incentive_v3 data :' + str(goodaid_incentive_v3.shape))
goodaid_incentive_v3['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
goodaid_incentive_v3['created-by'] = 'etl-automation'
goodaid_incentive_v3['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
goodaid_incentive_v3['updated-by'] = 'etl-automation'
goodaid_incentive_v3.columns = [c.replace('_', '-') for c in goodaid_incentive_v3.columns]
logger.info('Shape of goodaid_incentive_v3 data :' + str(goodaid_incentive_v3.shape))
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=goodaid_incentive_v3[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# def main(rs_db, s3):
schema = 'prod2-generico'
table_name = 'goodaid-opportunity'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# =============================================================================
# Total opportunity store level aggregated and avg selling qty
# =============================================================================
query = f'''
select
b."store-id" ,
count(distinct concat(b."patient-id", q1."group_molecule")) as "total_opportunity",
round(avg(a.quantity)) as "avg_qty"
from
"prod2-generico"."prod2-generico"."bill-items-1" a
left join "prod2-generico"."prod2-generico"."bills-1" b on
b.id = a."bill-id"
left join "prod2-generico"."prod2-generico"."inventory-1" c on
c.id = a."inventory-id"
left join "prod2-generico"."prod2-generico".drugs d on
d.id = c."drug-id"
left join "prod2-generico"."prod2-generico"."patients-metadata-2" pm on
pm."id" = b."patient-id"
inner join ({g_query}) q1 on
q1."composition-master-id" = d."composition-master-id"
where
(d."composition-master-id" in {g_composition}
or d."company-id" = 6984)
and DATE(pm."last-bill-date") >= date(date_trunc('month', current_date) - interval '3 month')
group by
1 '''
opportunity = rs_db.get_df(query)
opportunity.columns = [c.replace('-', '_') for c in opportunity.columns]
logger.info('Shape of opportunity data :' + str(opportunity.shape))
# =============================================================================
# opportunity achieved
# =============================================================================
query = f'''
select
b."store-id" ,
count(distinct concat(b."patient-id", q1."group_molecule")) as "total_opportunity_achieved"
from
"prod2-generico"."prod2-generico"."bill-items-1" a
left join "prod2-generico"."prod2-generico"."bills-1" b on
b.id = a."bill-id"
left join "prod2-generico"."prod2-generico"."inventory-1" c on
c.id = a."inventory-id"
left join "prod2-generico"."prod2-generico".drugs d on
d.id = c."drug-id"
left join "prod2-generico"."prod2-generico"."patients-metadata-2" pm on
pm."id" = b."patient-id"
inner join ({g_query}) q1 on
q1."composition-master-id" = d."composition-master-id"
where
d."composition-master-id" in {g_composition}
and d."company-id" = 6984
and DATE(pm."last-bill-date") >= date(date_trunc('month', current_date) - interval '3 month')
group by
1 '''
opportunity_ach = rs_db.get_df(query)
opportunity_ach.columns = [c.replace('-', '_') for c in opportunity_ach.columns]
logger.info('Shape of opportunity_ach data :' + str(opportunity_ach.shape))
goodaid_opportunity = pd.merge(left=opportunity, right=opportunity_ach, on=['store_id'], how='left')
logger.info('Shape of goodaid_opportunity data after merging opp and opp_achieved :' + str(goodaid_opportunity.shape))
goodaid_opportunity = pd.merge(left=goodaid_opportunity, right=store_master, on=['store_id'], how='left')
logger.info('Shape of goodaid_opportunity data after joining stores_master :' + str(goodaid_opportunity.shape))
goodaid_opportunity['created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
goodaid_opportunity['created-by'] = 'etl-automation'
goodaid_opportunity['updated-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
goodaid_opportunity['updated-by'] = 'etl-automation'
goodaid_opportunity.columns = [c.replace('_', '-') for c in goodaid_opportunity.columns]
logger.info('Shape of goodaid_oppurtunity data :' + str(goodaid_opportunity.shape))
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=goodaid_opportunity[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/goodaid-incentive-v3.py | goodaid-incentive-v3.py |
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
import argparse
import pandas as pd
import datetime
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected],[email protected],"
"[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
query = f'''
select
a."drug-id" ,
d.composition ,
d."drug-name"
from
"prod2-generico"."prod2-generico"."wh-sku-subs-master" a
left join "prod2-generico"."prod2-generico".drugs d on
a."drug-id" = d.id
left join
"prod2-generico"."prod2-generico"."goodaid-incentive-rate-card" b on
a."drug-id" = b."drug-id"
where
b."drug-id" is null
and d."company-id" = 6984
and a."add-wh" = 'Yes'
group by
a."drug-id" ,
d.composition,
d."drug-name"
'''
new_drugs =rs_db.get_df(query)
new_drugs.columns = [c.replace('-', '_') for c in new_drugs.columns]
run_date = str(datetime.datetime.now().date())
file_name = 'Goodaid_ATC_SR_and_Incentive_Mapping_{}.csv'.format(str(run_date))
no_of_drugs = len(new_drugs)
logger.info('Total number of miss drugs are {}'.format(no_of_drugs))
drug_names= new_drugs.drug_name.unique()
logger.info('Unique missed drugs are {}'.format(drug_names))
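# Alert only when at least one GoodAid warehouse drug is missing an incentive mapping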
if no_of_drugs > 0:
# Uploading the file to s3
new_drugs = s3.save_df_to_s3(df=new_drugs, file_name=file_name)
# Sending email
subject = ''' Goodaid ATC SR and Incentive Mapping'''
mail_body = '''Provide GoodAid Incentive mapping for -{} Drugs
'''.format(no_of_drugs)
file_uris = [new_drugs]
email = Email()
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=file_uris)
    # deleting the old files
for uri in file_uris:
s3.delete_s3_obj(uri=uri)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/ga-wh-incentive-miss-automailer.py | ga-wh-incentive-miss-automailer.py |
import os
import sys
import argparse
import datetime as dt
import pandas as pd
import numpy as np
sys.path.append('../../../..')
from dateutil.tz import gettz
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# def main(rs_db, s3):
schema = 'prod2-generico'
table_name = 'goodaid-daily-store-opportunity'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# =============================================================================
# Existing Good-Aid composition
# =============================================================================
query = f'''
select
distinct(d.composition),d."composition-master-id"
from
"prod2-generico"."prod2-generico"."wh-sku-subs-master" a
inner join
"prod2-generico"."prod2-generico".drugs d on
d.id = a."drug-id"
where
d."company-id" = 6984
and a."add-wh" = 'Yes'
and d."type" = 'generic'
group by d.composition,d."composition-master-id" '''
ga_active_compositions= rs_db.get_df(query)
ga_active_compositions.columns = [c.replace('-', '_') for c in ga_active_compositions.columns]
g_composition = tuple(map(int, list(ga_active_compositions['composition_master_id'].unique())))
compositions = tuple([str(i) for i in ga_active_compositions['composition']])
logger.info('Data: ga_active_compositions fetched successfully: ' +str(len(ga_active_compositions)))
# =============================================================================
# Stores Master
# =============================================================================
query= '''
select
id as "store_id",
"store-type" ,
city ,
store as "store_name",
"line-manager" ,
abo
from
"prod2-generico"."prod2-generico"."stores-master" sm '''
store_master= rs_db.get_df(query)
store_master.columns = [c.replace('-', '_') for c in store_master.columns]
logger.info('Data: store_master data fetched successfully: ' +str(len(store_master)))
# =============================================================================
# Group Molecule
# =============================================================================
g_query= '''
select
cm."composition-master-id" as "composition-master-id",
LISTAGG( distinct cm."molecule-master-id" , '-' )
WITHIN GROUP (ORDER by cm."molecule-master-id" ) as "group_molecule",
listagg(distinct mm."name" ,'-' ) as "group_molecule_text"
from
"prod2-generico"."prod2-generico"."composition-master-molecules-master-mapping" cm
inner join "prod2-generico"."prod2-generico"."molecule-master" mm
on
cm."molecule-master-id" = mm.id
where
cm."composition-master-id" is not null
group by
cm."composition-master-id"
'''
group_molecule = rs_db.get_df(g_query)
group_molecule.columns= [c.replace ('-','_') for c in group_molecule.columns]
group_molecule.sort_values(by='group_molecule', ascending=True, inplace= True)
group_molecule.reset_index(inplace= True, drop = True)
logger.info("Data: group_molecule table fetched successfully" +str(group_molecule.shape))
# =============================================================================
# Extract store-wise day wise actual opportunity based on billing
# =============================================================================
query = f'''
select
date(s."created-at") as "date",
s."store-id" ,
q1."group_molecule",
q1."group_molecule_text",
count(distinct concat(s."patient-id", q1."group_molecule")) as "total_opportunity"
from
"prod2-generico"."prod2-generico".sales s
inner join ({g_query}) q1
on
q1."composition-master-id" = s."composition-master-id"
where
(s."composition-master-id" in {g_composition}
or s."company-id" = 6984)
and date(s."created-at") >= DATEADD(month, -3, GETDATE())
and "bill-flag" = 'gross'
group by
1,2,3,4 '''
goodaid_daily_store_opportunity = rs_db.get_df(query)
goodaid_daily_store_opportunity.columns = [c.replace('-', '_')
for c in goodaid_daily_store_opportunity.columns]
logger.info('Shape of goodaid_daily_store_opportunity data :'
+ str(goodaid_daily_store_opportunity.shape))
# =============================================================================
# Extracting composition wise opportunity
# =============================================================================
query = f'''
select
date(s."created-at") as "date",
s."store-id" ,
q1."group_molecule",
s.composition ,
s."composition-master-id" ,
count(distinct concat(concat(s."patient-id", q1."group_molecule"), s."composition-master-id")) as "total_opportunity"
from
"prod2-generico"."prod2-generico".sales s
inner join ({g_query}) q1
on
q1."composition-master-id" = s."composition-master-id"
where
(s."composition-master-id" in {g_composition}
or s."company-id" = 6984)
and date(s."created-at") >= DATEADD(month, -3, GETDATE())
and "bill-flag" = 'gross'
group by
1,2,3,4,5 '''
goodaid_daily_opp_comp = rs_db.get_df(query)
goodaid_daily_opp_comp.columns = [c.replace('-', '_') for c in goodaid_daily_opp_comp.columns]
logger.info('Shape of goodaid_daily_opp_comp data :' + str(goodaid_daily_opp_comp.shape))
# =============================================================================
# Extrating total_opportunity at date-store_group_molecule
# =============================================================================
goodaid_composition = goodaid_daily_opp_comp.groupby\
(['date', 'store_id', 'group_molecule'],as_index=False).agg\
({"total_opportunity": "sum"}).reset_index(drop=True)
goodaid_composition.rename({'total_opportunity': 'aggregate_total_opportunity'},
axis=1, inplace=True)
goodaid_daily_opp_comp = pd.merge\
(left=goodaid_daily_opp_comp, right=goodaid_composition,
on=['date', 'store_id', 'group_molecule'], how='left')
logger.info('Shape of goodaid_daily_opp_comp data after joining goodaid_composition :'
+ str(goodaid_daily_opp_comp.shape))
# Creating a multiplier field
goodaid_daily_opp_comp['multiplier'] =\
goodaid_daily_opp_comp['total_opportunity']/\
goodaid_daily_opp_comp['aggregate_total_opportunity']
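# multiplier apportions the molecule-level opportunity across its compositions in
# proportion to each composition's share of patient-composition opportunities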
goodaid_daily_opp_comp=goodaid_daily_opp_comp[['date', 'store_id', 'group_molecule',
'composition','composition_master_id','multiplier']]
goodaid_daily_store_opportunity = pd.merge\
(right=goodaid_daily_opp_comp, left=goodaid_daily_store_opportunity,
on=['date', 'store_id', 'group_molecule'], how='left')
goodaid_daily_store_opportunity['actaul_total_opp'] = \
goodaid_daily_store_opportunity['total_opportunity'] * goodaid_daily_store_opportunity[
'multiplier']
goodaid_daily_store_opportunity['actaul_total_opp']=\
np.ceil(goodaid_daily_store_opportunity['actaul_total_opp'])
logger.info('Shape of goodaid_daily_store_opportunity data after joining comp_opp :'
+ str(goodaid_daily_store_opportunity.shape))
goodaid_daily_store_opportunity = pd.merge(left=goodaid_daily_store_opportunity, right=store_master,
on=['store_id'], how='left')
logger.info('Shape of goodaid_daily_store_opportunity data after joining stores_master :'
+ str(goodaid_daily_store_opportunity.shape))
goodaid_daily_store_opportunity['created-at'] = \
dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
goodaid_daily_store_opportunity['created-by'] = 'etl-automation'
goodaid_daily_store_opportunity['updated-at'] = \
dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
goodaid_daily_store_opportunity['updated-by'] = 'etl-automation'
goodaid_daily_store_opportunity.columns = [c.replace('_','-')
for c in goodaid_daily_store_opportunity.columns]
logger.info('Shape of goodaid_opportunity data :' + str(goodaid_daily_store_opportunity.shape))
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=goodaid_daily_store_opportunity[table_info['column_name']],
table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/goodaid-daily-store-opportunity.py | goodaid-daily-store-opportunity.py |
# @Purpose: To replicate the table from warehouse db to redshift
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.db.db import MSSql
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.email.email import Email
import argparse
import traceback
import pandas as pd
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-eet', '--error_email_to', default="NA", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
error_email_to = args.error_email_to
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
email = Email()
schema = 'prod2-generico'
# from bhiwandi wh
# SalePurchase table
mssql = MSSql(connect_via_tunnel=False)
cnxn = mssql.open_connection()
cursor = cnxn.cursor()
query = ''' SELECT * FROM Salepurchase2 WHERE Vtype not in ('SB') '''
try:
salepur_bhw = pd.read_sql(query, cnxn)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
salepur_bhw['warehouse'] = 'BHW'
salepur_bhw['warehouseId'] = 199
logger.info("Data from BHW acquired: " + str(len(salepur_bhw)))
# from Goodaid wh
mssql_ga = MSSql(connect_via_tunnel=False, db='Esdata_WS_2')
cnxn_ga = mssql_ga.open_connection()
cursor_ga = cnxn_ga.cursor()
query = '''
SELECT * FROM Salepurchase2 WHERE Vtype not in ('SB')'''
try:
salepur_ga = pd.read_sql(query, cnxn_ga)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
salepur_ga['warehouse'] = 'GA'
salepur_ga['warehouseId'] = 343
logger.info("Data from GAW acquired: " + str(len(salepur_ga)))
# From TEPL
mssql_tepl = MSSql(connect_via_tunnel=False, db='Esdata_TEPL')
cnxn_tepl = mssql_tepl.open_connection()
cursor = cnxn_tepl.cursor()
query = ''' SELECT * FROM SalePurchase2 sp '''
try:
salepur_tepl = pd.read_sql(query, cnxn_tepl)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
salepur_tepl['warehouse'] = 'TEPL'
salepur_tepl['warehouseId'] = 342
logger.info("Data from TEPLW acquired: " + str(len(salepur_tepl)))
# From older db salepurchase 2
#mssql_old = MSSql(connect_via_tunnel=False, db='Esdata2122')
#cnxn_old = mssql_old.open_connection()
#cursor = cnxn_old.cursor()
#query = ''' SELECT * FROM SalePurchase2 sp where Vtype not in ('SB') '''
#try:
#salepur_old = pd.read_sql(query, cnxn_old)
#except Exception as error:
#helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
#salepur_old['warehouse'] = 'BHW'
#salepur_old['warehouseId'] = 199
#logger.info("Data from BHW acquired: " + str(len(salepur_tepl)))
# concating the above dataframes
df_new = pd.concat([salepur_bhw, salepur_ga, salepur_tepl]).reset_index(drop=True)
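# Cast ID-like columns to pandas' nullable Int64 so missing values survive the
# integer conversion instead of forcing the columns to float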
df_new[['scm1', 'scm2', 'Gacno', 'Sman', 'Area', 'route', 'CodeCent', 'ChlnSrlno',
'RefVno', 'SRGGVno', 'SBPsrlno', 'CompCode', 'mState', 'PsTypeTOM', 'ICase',
'IBox', 'ILoose', 'PorderNo', 'StockLocation']] \
= df_new[['scm1', 'scm2', 'Gacno', 'Sman', 'Area', 'route', 'CodeCent', 'ChlnSrlno',
'RefVno', 'SRGGVno', 'SBPsrlno', 'CompCode', 'mState', 'PsTypeTOM', 'ICase',
'IBox', 'ILoose', 'PorderNo', 'StockLocation']] \
.apply(pd.to_numeric, errors='ignore').astype('Int64')
df_new.columns = df_new.columns.str.lower()
logger.info("Data from both BHW and GAW concatenated: " + str(len(df_new)))
# def main(rs_db, s3):
table_name='salepurchase2'
table_name_temp = 'salepurchase2_temp'
table_info = helper.get_table_info(db=rs_db, table_name=table_name_temp, schema=schema)
# =========================================================================
# Writing table in Redshift
# =========================================================================
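# Staging pattern: load the fresh extract into the temp table, then within one
# transaction delete the target rows (vdt >= 2022-04-01) and re-insert from temp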
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name_temp} do not exist, create the table first")
else:
print(f"Table:{table_name_temp} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name_temp}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name_temp} table truncated")
s3.write_df_to_db(df=df_new[table_info['column_name']], table_name=table_name_temp, db=rs_db,
schema=schema)
rs_db.execute(query="begin ;")
rs_db.execute(query=f""" delete from "prod2-generico"."{table_name}" where
date(vdt)>= '2022-04-01'; """)
query=f'''
insert into "{schema}"."{table_name}" select * FROM "{schema}"."{table_name_temp}"
'''
rs_db.execute(query=query)
""" committing the transaction """
rs_db.execute(query=" end; ")
logger.info(f"Table:{table_name} table uploaded")
df_new.drop(df_new.index, inplace=True)
# salepurchase1 table
# SP1 from BHW
query = '''
SELECT * FROM Salepurchase1 s WHERE Vtyp not in ('SB') '''
try:
salepur1_bhw = pd.read_sql(query, cnxn)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
salepur1_bhw['warehouse'] = 'BHW'
salepur1_bhw['warehouseId'] = 199
logger.info("Data from BHW acquired: " + str(len(salepur1_bhw)))
# SP1 from GAW
query = '''
SELECT * FROM Salepurchase1 s WHERE Vtyp not in ('SB') '''
try:
salepur1_ga = pd.read_sql(query, cnxn_ga)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
salepur1_ga['warehouse'] = 'GA'
salepur1_ga['warehouseId'] = 343
logger.info("Data from GAW acquired: " + str(len(salepur1_ga)))
# SP1 from TEPLW
query = ''' SELECT * FROM Salepurchase1 s '''
try:
salepur1_tepl = pd.read_sql(query, cnxn_tepl)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
salepur1_tepl['warehouse'] = 'TEPL'
salepur1_tepl['warehouseId'] = 342
logger.info("Data from TEPLW acquired: " + str(len(salepur1_tepl)))
# SP1 form old BHW
#query = '''
#SELECT * FROM Salepurchase1 s WHERE Vtyp not in ('SB') '''
#try:
#salepur1_bhw_old = pd.read_sql(query, cnxn_old)
#except Exception as error:
#helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
#salepur1_bhw_old['warehouse'] = 'BHW'
#salepur1_bhw_old['warehouseId'] = 199
#logger.info("Data from BHW acquired: " + str(len(salepur1_bhw)))
salepurchase1 = pd.concat([salepur1_bhw, salepur1_ga, salepur1_tepl]).reset_index(drop=True)
salepurchase1[['SyncNo', 'StndVno', 'BillTocode']] = salepurchase1[
['SyncNo', 'StndVno', 'BillTocode']] \
.apply(pd.to_numeric, errors='ignore').astype('Int64')
salepurchase1.columns = salepurchase1.columns.str.lower()
# def main(rs_db, s3):
table_name = 'salepurchase1'
table_name_temp = 'salepurchase1_temp'
table_info = helper.get_table_info(db=rs_db, table_name=table_name_temp, schema=schema)
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name_temp} do not exist, create the table first")
else:
print(f"Table:{table_name_temp} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name_temp}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name_temp} table truncated")
s3.write_df_to_db(df=salepurchase1[table_info['column_name']], table_name=table_name_temp, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
rs_db.execute(query="begin ;")
rs_db.execute(query=f""" delete from "prod2-generico"."{table_name}" where
date(vdt)>= '2022-04-01'; """)
query=f'''
insert into "{schema}"."{table_name}" select * FROM "{schema}"."{table_name_temp}"
'''
rs_db.execute(query=query)
""" committing the transaction """
rs_db.execute(query=" end; ")
salepurchase1.drop(salepurchase1.index, inplace=True)
# Fifo from bhiwandi
query = '''
SELECT * FROM FIFO f '''
try:
fifo_bhw = pd.read_sql(query, cnxn)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
fifo_bhw['warehouse'] = 'BHW'
fifo_bhw['warehouseId'] = 199
logger.info("FIFO Data from BHW acquired: " + str(len(fifo_bhw)))
# Fifo from GA warehouse
query = '''
SELECT * FROM FIFO f '''
try:
fifo_ga = pd.read_sql(query, cnxn_ga)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
fifo_ga['warehouse'] = 'GA'
fifo_ga['warehouseId'] = 343
logger.info("FIFO Data from GAW acquired: " + str(len(fifo_ga)))
# fifo from tepl warehouse
query = '''
SELECT * FROM FIFO f '''
try:
fifo_tepl = pd.read_sql(query, cnxn_tepl)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
fifo_tepl['warehouse'] = 'TEPL'
fifo_tepl['warehouseId'] = 342
logger.info("FIFO Data from GAW acquired: " + str(len(fifo_tepl)))
fifo = pd.concat([fifo_bhw, fifo_ga, fifo_tepl]).reset_index(drop=True)
fifo[['ScmOfferNo', 'WUCode', 'PsrlnoGDNTrf', 'StockLocation', 'SyncNo']] \
= fifo[['ScmOfferNo', 'WUCode', 'PsrlnoGDNTrf', 'StockLocation', 'SyncNo']] \
.apply(pd.to_numeric, errors='ignore').astype('Int64')
fifo.columns = fifo.columns.str.lower()
logger.info("FIFO Data from both GA and BHW acquired: " + str(len(fifo)))
# def main(rs_db, s3):
table_name = 'fifo'
table_name_temp='fifo_temp'
table_info = helper.get_table_info(db=rs_db, table_name=table_name_temp, schema=schema)
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name_temp} do not exist, create the table first")
else:
print(f"Table:{table_name_temp} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name_temp}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name_temp} table truncated")
s3.write_df_to_db(df=fifo[table_info['column_name']], table_name=table_name_temp, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name_temp} table uploaded")
rs_db.execute(query="begin ;")
rs_db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query=f'''
insert into "{schema}"."{table_name}" select * FROM "{schema}"."{table_name_temp}"
'''
rs_db.execute(query=query)
""" committing the transaction """
rs_db.execute(query=" end; ")
fifo.drop(fifo.index, inplace=True)
# acknow table from BHW , GAW and TEPL
# acknow from BHW
query = '''
SELECT * FROM Acknow a '''
try:
acknow_bhw = pd.read_sql(query, cnxn)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
acknow_bhw['warehouse'] = 'BHW'
acknow_bhw['warehouseId'] = 199
logger.info("Acknow Data from BHW acquired: " + str(len(acknow_bhw)))
# acknow from GAW
query = '''
SELECT * FROM Acknow a '''
try:
acknow_ga = pd.read_sql(query, cnxn_ga)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
acknow_ga['warehouse'] = 'GA'
acknow_ga['warehouseId'] = 343
logger.info("Acknow Data from GAW acquired: " + str(len(acknow_ga)))
# acknow from TEPL warehouse
query = '''
SELECT * FROM Acknow a '''
try:
acknow_tepl = pd.read_sql(query, cnxn_tepl)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
acknow_tepl['warehouse'] = 'TEPL'
acknow_tepl['warehouseId'] = 342
logger.info("Acknow Data from GAW acquired: " + str(len(acknow_ga)))
# acknow from BHW
#query = '''
#SELECT * FROM Acknow a '''
#try:
#acknow_bhw_old = pd.read_sql(query, cnxn_old)
#except Exception as error:
#helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
#acknow_bhw_old['warehouse'] = 'BHW'
#acknow_bhw_old['warehouseId'] = 199
#logger.info("Acknow Data from BHW acquired: " + str(len(acknow_bhw_old)))
acknow = pd.concat([acknow_bhw, acknow_ga, acknow_tepl]).reset_index(drop=True)
logger.info("Acknow Data from after combining: " + str(len(acknow)))
acknow.columns = acknow.columns.str.lower()
# def main(rs_db, s3):
table_name = 'acknow'
table_name_temp='acknow_temp'
table_info = helper.get_table_info(db=rs_db, table_name=table_name_temp, schema=schema)
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name_temp} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name_temp}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name_temp} table truncated")
s3.write_df_to_db(df=acknow[table_info['column_name']], table_name=table_name_temp, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name_temp} table uploaded")
rs_db.execute(query="begin ;")
rs_db.execute(query=f""" delete from "prod2-generico"."{table_name}" where
date(vdt)>= '2022-04-01'; """)
query=f'''
insert into "{schema}"."{table_name}" select * FROM "{schema}"."{table_name_temp}"
'''
rs_db.execute(query=query)
""" committing the transaction """
rs_db.execute(query=" end; ")
acknow.drop(acknow.index, inplace=True)
# Getting item, master and acm from BHW warehouse
query = '''
select * from Item '''
try:
item = pd.read_sql(query, cnxn)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
item[['Saltcode', 'IucCode', 'SyncIdMod', 'SyncNo']] = item[
['Saltcode', 'IucCode', 'SyncIdMod', 'SyncNo']] \
.apply(pd.to_numeric, errors='ignore').astype('Int64')
item.columns = item.columns.str.lower()
logger.info("Item Data from BHW acquired: " + str(len(item)))
# def main(rs_db, s3):
table_name = 'item'
table_name_temp = 'item_temp'
table_info = helper.get_table_info(db=rs_db, table_name=table_name_temp, schema=schema)
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name_temp} do not exist, create the table first")
else:
print(f"Table:{table_name_temp} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name_temp}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name_temp} table truncated")
s3.write_df_to_db(df=item[table_info['column_name']], table_name=table_name_temp, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name_temp} table uploaded")
rs_db.execute(query="begin ;")
rs_db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query=f'''
insert into "{schema}"."{table_name}" select * FROM "{schema}"."{table_name_temp}"
'''
rs_db.execute(query=query)
""" committing the transaction """
rs_db.execute(query=" end; ")
item.drop(item.index, inplace=True)
# ACM table
query = '''
select * from Acm '''
try:
acm = pd.read_sql(query, cnxn)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
acm[['BillToCode', 'SyncNo']] = acm[['BillToCode', 'SyncNo']].apply(pd.to_numeric,
errors='ignore').astype('Int64')
acm.columns = acm.columns.str.lower()
logger.info("acm Data from BHW acquired: " + str(len(acm)))
# def main(rs_db, s3):
table_name = 'acm'
table_name_temp='acm_temp'
table_info = helper.get_table_info(db=rs_db, table_name=table_name_temp, schema=schema)
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name_temp} do not exist, create the table first")
else:
print(f"Table:{table_name_temp} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name_temp}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name_temp} table truncated")
s3.write_df_to_db(df=acm[table_info['column_name']], table_name=table_name_temp, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name_temp} table uploaded")
rs_db.execute(query="begin ;")
rs_db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query=f'''
insert into "{schema}"."{table_name}" select * FROM "{schema}"."{table_name_temp}"
'''
rs_db.execute(query=query)
""" committing the transaction """
rs_db.execute(query=" end; ")
# from Master table
query = '''
select * from Master '''
try:
master = pd.read_sql(query, cnxn)
except Exception as error:
helper.log_or_email_error(logger=logger, exception=error, email_to=error_email_to)
master[['SyncNo']] = master[['SyncNo']].apply(pd.to_numeric, errors='ignore').astype('Int64')
master.columns = master.columns.str.lower()
logger.info("master Data from BHW acquired: " + str(len(master)))
# def main(rs_db, s3):
table_name = 'master'
table_name_temp='master_temp'
table_info = helper.get_table_info(db=rs_db, table_name=table_name_temp, schema=schema)
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name_temp} do not exist, create the table first")
else:
print(f"Table:{table_name_temp} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name_temp}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name_temp} table truncated")
s3.write_df_to_db(df=master[table_info['column_name']], table_name=table_name_temp, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
rs_db.execute(query="begin ;")
rs_db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query=f'''
insert into "{schema}"."{table_name}" select * FROM "{schema}"."{table_name_temp}"
'''
rs_db.execute(query=query)
""" committing the transaction """
rs_db.execute(query=" end; ")
# closing the DB connection in the end
rs_db.close_connection()
mssql.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/supplychain.py | supplychain.py |
import os
import sys
import argparse
import pandas as pd
import datetime
import numpy as np
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
# def main(rs_db, s3):
schema = 'prod2-generico'
table_name = 'inventory-ga'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# Getting the data
# Active GoodAid drugs
query = f'''
SELECT
d.id as "drug_id",
d."composition-master-id" as "composition_master_id",
d.composition
FROM
"prod2-generico"."prod2-generico"."inventory-1" i
join "prod2-generico"."prod2-generico".drugs d on
i."drug-id" = d.id
WHERE
d."company-id" = 6984
and i."created-at" <= current_date
group by
d."composition-master-id" ,
d.composition,
d.id '''
ga_active= rs_db.get_df(query)
logger.info("Data: ga_active compositions fetched")
gaid_compositions = tuple(map(int, list(ga_active['composition_master_id'].unique())))
# Store master
query = f'''
select
id as "store-id",
store as "store-name",
"store-manager" ,
"line-manager" ,
abo
from
"prod2-generico"."prod2-generico"."stores-master" sm '''
store_master = rs_db.get_df(query)
logger.info("Data: got stores master data successfully")
# current inventory
inv = '''
select
a."store-id" ,
a."drug-id" ,
b."drug-name" ,
b."type" ,
b.category ,
b.company ,
b.composition ,
b."composition-master-id" ,
(c.min) as "min",
(c."safe-stock") as "safe-stock",
(c.max) as "max",
SUM(a.quantity + a."locked-for-check" + a."locked-for-audit" +
a."locked-for-return" + a."locked-for-transfer") as "current-inventory",
SUM(a."locked-quantity") as "in-transit",
SUM((a.quantity + a."locked-for-check" + a."locked-for-audit" +
a."locked-for-return" + a."locked-for-transfer") * a.ptr) as "value"
from
"prod2-generico"."prod2-generico"."inventory-1" a
join "prod2-generico"."prod2-generico".drugs b on
a."drug-id" = b.id
left join
"prod2-generico"."prod2-generico"."drug-order-info" c on
c."store-id" = a."store-id"
and c."drug-id" = b.id
where
b."composition-master-id" in {}
group by
a."store-id" ,
a."drug-id" ,
b."drug-name" ,
b."type" ,
b.category ,
b.composition ,
b.company,
b."composition-master-id",
c.min ,
c."safe-stock" ,
c.max'''
inventory= rs_db.get_df(inv.format(gaid_compositions))
logger.info("Inventory table successfully fetched")
inventory['goodaid-flag'] = np.where(inventory['company'] == 'GOODAID',
'GoodAid', 'Non-GoodAid')
inventory['ss-status'] = np.where(inventory['max'] > 0, 'ss_set', 'ss_not_set')
conditions = [
(
(inventory['current-inventory'] <= inventory['safe-stock']) &
(inventory['current-inventory'] > 0)
),
(
(inventory['current-inventory'] > inventory['safe-stock'])
),
(
inventory['current-inventory'] <= 0
)
]
choices = ['inv_less_than_ss', 'inv_more_than_ss', 'not_in_inventory']
inventory['inventory-flag'] = np.select(conditions, choices, default='not_in_inventory')
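# Illustration (hypothetical values, not executed): np.select assigns the first
# matching condition per row; e.g. for a drug with safe-stock = 2:
#   current-inventory = 1 -> 'inv_less_than_ss'
#   current-inventory = 5 -> 'inv_more_than_ss'
#   current-inventory = 0 -> 'not_in_inventory' (also the default)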
inventory_ga = pd.merge(left=inventory, right=store_master, on=['store-id'], how='left')
logger.info("Data: inventory_ga table fetched successfully")
inventory_ga['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
inventory_ga['created-by'] = 'etl-automation'
inventory_ga['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
inventory_ga['updated-by'] = 'etl-automation'
inventory_ga.columns = [c.replace('_', '-') for c in inventory_ga.columns]
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
logger.info(f"Table:{table_name} table truncated")
s3.write_df_to_db(df=inventory_ga[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
logger.info(f"Table:{table_name} table uploaded")
####
# def main(rs_db, s3):
table_name = 'order-status-ga'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# order status
o_status= f'''
select
a.id as "short-book-id",
a."store-id" ,
a."drug-id" ,
a."drug-name" ,
c."type" ,
c.company ,
c.composition ,
a.quantity ,
a."required-quantity" ,
a.status ,
a."created-at" as "short-book-created-at",
a."dispatched-at" as "short-book-dispatched-at" ,
a."received-at" as "short-book-received-at" ,
a."auto-short" ,
a."patient-id"
from
"prod2-generico"."prod2-generico"."short-book-1" a
left join
"prod2-generico"."prod2-generico".drugs c on
c.id = a."drug-id"
where
DATE(a."created-at") >= DATEADD(month, -2, GETDATE())
and c."company-id" = 6984 '''
order_status= rs_db.get_df(o_status)
logger.info("Data: order_status df fetched successfully")
order_status['goodaid-flag'] = np.where(order_status['company'] == 'GOODAID',
'GoodAid', 'Non-GoodAid')
def get_status(order_status):
if (order_status['auto-short'] == 1) & (order_status['patient-id'] == 4480):
return 'auto-short'
elif (order_status['auto-short'] == 1) & (order_status['patient-id'] != 4480):
return 'manual-short'
order_status['order-type'] = order_status.apply(get_status, axis=1)
del order_status['auto-short']
del order_status['patient-id']
order_status['ff-hours'] = (pd.to_datetime(order_status['short-book-dispatched-at'], errors='coerce') -
order_status['short-book-created-at']) / np.timedelta64(1, 'h')
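# Illustration (hypothetical values): dividing the timedelta by
# np.timedelta64(1, 'h') converts it to fractional hours, e.g.
#   short-book-dispatched-at = 2022-01-01 14:30, short-book-created-at = 2022-01-01 12:00
#   ff-hours = 2.5
# Rows with no dispatch timestamp become NaT (errors='coerce') and yield NaN hours.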
order_status_ga = pd.merge(left=order_status, right=store_master, on=['store-id'], how='left')
order_status_ga['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
order_status_ga['created-by'] = 'etl-automation'
order_status_ga['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
order_status_ga['updated-by'] = 'etl-automation'
order_status_ga.columns = [c.replace('_', '-') for c in order_status_ga.columns]
logger.info("Data: all the operations performed and order status data fetched successfully")
# =========================================================================
# Writing table in Redshift
# =========================================================================
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
print(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=order_status_ga[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/goodaid/goodaid-corp.py | goodaid-corp.py |
import argparse
import os
import sys
from datetime import datetime as dt
import numpy as np
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper.parameter.job_parameter import parameter
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
job_params = parameter.get_params(job_id=42)
email_to = job_params['email_to']
logger = get_logger()
# params
# Segment calculation date should be 1st of every month
try:
period_end_d_plus1 = job_params['period_end_d_plus1']
period_end_d_plus1 = str(dt.strptime(period_end_d_plus1, "%Y-%m-%d").date())
period_end_d_plus1 = period_end_d_plus1[:-3] + '-01'
except (KeyError, ValueError):
period_end_d_plus1 = dt.today().strftime('%Y-%m') + '-01'
logger.info(f"segment calculation date : {period_end_d_plus1}")
read_schema = 'prod2-generico'
table_name = 'customer-value-segment'
rs_db = DB()
rs_db.open_connection()
s3 = S3()
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=read_schema)
logger.info(table_info)
if isinstance(table_info, type(None)):
logger.info(f"table: {table_name} do not exist")
s = f"""
SELECT
"patient-id",
COUNT(DISTINCT "id") AS "total-bills",
SUM("net-payable") AS "total-spend"
FROM "{read_schema}"."bills-1"
WHERE DATEDIFF('days', '{period_end_d_plus1}', date("created-at")) between -90 and -1
GROUP BY "patient-id"
"""
logger.info(f"data query : {s}")
data = rs_db.get_df(query=s)
logger.info(data.head())
total_patients = data['patient-id'].nunique()
logger.info(f"total patient count for run {period_end_d_plus1} : {total_patients}")
data['total-spend'] = data['total-spend'].astype(float)
data['abv'] = np.round(data['total-spend'] / data['total-bills'], 2)
data = data.sort_values(['total-spend'], ascending=False)
data['rank'] = data['total-spend'].rank(method='dense', ascending=False)
data['rank'] = data['rank'].astype(int)
data['cumm-sales'] = data.sort_values(['total-spend'], ascending=False)['total-spend'].cumsum()
len_data = len(data)
logger.info(len_data)
def assign_value_segment(row):
"""
:param row:
:return: value-segment
"""
if row['rank'] <= 0.05 * len_data:
return 'platinum'
if (row['rank'] > 0.05 * len_data) & (row['rank'] <= 0.1 * len_data):
return 'gold'
if (row['rank'] > 0.1 * len_data) & (row['rank'] <= 0.2 * len_data):
return 'silver'
return 'others'
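# Worked example (hypothetical numbers, for illustration only): with
# len_data = 100_000 patients dense-ranked by 90-day spend,
#   rank <= 5_000           -> 'platinum' (top 5%)
#   5_000 < rank <= 10_000  -> 'gold'     (next 5%)
#   10_000 < rank <= 20_000 -> 'silver'   (next 10%)
#   rank > 20_000           -> 'others'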
data['value-segment'] = data.apply(assign_value_segment, axis=1)
platinum_length = len(data[data['value-segment'] == 'platinum'])
gold_length = len(data[data['value-segment'] == 'gold'])
silver_length = len(data[data['value-segment'] == 'silver'])
others_length = len(data[data['value-segment'] == 'others'])
platinum_data = data[data['value-segment'] == 'platinum']
# Write to csv
s3.save_df_to_s3(df=platinum_data,
file_name='Shubham_G/value_segment/value_segment_data_platinum.csv')
logger.info(f'Length of Platinum segment is {platinum_length}')
logger.info(f'Length of Gold segment is {gold_length}')
logger.info(f'Length of Silver segment is {silver_length}')
logger.info(f'Length of Others segment is {others_length}')
q2 = f"""
SELECT
"patient-id",
"store-id",
COUNT(DISTINCT "id") AS "store-bills",
SUM("net-payable") AS "store-spend"
FROM "{read_schema}"."bills-1"
WHERE DATEDIFF('days', '{period_end_d_plus1}', date("created-at")) between -90 and -1
GROUP BY "patient-id","store-id"
"""
logger.info(q2)
data_store = rs_db.get_df(query=q2)
logger.info(f"data_store {data_store.head()}")
data_store['rank'] = data_store.sort_values(['store-bills',
'store-spend'],
ascending=[False, False]). \
groupby(['patient-id']).cumcount() + 1
patient_store = data_store[data_store['rank'] == 1][['patient-id', 'store-id']]
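# Illustration (hypothetical rows): each patient's "primary" store is the one
# with the most bills in the window, ties broken by spend, e.g.
#   patient 1, store 8: 5 bills, 2100 spend -> rank 1 (kept)
#   patient 1, store 3: 2 bills, 3000 spend -> rank 2 (dropped)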
q3 = f"""
SELECT
"id" AS "store-id",
"name" AS "store-name"
FROM "{read_schema}"."stores"
"""
logger.info(q3)
stores = rs_db.get_df(q3)
logger.info(f"stores {stores}")
patient_store = patient_store.merge(stores, how='inner', on='store-id')
data = data.merge(patient_store, how='inner', left_on=['patient-id'], right_on=['patient-id'])
runtime_month = dt.today().strftime('%Y-%m')
runtime_date = dt.today().strftime('%Y-%m-%d')
data['segment-calculation-date'] = period_end_d_plus1
data['base-list-identifier'] = runtime_month
data['upload-date'] = runtime_date
# etl
data['created-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data['created-by'] = 'etl-automation'
data['updated-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data['updated-by'] = 'etl-automation'
logger.info(f"data write : \n {data.head()}")
# truncate data if current month data already exists
if isinstance(table_info, type(None)):
logger.info(f"table: {table_name} do not exist")
else:
truncate_query = f"""
DELETE
FROM
"{read_schema}"."{table_name}"
WHERE
"segment-calculation-date" = '{period_end_d_plus1}';
"""
logger.info(truncate_query)
rs_db.execute(truncate_query)
# drop duplicates subset - patient-id
data.drop_duplicates(subset=['patient-id'], inplace=True)
# Write to csv
s3.save_df_to_s3(df=data[table_info['column_name']],
file_name='Shubham_G/value_segment/value_segment_data.csv')
s3.write_df_to_db(df=data[table_info['column_name']], table_name=table_name,
db=rs_db, schema=read_schema)
logger.info("Script ran successfully")
# email after job ran successfully
email = Email()
mail_body = f"Value segments upload succeeded for segment calculation date {period_end_d_plus1} " \
f"with data shape {data.shape} and total patient count {total_patients}"
if data.shape[0] == total_patients:
subject = "Task Status behaviour segment calculation : successful"
else:
subject = "Task Status behaviour segment calculation : failed"
email.send_email_file(subject=subject,
mail_body=mail_body,
to_emails=email_to, file_uris=[], file_paths=[])
# closing connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/consumer_segments/consumer-value-segments.py | consumer-value-segments.py |
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from datetime import datetime as dt
from datetime import timedelta
from dateutil.tz import gettz
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
logger = get_logger()
logger.info(f"env: {env}")
read_schema = 'prod2-generico'
table_name = 'diagnostic-visibility'
rs_db = DB()
rs_db.open_connection()
end = dt.now().date()
start = end - timedelta(days=15)
s3 = S3()
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=read_schema)
if isinstance(table_info, type(None)):
logger.info(f"table: {table_name} do not exist")
else:
truncate_query = f''' DELETE FROM "{read_schema}"."{table_name}"
WHERE "date" BETWEEN '{start}' and '{end}' '''
logger.info(truncate_query)
rs_db.execute(truncate_query)
diagnostics_q = f"""
select
distinct *
from
(
select
d1."cash-payment",
d2.*,
rcrm.comments,
zer."reason-name",
zer."type" as "reason-type",
test."number-of-tests",
test."booking-at",
sm.store,
sm.city,
sm.line,
sm.abo,
sm."store-manager",
acq."acq-medium",
(case
when d2.status in ('REDEMPTION', 'COMPLETED') then 1
else 0
end ) "red-com",
sum("red-com") over (partition by d2."patient-id"
order by
d2."date",
d2."redemption-id" rows unbounded preceding) as nodo
from
(
select
r.id as "redemption-id",
r."patient-id",
r."source",
r."status",
r."store-id",
r."total-amount" as "total-sales",
r."redeemed-value" as "reward-payment",
date(r."created-at") as date,
r."call-status",
r."created-by" as pharmacist
from
"prod2-generico".redemption r) d2
left join
(
select
rp."redemption-id",
SUM(case when rp."payment-type" in ('CASH', 'LINK') then rp.amount else 0 end) "cash-payment"
from
"prod2-generico"."redemption-payments" rp
group by
rp."redemption-id") d1
on
d2."redemption-id" = d1."redemption-id"
left join "prod2-generico"."redemption-cancellation-reason-mapping" rcrm on
rcrm."redemption-id" = d2."redemption-id"
left join "prod2-generico"."zeno-escalation-reason" zer on
rcrm."redemption-cancellation-reason-id" = zer.id
left join (
select
rs."redemption-id",
count(distinct rs."sku-id") as "number-of-tests",
max(rs."slot-date") as "booking-at"
from
"prod2-generico"."redemption-skus" rs
left join "prod2-generico"."reward-product" rp on
rs."sku-id" = rp.id
group by
rs."redemption-id") test on
d2."redemption-id" = test."redemption-id"
left join "prod2-generico"."stores-master" sm on
d2."store-id" = sm.id
left join (
select
r."patient-id",
(case
when (MIN(DATE(r."redemption-date")) - MIN(DATE(b."created-at"))) > 0
then 'drug_store'
when (MIN(DATE(r."redemption-date")) - MIN(DATE(b."created-at"))) < 0
then 'diagnostic'
else 'diagnostic_no_visit_store'
end ) "acq-medium"
from
"prod2-generico".redemption r
left join "prod2-generico"."bills-1" b on
r."patient-id" = b."patient-id"
where
r.status in ('REDEMPTION', 'COMPLETED')
group by
r."patient-id") acq on
d2."patient-id" = acq."patient-id") X
where
X."date" between '{start}' and '{end}';
"""
diagnostics = rs_db.get_df(diagnostics_q)
diagnostics['store-id'] = diagnostics['store-id'].fillna(0)
diagnostics['source'] = diagnostics['source'].map({'OPS_DASHBOARD': 'OPS Oracle',
'LOYALTY_UI': 'App',
'STORE': 'Store'})
# datatype correction
diagnostics['number-of-tests'] = diagnostics['number-of-tests'].fillna(0)
diagnostics['number-of-tests'] = diagnostics['number-of-tests'].astype(int)
diagnostics['cash-payment'] = diagnostics['cash-payment'].astype(float)
diagnostics['store-id'] = diagnostics['store-id'].astype(int)
diagnostics['total-sales'] = diagnostics['total-sales'].astype(float)
diagnostics['reward-payment'] = diagnostics['reward-payment'].astype(float)
# etl
diagnostics['created-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
diagnostics['created-by'] = 'etl-automation'
diagnostics['updated-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
diagnostics['updated-by'] = 'etl-automation'
# Write to csv
s3.save_df_to_s3(df=diagnostics[table_info['column_name']], file_name='data.csv')
s3.write_df_to_db(df=diagnostics[table_info['column_name']], table_name=table_name, db=rs_db, schema=read_schema)
# closing the connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/diagnostic_data/diagnostic_data_visiblity.py | diagnostic_data_visiblity.py |
import os
import sys
import argparse
import pandas as pd
import datetime as dt
from dateutil.tz import gettz
from ast import literal_eval
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, PostGre
from zeno_etl_libs.django.api import Django
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.non_ipc.data_prep.non_ipc_data_prep import non_ipc_data_prep
from zeno_etl_libs.utils.non_ipc.forecast.forecast_main import non_ipc_forecast
from zeno_etl_libs.utils.non_ipc.safety_stock.safety_stock import non_ipc_safety_stock_calc
from zeno_etl_libs.utils.warehouse.wh_intervention.store_portfolio_consolidation import stores_ss_consolidation
from zeno_etl_libs.utils.ipc.goodaid_substitution import update_ga_ss
from zeno_etl_libs.utils.ipc.npi_exclusion import omit_npi_drugs
from zeno_etl_libs.utils.ipc.post_processing import post_processing
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.utils.ipc.store_portfolio_additions import generic_portfolio
def main(debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag,
v6_active_flag, v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
keep_all_generic_comp, agg_week_cnt, kind, rs_db_read, rs_db_write,
read_schema, write_schema, s3, django, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
if v3_active_flag == 'Y':
corrections_flag = True
else:
corrections_flag = False
# Define empty DF if required in case of fail
order_value_all = pd.DataFrame()
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
logger.info("Forecast pipeline starts...")
try:
for store_id in reset_stores:
logger.info("Non-IPC SS calculation started for store id: " + str(store_id))
if not type_list:
type_list = str(
list(reset_store_ops.loc[reset_store_ops['store_id'] ==
store_id, 'type'].unique()))
type_list = type_list.replace('[', '(').replace(']', ')')
# RUNNING DATA PREPARATION
drug_data_agg_weekly, drug_data_weekly, drug_class, \
bucket_sales = non_ipc_data_prep(
store_id_list=store_id, reset_date=reset_date,
type_list=type_list, db=rs_db_read, schema=read_schema,
agg_week_cnt=agg_week_cnt,
logger=logger)
# CREATING TRAIN FLAG TO HANDLE STORES WITH HISTORY < 16 WEEKS
week_count = drug_data_weekly['date'].nunique()
if week_count >= 16:
train_flag = True
else:
train_flag = False
# RUNNING FORECAST PIPELINE AND SAFETY STOCK CALC
out_of_sample = 1
horizon = 1
train, error, predict, ensemble_train, ensemble_error, \
ensemble_predict = non_ipc_forecast(
drug_data_agg_weekly, drug_data_weekly, drug_class,
out_of_sample, horizon, train_flag, logger, kind)
final_predict = ensemble_predict.query('final_fcst == "Y"')
safety_stock_df, df_corrections, \
df_corrections_111, drugs_max_to_lock_ipcv6, \
drug_rejects_ipcv6 = non_ipc_safety_stock_calc(
store_id, drug_data_weekly, reset_date, final_predict,
drug_class, corrections_flag,
corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff,
chronic_max_flag, train_flag, drug_type_list_v4,
v5_active_flag, v6_active_flag, v6_type_list,
v6_ptr_cut_off, rs_db_read, read_schema, logger)
# WAREHOUSE GENERIC SKU CONSOLIDATION
if wh_gen_consolidation == 'Y':
safety_stock_df, consolidation_log = stores_ss_consolidation(
safety_stock_df, rs_db_read, read_schema,
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point')
# GOODAID SAFETY STOCK MODIFICATION
if goodaid_ss_flag == 'Y':
safety_stock_df, good_aid_ss_log = update_ga_ss(
safety_stock_df, store_id, rs_db_read, read_schema,
ga_inv_weight, rest_inv_weight,
top_inv_weight, substition_type=['generic'],
min_column='safety_stock', ss_column='reorder_point',
max_column='order_upto_point', logger=logger)
# KEEP ALL GENERIC COMPOSITIONS IN STORE
if keep_all_generic_comp == 'Y':
safety_stock_df = generic_portfolio(safety_stock_df,
rs_db_read, read_schema,
logger)
# OMIT NPI DRUGS
if omit_npi == 'Y':
safety_stock_df = omit_npi_drugs(safety_stock_df, store_id,
reset_date, rs_db_read,
read_schema, logger)
# POST PROCESSING AND ORDER VALUE CALCULATION
safety_stock_df['percentile'] = 0.5
final_predict.rename(columns={'month_begin_dt': 'date'},
inplace=True)
drug_class, weekly_fcst, safety_stock_df, \
order_value = post_processing(
store_id, drug_class, final_predict,
safety_stock_df, rs_db_read,
read_schema, logger)
order_value_all = order_value_all.append(order_value,
ignore_index=True)
# WRITING TO RS-DB
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
# writing table ipc-forecast
predict['forecast_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
predict['store_id'] = store_id
predict['store_id'] = predict['store_id'].astype(int)
predict['drug_id'] = predict['drug_id'].astype(int)
predict['month_begin_dt'] = predict['month_begin_dt'].dt.date
predict['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
predict['created-by'] = 'etl-automation'
predict['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
predict['updated-by'] = 'etl-automation'
predict.columns = [c.replace('_', '-') for c in predict.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='non-ipc-predict',
schema=write_schema)
columns = list(table_info['column_name'])
predict = predict[columns] # required column order
logger.info("Writing to table: non-ipc-predict")
s3.write_df_to_db(df=predict,
table_name='non-ipc-predict',
db=rs_db_write, schema=write_schema)
# writing table non-ipc-safety-stock
safety_stock_df['store_id'] = safety_stock_df['store_id'].astype(int)
safety_stock_df['drug_id'] = safety_stock_df['drug_id'].astype(int)
safety_stock_df['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
safety_stock_df['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['created-by'] = 'etl-automation'
safety_stock_df['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
safety_stock_df['updated-by'] = 'etl-automation'
safety_stock_df.columns = [c.replace('_', '-') for c in
safety_stock_df.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='non-ipc-safety-stock',
schema=write_schema)
columns = list(table_info['column_name'])
safety_stock_df = safety_stock_df[columns] # required column order
logger.info("Writing to table: non-ipc-safety-stock")
s3.write_df_to_db(df=safety_stock_df,
table_name='non-ipc-safety-stock',
db=rs_db_write, schema=write_schema)
# writing table non-ipc-abc-xyz-class
drug_class['store_id'] = drug_class['store_id'].astype(int)
drug_class['drug_id'] = drug_class['drug_id'].astype(int)
drug_class['reset_date'] = dt.datetime.strptime(reset_date, '%Y-%m-%d').date()
drug_class['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['created-by'] = 'etl-automation'
drug_class['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
drug_class['updated-by'] = 'etl-automation'
drug_class.columns = [c.replace('_', '-') for c in
drug_class.columns]
table_info = helper.get_table_info(db=rs_db_write,
table_name='non-ipc-abc-xyz-class',
schema=write_schema)
columns = list(table_info['column_name'])
drug_class = drug_class[columns] # required column order
logger.info("Writing to table: non-ipc-abc-xyz-class")
s3.write_df_to_db(df=drug_class,
table_name='non-ipc-abc-xyz-class',
db=rs_db_write, schema=write_schema)
# to write ipc v6 tables ...
# UPLOADING MIN, SS, MAX in DOI-D
logger.info("Updating new SS to DrugOrderInfo-Data")
safety_stock_df.columns = [c.replace('-', '_') for c in
safety_stock_df.columns]
# prevent heavy outliers
ss_data_upload = safety_stock_df.query('order_upto_point < 1000')
ss_data_upload = ss_data_upload.query('order_upto_point > 0')[
['store_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = ['store_id', 'drug_id', 'corr_min',
'corr_ss', 'corr_max']
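                # Mapping sketch (assumption, for illustration only): DOID expects
                # corrected min/ss/max, so the safety-stock columns are renamed, e.g. a row
                #   (store 42, drug 101, safety_stock=2, reorder_point=4, order_upto_point=8)
                # is uploaded as (store 42, drug 101, corr_min=2, corr_ss=4, corr_max=8);
                # rows with order_upto_point outside (0, 1000) were dropped above as outliers.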
new_drug_entries_str, missed_entries_str = doid_update(
ss_data_upload, type_list, rs_db_write, write_schema,
logger)
new_drug_entries = new_drug_entries.append(new_drug_entries_str)
missed_entries = missed_entries.append(missed_entries_str)
logger.info("All writes to RS-DB completed!")
# INTERNAL TABLE SCHEDULE UPDATE - OPS ORACLE
logger.info(f"Rescheduling SID:{store_id} in OPS ORACLE")
if isinstance(reset_store_ops, pd.DataFrame):
content_type = 74
object_id = reset_store_ops.loc[
reset_store_ops['store_id'] == store_id, 'object_id'].unique()
for obj in object_id:
request_body = {"object_id": int(obj), "content_type": content_type}
api_response, _ = django.django_model_execution_log_create_api(
request_body)
reset_store_ops.loc[
reset_store_ops['object_id'] == obj,
'api_call_response'] = api_response
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"Non-IPC code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"Non-IPC code execution status: {status}")
return status, order_value_all, new_drug_entries, missed_entries
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str,
required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
parser.add_argument('-d', '--debug_mode', default="N", type=str,
required=False)
parser.add_argument('-exsto', '--exclude_stores',
default=[52, 60, 92, 243, 281], nargs='+', type=int,
required=False)
parser.add_argument('-gad', '--gaid_flag', default="Y", type=str,
required=False)
parser.add_argument('-giw', '--gaid_inv_wt', default=0.5, type=float,
required=False)
parser.add_argument('-riw', '--rest_inv_wt', default=0.0, type=float,
required=False)
parser.add_argument('-tiw', '--top_inv_wt', default=1, type=float,
required=False)
parser.add_argument('-cmf', '--chronic_max_flag', default="N", type=str,
required=False)
parser.add_argument('-wgc', '--wh_gen_consld', default="Y", type=str,
required=False)
parser.add_argument('-v5', '--v5_active_flag', default="N", type=str,
required=False)
parser.add_argument('-v6', '--v6_active_flag', default="N", type=str,
required=False)
parser.add_argument('-v6lst', '--v6_type_list',
default=['ethical', 'generic', 'others'], nargs='+',
type=str, required=False)
parser.add_argument('-v6ptr', '--v6_ptr_cut_off', default=400, type=int,
required=False)
parser.add_argument('-rd', '--reset_date', default="YYYY-MM-DD",
type=str,
required=False)
parser.add_argument('-rs', '--reset_stores',
default=[0], nargs='+', type=int,
required=False)
parser.add_argument('-v3', '--v3_active_flag', default="N", type=str,
required=False)
parser.add_argument('-v3sp', '--corr_selling_prob_cutoff',
default="{'ma_less_than_2': 0.40, 'ma_more_than_2' : 0.40}",
type=str, required=False)
parser.add_argument('-v3cp', '--corr_cumm_prob_cutoff',
default="{'ma_less_than_2':0.50,'ma_more_than_2':0.63}",
type=str, required=False)
parser.add_argument('-v4tl', '--v4_drug_type_list',
default="{'generic':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',"
"'ethical':'{0:[0,0,0], 1:[0,0,1], 2:[0,1,2],3:[1,2,3]}',"
"'others':'{0:[0,0,0], 1:[0,1,2], 2:[0,1,2],3:[1,2,3]}'}",
type=str, required=False)
parser.add_argument('-wct', '--agg_week_cnt', default=4, type=int, required=False)
parser.add_argument('-k', '--kind', default='mae', type=str, required=False)
parser.add_argument('-npi', '--omit_npi', default='N', type=str, required=False)
parser.add_argument('-kagc', '--keep_all_generic_comp', default='N', type=str,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
debug_mode = args.debug_mode
# JOB EXCLUSIVE PARAMS
exclude_stores = args.exclude_stores
goodaid_ss_flag = args.gaid_flag
ga_inv_weight = args.gaid_inv_wt
rest_inv_weight = args.rest_inv_wt
    top_inv_weight = args.top_inv_wt
chronic_max_flag = args.chronic_max_flag
wh_gen_consolidation = args.wh_gen_consld
v5_active_flag = args.v5_active_flag
v6_active_flag = args.v6_active_flag
v6_type_list = args.v6_type_list
v6_ptr_cut_off = args.v6_ptr_cut_off
reset_date = args.reset_date
reset_stores = args.reset_stores
v3_active_flag = args.v3_active_flag
corrections_selling_probability_cutoff = args.corr_selling_prob_cutoff
corrections_cumulative_probability_cutoff = args.corr_cumm_prob_cutoff
drug_type_list_v4 = args.v4_drug_type_list
agg_week_cnt = args.agg_week_cnt
kind = args.kind
omit_npi = args.omit_npi
keep_all_generic_comp = args.keep_all_generic_comp
# EVALUATE REQUIRED JSON PARAMS
corrections_selling_probability_cutoff = literal_eval(
corrections_selling_probability_cutoff)
corrections_cumulative_probability_cutoff = literal_eval(
corrections_cumulative_probability_cutoff)
drug_type_list_v4 = literal_eval(drug_type_list_v4)
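    # Illustration (hypothetical input): literal_eval turns the CLI string into a
    # Python dict, e.g.
    #   "{'ma_less_than_2': 0.40, 'ma_more_than_2': 0.40}"
    #   -> {'ma_less_than_2': 0.40, 'ma_more_than_2': 0.40}
    # It evaluates literals only, so arbitrary expressions in the argument are rejected.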
logger = get_logger()
s3 = S3()
django = Django()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
if reset_date == 'YYYY-MM-DD': # Take current date
reset_date = dt.date.today().strftime("%Y-%m-%d")
if reset_stores == [0]: # Fetch scheduled Non-IPC stores from OPS ORACLE
store_query = """
select "id", name, "opened-at" as opened_at
from "{read_schema}".stores
where name <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in {0}
""".format(
str(exclude_stores).replace('[', '(').replace(']', ')'),
read_schema=read_schema)
stores = rs_db_read.get_df(store_query)
# considering reset of stores aged (3 months < age < 1 year)
store_id = stores.loc[
(dt.datetime.now() - stores['opened_at'] > dt.timedelta(days=90)) &
(dt.datetime.now() - stores['opened_at'] <= dt.timedelta(days=365)),
'id'].values
# QUERY TO GET SCHEDULED STORES AND TYPE FROM OPS ORACLE
pg_internal = PostGre(is_internal=True)
pg_internal.open_connection()
reset_store_query = """
SELECT
"ssr"."id" as object_id,
"s"."bpos_store_id" as store_id,
"dc"."slug" as type,
"ssr"."drug_grade"
FROM
"safety_stock_reset_drug_category_mapping" ssr
INNER JOIN "ops_store_manifest" osm
ON ( "ssr"."ops_store_manifest_id" = "osm"."id" )
INNER JOIN "retail_store" s
ON ( "osm"."store_id" = "s"."id" )
INNER JOIN "drug_category" dc
ON ( "ssr"."drug_category_id" = "dc"."id")
WHERE
(
( "ssr"."should_run_daily" = TRUE OR
"ssr"."trigger_dates" && ARRAY[ date('{reset_date}')] )
AND "ssr"."is_auto_generate" = TRUE
AND "osm"."is_active" = TRUE
AND "osm"."is_generate_safety_stock_reset" = TRUE
AND "dc"."is_safety_stock_reset_enabled" = TRUE
AND "dc"."is_active" = TRUE
AND s.bpos_store_id in {store_list}
)
""".format(
store_list=str(list(store_id)).replace('[', '(').replace(']',')'),
reset_date=reset_date)
reset_store_ops = pd.read_sql_query(reset_store_query,
pg_internal.connection)
pg_internal.close_connection()
reset_store_ops['api_call_response'] = False
reset_stores = reset_store_ops['store_id'].unique()
type_list = None
else:
type_list = "('ethical', 'ayurvedic', 'generic', 'discontinued-products', " \
"'banned', 'general', 'high-value-ethical', 'baby-product'," \
" 'surgical', 'otc', 'glucose-test-kit', 'category-2', " \
"'category-1', 'category-4', 'baby-food', '', 'category-3')"
reset_store_ops = None
""" calling the main function """
status, order_value_all, new_drug_entries, \
missed_entries = main(
debug_mode, reset_stores, reset_date, type_list, reset_store_ops,
goodaid_ss_flag, ga_inv_weight, rest_inv_weight, top_inv_weight,
chronic_max_flag, wh_gen_consolidation, v5_active_flag,
v6_active_flag, v6_type_list, v6_ptr_cut_off, v3_active_flag,
omit_npi, corrections_selling_probability_cutoff,
corrections_cumulative_probability_cutoff, drug_type_list_v4,
keep_all_generic_comp, agg_week_cnt, kind, rs_db_read, rs_db_write,
read_schema, write_schema, s3, django, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# save email attachements to s3
order_value_all_uri = s3.save_df_to_s3(order_value_all,
file_name=f"order_value_all_{reset_date}.csv")
new_drug_entries_uri = s3.save_df_to_s3(new_drug_entries,
file_name=f"new_drug_entries_{reset_date}.csv")
missed_entries_uri = s3.save_df_to_s3(missed_entries,
file_name=f"missed_entries_{reset_date}.csv")
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"Non-IPC SS Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Reset Stores: {reset_stores}
Job Params: {args}
""",
to_emails=email_to, file_uris=[order_value_all_uri,
new_drug_entries_uri,
missed_entries_uri])
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/non_ipc/non_ipc_ss_main.py | non_ipc_ss_main.py |
import boto3
import sys
import os
import argparse
import pandas as pd
import numpy as np
from datetime import datetime
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
client = boto3.client('glue')
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
db = DB()
db.open_connection()
s3 = S3()
record_list = []
table_name = 'audit-log'
schema = 'prod2-generico'
table_info = helper.get_table_info(db=db, table_name=table_name, schema=schema)
job_list_response = client.list_jobs(MaxResults=500)
job_list = job_list_response['JobNames']
for i in job_list:
if i.__contains__(env):
query = """select max("started-on") from "{}"."{}" where "job-name" = '{}';""".format(schema, table_name, i)
df_init = db.get_df(query)
max_processing_date = df_init.values[0][0]
response = client.get_job_runs(
JobName=i
)
response_list = response['JobRuns']
for i in response_list:
if max_processing_date is None:
record_list.append(i)
            # compare job-run start time with the last processed timestamp (both as strings)
            elif i['StartedOn'].strftime('%Y-%m-%d %H:%M:%S') > datetime.utcfromtimestamp((max_processing_date - np.datetime64('1970-01-01T00:00:00')) / np.timedelta64(1, 's')).strftime('%Y-%m-%d %H:%M:%S'):
record_list.append(i)
df = pd.DataFrame(record_list)
if df.empty:
logger.info(df)
else:
logger.info(df.columns)
df.drop(['Arguments', 'PredecessorRuns', 'AllocatedCapacity', 'Timeout', 'LogGroupName', 'GlueVersion'],
axis=1, inplace=True)
column_list = df.columns
if 'ErrorMessage' not in column_list:
df['ErrorMessage'] = 'NULL'
if 'TriggerName' not in column_list:
df['TriggerName'] = 'NULL'
    if 'WorkerType' in column_list or 'NumberOfWorkers' in column_list:
        df.drop(columns=['WorkerType', 'NumberOfWorkers'], errors='ignore', inplace=True)
logger.info(df.columns)
df = df.reindex(columns=['Id', 'Attempt', 'TriggerName','JobName', 'StartedOn', 'LastModifiedOn', 'CompletedOn',
'JobRunState', 'ExecutionTime', 'ErrorMessage', 'MaxCapacity'])
df.columns = ['id', 'attempt', 'trigger-name', 'job-name', 'started-on', 'last-modified-on', 'completed-on',
'job-run-state', 'execution-time', 'error-message', 'max-capacity']
s3.write_df_to_db(df=df[table_info['column_name']], table_name=table_name, db=db,
schema=schema) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/audit_log/update_audit_log.py | update_audit_log.py |
# !/usr/bin/env python
# coding: utf-8
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from datetime import datetime
from datetime import timedelta
from dateutil.tz import gettz
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-rd', '--runtime_date_exp', default="0101-01-01", type=str, required=False)
# any_period_run should be "yes" in case of manual run for historic data
parser.add_argument('-apr', '--any_period_run', default="no", type=str, required=False)
# start date should be assigned in case any_period_run =='yes'
parser.add_argument('-sd', '--start_date', default="0101-01-01", type=str, required=False)
# end date should be assigned in case any_period_run =='yes'
parser.add_argument('-ed', '--end_date', default="0101-01-01", type=str, required=False)
# run_type should be "delete" in case some past entries has to be deleted and
# "update" in case selected entries need update
parser.add_argument('-rt', '--run_type', default="no", type=str, required=False)
# Upload type should be "normal" on daily run, in case of manual update value should be changed
parser.add_argument('-ut', '--upload_type', default="normal", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
runtime_date_exp = args.runtime_date_exp
email_to = args.email_to
any_period_run = args.any_period_run
start_date = args.start_date
end_date = args.end_date
run_type = args.run_type
upload_type = args.upload_type
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
# Run date
if runtime_date_exp == '0101-01-01':
# Timezone aware
run_date = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d")
else:
run_date = runtime_date_exp
# runtime_date = '2018-09-01'
# runtime_date = '2021-09-01'
logger.info("Running for {}".format(run_date))
# Period end date
# Paramatrize it
if any_period_run == 'no':
period_end_d_ts = datetime.strptime(run_date, '%Y-%m-%d') - timedelta(days=1)
period_end_d = period_end_d_ts.strftime('%Y-%m-%d')
else:
period_end_d = end_date
logger.info("Run date minus 1 is {}".format(period_end_d))
# Read last list so that only new data to be uploaded
read_schema = 'prod2-generico'
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
past_date_q = f"""
SELECT
"search-date"
FROM
"cfr-searches-v2"
GROUP BY
"search-date"
"""
logger.info(past_date_q)
last_data_date = rs_db_write.get_df(query=past_date_q)
last_data_date.columns = [c.replace('-', '_') for c in last_data_date.columns]
logger.info(f"length of last_data_date: {len(last_data_date)}")
if any_period_run == 'no':
try:
last_s_date_max = pd.to_datetime(last_data_date['search_date']).max().strftime('%Y-%m-%d')
except ValueError:
last_s_date_max = '2000-06-01'
else:
last_s_date_max_ts = datetime.strptime(start_date, '%Y-%m-%d') - timedelta(days=1)
last_s_date_max = last_s_date_max_ts.strftime('%Y-%m-%d')
logger.info("Last date in last data for cfr searches is : {}".format(last_s_date_max))
# Remaining data to be fetched
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
search_q = """
SELECT
id,
`store-id`,
`created-at`,
`drug-id`,
`drug-name`,
`composition`,
`inventory-quantity`
FROM
`searches`
WHERE date(`created-at`) > '{0}'
and date(`created-at`) <= '{1}'
and `inventory-quantity` = 0
""".format(last_s_date_max, period_end_d)
search_q = search_q.replace('`', '"')
logger.info(search_q)
data_s_zero = rs_db_write.get_df(query=search_q)
data_s_zero.columns = [c.replace('-', '_') for c in data_s_zero.columns]
logger.info("New Searches data length is : {}".format(len(data_s_zero)))
data_s_zero['search_timestamp'] = pd.to_datetime(data_s_zero['created_at'])
data_s_zero['search_date'] = pd.to_datetime(data_s_zero['search_timestamp'].dt.normalize())
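# Illustration (assumed behaviour): .dt.normalize() truncates the timestamp to
# midnight, e.g. 2021-09-01 18:42:10 -> 2021-09-01 00:00:00, giving a
# date-level key while keeping the datetime64 dtype for later comparisons.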
# logger.info("Min date in new data is {} and max date is "
# "{}".format(data_s['search_date'].min().strftime("%Y-%m-%d")
# , data_s['search_date'].max().strftime("%Y-%m-%d")))
##################################################
# Now loss calculation starts
##################################################
# MySQL drugs table
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
drugs_q = """
SELECT
id as drug_id,
`drug-name`,
`composition`,
category as drug_category,
type as drug_type,
`repeatability-index`
FROM
drugs
"""
drugs_q = drugs_q.replace('`', '"')
logger.info(drugs_q)
data_drugs = rs_db_write.get_df(query=drugs_q)
data_drugs.columns = [c.replace('-', '_') for c in data_drugs.columns]
logger.info("Drug master length is : {}".format(len(data_drugs)))
# Join PR data with drugs
data_s_zero = data_s_zero.merge(data_drugs, how='left', on=['drug_id'])
logger.info("New Searches data length with zero inventory is : {}".format(len(data_s_zero)))
# Group at store, date, timestamp, drug level
data_s_zero_unique = data_s_zero.groupby(['store_id', 'search_date', 'search_timestamp',
'drug_id', 'drug_name_y', 'composition_y',
'repeatability_index', 'drug_category', 'drug_type']
)['id'].count().reset_index().rename(columns={'id': 'search_count'})
# Todo - to be checked later, since it doesn't make a row count difference currently,
# and drugs table used for other joins also
"""
Merging with the drugs table and the groupby above could be done in a single query;
that would save compute time and memory.
"""
logger.info("Zero inventory search data with grouping at store, date, timestamp, drug is length : "
"{}".format(len(data_s_zero_unique)))
logger.info("Unique length is {}".format(len(data_s_zero_unique[['store_id', 'search_timestamp',
'drug_id', 'drug_name_y']].drop_duplicates())))
#########################################
# Sales data
########################################
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
sales_q = """
SELECT
`store-id`,
`drug-id`,
date(`created-at`) as sales_date,
`created-at` as sold_timestamp,
sum(quantity) as sold_quantity
FROM
sales
WHERE date(`created-at`) >= '{0}'
and date(`created-at`) <= '{1}'
and `bill-flag` = 'gross'
GROUP BY
`store-id`,
`drug-id`,
date(`created-at`),
`created-at`
""".format(last_s_date_max, period_end_d)
sales_q = sales_q.replace('`', '"')
logger.info(sales_q)
data_sales = rs_db_write.get_df(query=sales_q)
data_sales.columns = [c.replace('-', '_') for c in data_sales.columns]
logger.info(len(data_sales))
logger.info("Sales data length is {}".format(len(data_sales)))
data_sales['sold_timestamp'] = pd.to_datetime(data_sales['sold_timestamp'])
data_sales['sales_date'] = pd.to_datetime(data_sales['sales_date'])
# Merge
data_sales_merge = data_s_zero_unique[['store_id', 'drug_id', 'search_date', 'search_timestamp']].merge(
data_sales[['store_id', 'drug_id', 'sales_date', 'sold_timestamp']], how='inner', on=['store_id', 'drug_id'])
logger.info("Length of search and sales data merged (containing all combinations) is "
"{}".format(len(data_sales_merge)))
data_sales_merge['sold_timestamp_diff'] = (data_sales_merge['sold_timestamp'] -
data_sales_merge['search_timestamp']
).astype('timedelta64[s]')
logger.info("Data sales merge head is {}".format(data_sales_merge.head()))
# Date same
data_sales_merge = data_sales_merge[data_sales_merge['sales_date'] == data_sales_merge['search_date']].copy()
logger.info("Data sales merge length after filtering same date is {}".format(len(data_sales_merge)))
# Time diff > 0
data_sales_merge = data_sales_merge[data_sales_merge['sold_timestamp_diff'] > 0].copy()
logger.info("Data sales merge length after filtering positive time difference is "
"{}".format(len(data_sales_merge)))
# Flag
data_sales_merge_unique = data_sales_merge.drop_duplicates(
subset=['store_id', 'drug_id', 'search_date', 'search_timestamp'])
data_sales_merge_unique['drug_sold_same_day_flag'] = 1
logger.info("Data sales merge length after dropping duplicates at search timestamp is "
"{}".format(len(data_sales_merge_unique)))
# Merge with main dataframe
data_s_zero_unique = data_s_zero_unique.merge(
data_sales_merge_unique[['store_id', 'drug_id',
'search_date', 'search_timestamp',
'drug_sold_same_day_flag']], how='left',
on=['store_id', 'drug_id', 'search_date', 'search_timestamp'])
data_s_zero_unique['drug_sold_same_day_flag'] = data_s_zero_unique['drug_sold_same_day_flag'].fillna(0)
logger.info("Zero inventory search data after merging with sold data, length : "
"{}".format(len(data_s_zero_unique)))
###############################################
# Composition sold same day (but after search)
###############################################
data_sales_comp = data_sales.merge(data_drugs[['drug_id', 'composition']], how='left', on='drug_id')
data_sales_comp = data_sales_comp[~data_sales_comp['composition'].isnull()].copy()
data_sales_comp = data_sales_comp[data_sales_comp['composition'] != ''].copy()
logger.info("Composition sales data length is {}".format(len(data_sales_comp)))
data_sales_comp_unique = data_sales_comp[
['store_id', 'composition', 'sales_date', 'sold_timestamp']].drop_duplicates()
logger.info("Composition sales data unique - length is {}".format(len(data_sales_comp_unique)))
# Merge
data_sales_comp_merge = data_s_zero_unique[['store_id', 'drug_id', 'composition_y', 'search_date',
'search_timestamp']].merge(
data_sales_comp_unique[['store_id', 'composition', 'sales_date', 'sold_timestamp']], how='inner',
left_on=['store_id', 'composition_y'],
right_on=['store_id', 'composition'])
data_sales_comp_merge = data_sales_comp_merge.drop(columns=['composition'], axis=1)
logger.info(
"Length of search and comp sales data merged (containing all combinations) is {}".format(
len(data_sales_comp_merge)))
data_sales_comp_merge['sold_timestamp_diff'] = (data_sales_comp_merge['sold_timestamp'] -
data_sales_comp_merge['search_timestamp']
).astype('timedelta64[s]')
logger.info("Data comp sales merge head is {}".format(data_sales_comp_merge.head()))
# Date same
data_sales_comp_merge = data_sales_comp_merge[
data_sales_comp_merge['sales_date'] == data_sales_comp_merge['search_date']].copy()
logger.info("Data comp sales merge length after filtering same date is "
"{}".format(len(data_sales_comp_merge)))
# Time diff > 0
data_sales_comp_merge = data_sales_comp_merge[data_sales_comp_merge['sold_timestamp_diff'] > 0].copy()
logger.info("Data comp sales merge length after filtering positive time difference is "
"{}".format(len(data_sales_comp_merge)))
# Flag
data_sales_comp_merge_unique = data_sales_comp_merge.drop_duplicates(
subset=['store_id', 'drug_id', 'search_timestamp'])
data_sales_comp_merge_unique['comp_sold_same_day_flag'] = 1
logger.info("Data comp sales merge length after dropping duplicates at search timestamp is "
"{}".format(len(data_sales_comp_merge_unique)))
# Merge with main dataframe
data_s_zero_unique = data_s_zero_unique.merge(
data_sales_comp_merge_unique[['store_id', 'drug_id', 'search_timestamp',
'comp_sold_same_day_flag']], how='left',
on=['store_id', 'drug_id', 'search_timestamp'])
data_s_zero_unique['comp_sold_same_day_flag'] = data_s_zero_unique['comp_sold_same_day_flag'].fillna(0)
logger.info("Zero inventory search data after merging with comp sold data, length : "
"{}".format(len(data_s_zero_unique)))
###################################
# Composition sold in window
##################################
data_sales_comp = data_sales.merge(data_drugs[['drug_id', 'composition']], how='left', on='drug_id')
data_sales_comp = data_sales_comp[~data_sales_comp['composition'].isnull()].copy()
data_sales_comp = data_sales_comp[data_sales_comp['composition'] != ''].copy()
logger.info("Window-Composition sales data length is {}".format(len(data_sales_comp)))
data_sales_comp_unique = data_sales_comp[['store_id', 'composition', 'sold_timestamp']].drop_duplicates()
logger.info("Window-Composition sales data unique - length is {}".format(len(data_sales_comp_unique)))
# Merge
data_sales_comp_merge = data_s_zero_unique[['store_id', 'drug_id', 'composition_y', 'search_timestamp']].merge(
data_sales_comp_unique[['store_id', 'composition', 'sold_timestamp']], how='inner',
left_on=['store_id', 'composition_y'],
right_on=['store_id', 'composition'])
data_sales_comp_merge = data_sales_comp_merge.drop(columns=['composition'], axis=1)
logger.info(
"Window-Length of search and comp sales data merged (containing all combinations) is {}".format(
len(data_sales_comp_merge)))
data_sales_comp_merge['sold_timestamp_diff'] = (data_sales_comp_merge['sold_timestamp'] -
data_sales_comp_merge['search_timestamp']
).astype('timedelta64[s]')
logger.info("Window-Data comp sales merge head is {}".format(data_sales_comp_merge.head()))
# Time diff > 0
data_sales_comp_merge = data_sales_comp_merge[data_sales_comp_merge['sold_timestamp_diff'] > 0].copy()
logger.info("Window-Data comp sales merge length after filtering positive time difference is "
"{}".format(len(data_sales_comp_merge)))
# Time diff window 30 minutes = 1800 seconds
data_sales_comp_merge = data_sales_comp_merge[data_sales_comp_merge['sold_timestamp_diff'].between(1, 1800)].copy()
logger.info(
"Window-Data comp sales merge length after filtering for window (30minutes) is {}".format(
len(data_sales_comp_merge)))
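# Note (interpretation): the 1..1800-second filter keeps only compositions sold
# within 30 minutes *after* the zero-inventory search, e.g. a search at 12:00
# matched to a sale at 12:25 qualifies, while a sale at 13:05 does not.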
# Flag
data_sales_comp_merge_unique = data_sales_comp_merge.drop_duplicates(
subset=['store_id', 'drug_id', 'search_timestamp'])
data_sales_comp_merge_unique['comp_sold_window_flag'] = 1
logger.info("Window-Data comp sales merge length after dropping duplicates at search timestamp is "
"{}".format(len(data_sales_comp_merge_unique)))
# Merge with main dataframe
data_s_zero_unique = data_s_zero_unique.merge(
data_sales_comp_merge_unique[['store_id', 'drug_id', 'search_timestamp',
'comp_sold_window_flag']], how='left',
on=['store_id', 'drug_id', 'search_timestamp'])
data_s_zero_unique['comp_sold_window_flag'] = data_s_zero_unique['comp_sold_window_flag'].fillna(0)
logger.info("Window-Zero inventory search data after merging with comp sold data, length : "
"{}".format(len(data_s_zero_unique)))
######################################################
# PR Made same day (but after search)
######################################################
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
pr_q = """
SELECT
`store-id`,
date(`created-at`) AS pr_date,
`drug-id`,
`created-at` as pr_timestamp
FROM `patient-requests`
WHERE date(`created-at`) >= '{0}'
and date(`created-at`) <= '{1}'
GROUP BY
`store-id`,
date(`created-at`),
`drug-id`,
`created-at`
""".format(last_s_date_max, period_end_d)
pr_q = pr_q.replace('`', '"')
logger.info(pr_q)
data_pr = rs_db_write.get_df(query=pr_q)
data_pr.columns = [c.replace('-', '_') for c in data_pr.columns]
logger.info(len(data_pr))
logger.info("PR data length is {}".format(len(data_pr)))
data_pr['pr_timestamp'] = pd.to_datetime(data_pr['pr_timestamp'])
data_pr['pr_date'] = pd.to_datetime(data_pr['pr_date'])
# Merge
data_pr_merge = data_s_zero_unique[['store_id', 'drug_id', 'search_date', 'search_timestamp']].merge(
data_pr[['store_id', 'drug_id', 'pr_date', 'pr_timestamp']], how='inner', on=['store_id', 'drug_id'])
logger.info("Length of search and PR data merged (containing all combinations) is "
"{}".format(len(data_pr_merge)))
data_pr_merge['pr_timestamp_diff'] = (data_pr_merge['pr_timestamp'] -
data_pr_merge['search_timestamp']
).astype('timedelta64[s]')
logger.info("Data pr merge head is {}".format(data_pr_merge.head()))
# Date same
data_pr_merge = data_pr_merge[data_pr_merge['pr_date'] == data_pr_merge['search_date']].copy()
logger.info("Data pr merge length after filtering same date is {}".format(len(data_pr_merge)))
# Time diff > 0
data_pr_merge = data_pr_merge[data_pr_merge['pr_timestamp_diff'] > 0].copy()
logger.info("Data pr merge length after filtering positive time difference is {}".format(len(data_pr_merge)))
# Flag
data_pr_merge_unique = data_pr_merge.drop_duplicates(subset=['store_id', 'drug_id', 'search_timestamp'])
data_pr_merge_unique['pr_same_day_flag'] = 1
logger.info("Data pr merge length after dropping duplicates at search timestamp is "
"{}".format(len(data_pr_merge_unique)))
# Merge with main dataframe
data_s_zero_unique = data_s_zero_unique.merge(data_pr_merge_unique[['store_id', 'drug_id',
'search_timestamp',
'pr_same_day_flag']],
how='left',
on=['store_id', 'drug_id', 'search_timestamp'])
data_s_zero_unique['pr_same_day_flag'] = data_s_zero_unique['pr_same_day_flag'].fillna(0)
logger.info("Zero inventory search data after merging with pr data, length : "
"{}".format(len(data_s_zero_unique)))
###################################
# Composition PR same day (but after search)
##################################
data_pr_comp = data_pr.merge(data_drugs[['drug_id', 'composition']], how='left', on='drug_id')
data_pr_comp = data_pr_comp[~data_pr_comp['composition'].isnull()].copy()
data_pr_comp = data_pr_comp[data_pr_comp['composition'] != ''].copy()
logger.info("Composition pr data length is {}".format(len(data_pr_comp)))
data_pr_comp_unique = data_pr_comp[['store_id', 'composition',
'pr_date', 'pr_timestamp']].drop_duplicates()
logger.info("Composition pr data unique - length is {}".format(len(data_pr_comp_unique)))
# Merge
data_pr_comp_merge = data_s_zero_unique[
['store_id', 'drug_id', 'composition_y', 'search_date', 'search_timestamp']].merge(
data_pr_comp_unique[['store_id', 'composition', 'pr_date', 'pr_timestamp']], how='inner',
left_on=['store_id', 'composition_y'],
right_on=['store_id', 'composition'])
data_pr_comp_merge = data_pr_comp_merge.drop(columns=['composition'], axis=1)
logger.info(
"Length of search and comp pr data merged (containing all combinations) is "
"{}".format(len(data_pr_comp_merge)))
data_pr_comp_merge['pr_timestamp_diff'] = (data_pr_comp_merge['pr_timestamp'] -
data_pr_comp_merge['search_timestamp']
).astype('timedelta64[s]')
logger.info("Data comp pr merge head is {}".format(data_pr_comp_merge.head()))
# Date same
data_pr_comp_merge = data_pr_comp_merge[data_pr_comp_merge['pr_date'] == data_pr_comp_merge['search_date']].copy()
logger.info("Data comp pr merge length after filtering same date is {}".format(len(data_pr_comp_merge)))
# Time diff > 0
data_pr_comp_merge = data_pr_comp_merge[data_pr_comp_merge['pr_timestamp_diff'] > 0].copy()
logger.info("Data comp pr merge length after filtering positive time difference is "
"{}".format(len(data_pr_comp_merge)))
# Flag
data_pr_comp_merge_unique = data_pr_comp_merge.drop_duplicates(subset=['store_id', 'drug_id', 'search_timestamp'])
data_pr_comp_merge_unique['comp_pr_same_day_flag'] = 1
logger.info("Data comp pr merge length after dropping duplicates at search timestamp is {}".format(
len(data_pr_comp_merge_unique)))
# Merge with main dataframe
data_s_zero_unique = data_s_zero_unique.merge(data_pr_comp_merge_unique[['store_id', 'drug_id',
'search_timestamp',
'comp_pr_same_day_flag']],
how='left',
on=['store_id', 'drug_id', 'search_timestamp'])
data_s_zero_unique['comp_pr_same_day_flag'] = data_s_zero_unique['comp_pr_same_day_flag'].fillna(0)
logger.info("Zero inventory search data after merging with comp pr data, length : "
"{}".format(len(data_s_zero_unique)))
#################################################
# MS made same day (but after search)
#################################################
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
ms_q = """
SELECT
`store-id`,
date(`created-at`) AS ms_date,
`drug-id`,
`created-at` as ms_timestamp
FROM `short-book-1`
WHERE `auto-short` = 1
and `patient-id` != 4480
and date(`created-at`) >= '{0}'
and date(`created-at`) <= '{1}'
GROUP BY
`store-id`,
date(`created-at`),
`drug-id`,
`created-at`
""".format(last_s_date_max, period_end_d)
ms_q = ms_q.replace('`', '"')
logger.info(ms_q)
data_ms = rs_db_write.get_df(query=ms_q)
data_ms.columns = [c.replace('-', '_') for c in data_ms.columns]
logger.info(len(data_ms))
logger.info("MS data length is {}".format(len(data_pr)))
data_ms['ms_timestamp'] = pd.to_datetime(data_ms['ms_timestamp'])
data_ms['ms_date'] = pd.to_datetime(data_ms['ms_date'])
# Merge
data_ms_merge = data_s_zero_unique[['store_id', 'drug_id', 'search_date', 'search_timestamp']].merge(
data_ms[['store_id', 'drug_id', 'ms_date', 'ms_timestamp']], how='inner', on=['store_id', 'drug_id'])
logger.info("Length of search and ms data merged (containing all combinations) is "
"{}".format(len(data_ms_merge)))
data_ms_merge['ms_timestamp_diff'] = (data_ms_merge['ms_timestamp'] -
data_ms_merge['search_timestamp']
).astype('timedelta64[s]')
logger.info("Data ms merge head is {}".format(data_ms_merge.head()))
# Date same
data_ms_merge = data_ms_merge[data_ms_merge['ms_date'] == data_ms_merge['search_date']].copy()
logger.info("Data ms merge length after filtering same date is {}".format(len(data_ms_merge)))
# Time diff > 0
data_ms_merge = data_ms_merge[data_ms_merge['ms_timestamp_diff'] > 0].copy()
logger.info("Data ms merge length after filtering positive time difference is "
"{}".format(len(data_ms_merge)))
# Flag
data_ms_merge_unique = data_ms_merge.drop_duplicates(subset=['store_id', 'drug_id', 'search_timestamp'])
data_ms_merge_unique['ms_same_day_flag'] = 1
logger.info("Data ms merge length after dropping duplicates at search timestamp is "
"{}".format(len(data_ms_merge_unique)))
# Merge with main dataframe
data_s_zero_unique = data_s_zero_unique.merge(data_ms_merge_unique[['store_id', 'drug_id', 'search_timestamp',
'ms_same_day_flag']], how='left',
on=['store_id', 'drug_id', 'search_timestamp'])
data_s_zero_unique['ms_same_day_flag'] = data_s_zero_unique['ms_same_day_flag'].fillna(0)
logger.info("Zero inventory search data after merging with ms data, length : "
"{}".format(len(data_s_zero_unique)))
######################################################
# Short-book present same day (but after search)
#####################################################
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
sb_q = """
SELECT
`store-id`,
date(`created-at`) as shortbook_date,
`drug-id`,
`created-at` as shortbook_timestamp
FROM `short-book-1`
WHERE `auto-short` = 0
and `auto-generated` = 0
and date(`created-at`) >= '{0}'
and date(`created-at`) <= '{1}'
GROUP BY
`store-id`,
date(`created-at`),
`drug-id`,
`created-at`
""".format(last_s_date_max, period_end_d)
sb_q = sb_q.replace('`', '"')
logger.info(sb_q)
data_sb = rs_db_write.get_df(query=sb_q)
data_sb.columns = [c.replace('-', '_') for c in data_sb.columns]
logger.info(len(data_sb))
logger.info("short-book data length is {}".format(len(data_sb)))
data_sb['shortbook_timestamp'] = pd.to_datetime(data_sb['shortbook_timestamp'])
data_sb['shortbook_date'] = pd.to_datetime(data_sb['shortbook_date'])
# Merge
data_sb_merge = data_s_zero_unique[['store_id', 'drug_id', 'search_date', 'search_timestamp']].merge(
data_sb[['store_id', 'drug_id', 'shortbook_date', 'shortbook_timestamp']], how='inner',
on=['store_id', 'drug_id'])
logger.info(
"Length of search and shortbook data merged (containing all combinations) is "
"{}".format(len(data_sb_merge)))
data_sb_merge['sb_timestamp_diff'] = (data_sb_merge['shortbook_timestamp'] -
data_sb_merge['search_timestamp']
).astype('timedelta64[s]')
logger.info("Data shortbook merge head is {}".format(data_sb_merge.head()))
# Date same
data_sb_merge = data_sb_merge[data_sb_merge['shortbook_date'] == data_sb_merge['search_date']].copy()
logger.info("Data shortbook merge length after filtering same date is {}".format(len(data_sb_merge)))
# Time diff > 0
data_sb_merge = data_sb_merge[data_sb_merge['sb_timestamp_diff'] > 0].copy()
logger.info("Data shortbook merge length after filtering positive time difference is "
"{}".format(len(data_sb_merge)))
# Flag
data_sb_merge_unique = data_sb_merge.drop_duplicates(subset=['store_id', 'drug_id', 'search_timestamp'])
data_sb_merge_unique['shortbook_present'] = 1
logger.info("Data shortbook merge length after dropping duplicates at search timestamp is "
"{}".format(len(data_sb_merge_unique)))
# Merge with main dataframe
data_s_zero_unique = data_s_zero_unique.merge(data_sb_merge_unique[['store_id', 'drug_id',
'search_timestamp',
'shortbook_present']],
how='left',
on=['store_id', 'drug_id', 'search_timestamp'])
data_s_zero_unique['shortbook_present'] = data_s_zero_unique['shortbook_present'].fillna(0)
logger.info("Zero inventory search data after merging with sb data, length : "
"{}".format(len(data_s_zero_unique)))
#########################################
# Group at store, date, drug level
#########################################
data_s_zero_grp = data_s_zero_unique.groupby(['store_id', 'search_date', 'drug_id',
                                              'drug_name_y', 'composition_y',
                                              'repeatability_index',
                                              'drug_category', 'drug_type']
                                             )[['search_count', 'drug_sold_same_day_flag',
                                                'comp_sold_same_day_flag',
                                                'comp_sold_window_flag',
                                                'pr_same_day_flag',
                                                'comp_pr_same_day_flag',
                                                'ms_same_day_flag',
                                                'shortbook_present']].sum().reset_index()
logger.info(
"Zero inventory search data with grouping at store, date, drug is length : "
"{}".format(len(data_s_zero_grp)))
# Convert some columns into binary columns
for col in ['drug_sold_same_day_flag', 'comp_sold_same_day_flag',
'comp_sold_window_flag', 'pr_same_day_flag', 'comp_pr_same_day_flag',
'ms_same_day_flag', 'shortbook_present']:
data_s_zero_grp[col] = np.where(data_s_zero_grp[col] > 0, 1, 0)
logger.info(
"Zero inventory search data after taking binary columns - length : "
"{}".format(len(data_s_zero_grp)))
# Populate rate for those not fulfilled
drugs = tuple(list(data_s_zero_grp['drug_id'].dropna().drop_duplicates().astype(int)))
logger.info("Count of drugs to look up in historical sales is : {}".format(len(drugs)))
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
rate_q = """
SELECT
"drug-id",
SUM("revenue-value")/SUM("quantity") AS avg_rate_system
FROM
"sales"
WHERE
date("created-at") <= '{0}'
and "bill-flag" = 'gross'
GROUP BY
"drug-id"
""".format(period_end_d)
rate_q = rate_q.replace('`', '"')
logger.info(rate_q)
data_d = rs_db_write.get_df(query=rate_q)
data_d.columns = [c.replace('-', '_') for c in data_d.columns]
logger.info(len(data_d))
logger.info("Count of drugs to looked up successfully in historical sales is : "
"{}".format(len(data_d)))
# Join with main data
data_s_zero_grp = data_s_zero_grp.merge(data_d, how='left', on=['drug_id'])
data_s_zero_grp['rate_present'] = np.where(data_s_zero_grp['avg_rate_system'] > 0, 1, 0)
# What should the final rate be, if not present in system
data_s_zero_grp['attributed_rate'] = np.where(data_s_zero_grp['rate_present'] == 1,
data_s_zero_grp['avg_rate_system'],
np.where(data_s_zero_grp['drug_type'] == 'generic',
35, 100))
# Read standard quantity
# Write connection for now, because it's a dependency table
# Change to read connection later
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
std_q = """
SELECT
"drug-id",
"std-qty" as "standard-quantity"
FROM
"drug-std-info"
"""
std_q = std_q.replace('`', '"')
logger.info(std_q)
data_drugs_q = rs_db_write.get_df(query=std_q)
data_drugs_q.columns = [c.replace('-', '_') for c in data_drugs_q.columns]
logger.info(len(data_drugs_q))
# Merge with main data
data_s_zero_grp = data_s_zero_grp.merge(data_drugs_q, how='left', on=['drug_id'])
# Impute for missing standard quantity
data_s_zero_grp['standard_quantity'] = data_s_zero_grp['standard_quantity'].fillna(1).astype(int)
# Remove outlier quantity
data_s_zero_grp['search_count_clean'] = np.where(data_s_zero_grp['search_count'] > 30, 30,
data_s_zero_grp['search_count'])
data_s_zero_grp['loss_quantity'] = data_s_zero_grp['search_count_clean'] * data_s_zero_grp[
'standard_quantity']
data_s_zero_grp['lost_sales'] = data_s_zero_grp['loss_quantity'].astype(float) * data_s_zero_grp[
'attributed_rate'].astype(float)
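# Illustrative example (numbers assumed, not from data): a generic drug with no
# historical system rate gets attributed_rate = 35; if it was searched 3 times that day
# (search_count_clean = 3) with a standard quantity of 10, loss_quantity = 30 and the
# estimated lost_sales for that store-drug-day is 30 * 35 = 1050.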
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
sales_summ_q = """
SELECT
"store-id",
"drug-id",
COUNT(distinct date("created-at")) AS num_days_sold,
MAX(date("created-at")) as last_sold
FROM
"sales"
WHERE
date("created-at") <= '{0}'
and "drug-id" in {1}
and "bill-flag" = 'gross'
GROUP BY
"store-id",
"drug-id"
""".format(period_end_d, drugs)
sales_summ_q = sales_summ_q.replace('`', '"')
# logger.info(sales_summ_q)
data_d2 = rs_db_write.get_df(query=sales_summ_q)
data_d2.columns = [c.replace('-', '_') for c in data_d2.columns]
logger.info(len(data_d2))
logger.info("Count of drugs with sold quantity and num_days_sold is : {}".format(len(data_d2)))
# Join with main data
data_s_zero_grp = data_s_zero_grp.merge(data_d2, how='left', on=['store_id', 'drug_id'])
# Put 0 for those not sold in that store
data_s_zero_grp['num_days_sold'] = data_s_zero_grp['num_days_sold'].fillna(0)
############################################
# Final loss (to be defined)
############################################
data_s_zero_grp['sold_or_substituted_flag'] = np.where(((data_s_zero_grp['comp_sold_window_flag'] > 0) |
(data_s_zero_grp['drug_sold_same_day_flag'] > 0)), 1, 0)
data_s_zero_grp['pr_opportunity_flag'] = np.where(data_s_zero_grp['sold_or_substituted_flag'] > 0, 0, 1)
data_s_zero_grp['pr_converted_flag'] = np.where(data_s_zero_grp['comp_pr_same_day_flag'] > 0, 1, 0)
# Those which are already converted, should also be get added in opportunity
data_s_zero_grp['pr_opportunity_flag'] = np.where(data_s_zero_grp['pr_converted_flag'] > 0, 1,
data_s_zero_grp['pr_opportunity_flag'])
data_s_zero_grp['pr_opportunity_converted_flag'] = np.where((data_s_zero_grp['pr_opportunity_flag'] > 0) &
(data_s_zero_grp['pr_converted_flag'] > 0), 1, 0)
# Amount
data_s_zero_grp['lost_sales_not_substituted'] = np.where(data_s_zero_grp['sold_or_substituted_flag'] == 0,
data_s_zero_grp['lost_sales'], 0)
data_s_zero_grp['lost_sales_not_substituted_not_pr'] = np.where(data_s_zero_grp['pr_converted_flag'] == 0,
data_s_zero_grp['lost_sales_not_substituted'], 0)
# Final lost sales, sensitive
data_s_zero_grp['final_loss_flag'] = np.where((data_s_zero_grp['sold_or_substituted_flag'] == 0) &
(data_s_zero_grp['pr_converted_flag'] == 0), 1, 0)
data_s_zero_grp['final_lost_sales'] = np.where(data_s_zero_grp['final_loss_flag'] == 1, data_s_zero_grp['lost_sales'],
0)
# Round off some values
for i in ['attributed_rate', 'lost_sales', 'lost_sales_not_substituted',
'lost_sales_not_substituted_not_pr', 'final_lost_sales']:
data_s_zero_grp[i] = np.round(data_s_zero_grp[i].astype(float), 2)
# Merge with drug order info data to get safe-stock,min and max quantity on store_id,drug_id level
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
doi_q = """
SELECT
`store-id` ,
`drug-id` ,
`safe-stock` safety_stock,
`min` minimum_quantity,
`max` maximum_quantity
FROM
`drug-order-info` doi
WHERE
`drug-id` in {}
""".format(drugs)
doi_q = doi_q.replace('`', '"')
# logger.info(doi_q)
drug_order_info_data = rs_db_write.get_df(query=doi_q)
drug_order_info_data.columns = [c.replace('-', '_') for c in drug_order_info_data.columns]
logger.info(len(drug_order_info_data))
data_s_zero_grp = data_s_zero_grp.merge(drug_order_info_data, how='left', on=['store_id', 'drug_id'])
logger.info("Data length after merging with drug-order-info, is {}".format(len(data_s_zero_grp)))
# Merge stores
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
stores_q = """
SELECT
id AS store_id,
store AS store_name
FROM
"stores-master"
"""
stores_q = stores_q.replace('`', '"')
logger.info(stores_q)
stores = rs_db_write.get_df(query=stores_q)
stores.columns = [c.replace('-', '_') for c in stores.columns]
logger.info(len(stores))
data_s_zero_grp = data_s_zero_grp.merge(stores, how='left', on=['store_id'])
logger.info("Data length after merging with stores, is {}".format(len(data_s_zero_grp)))
# DB upload columns
final_cols = ['store_id', 'store_name', 'search_date',
'drug_id', 'drug_name_y', 'composition_y',
'repeatability_index', 'drug_category',
'drug_type', 'search_count_clean',
'rate_present', 'attributed_rate',
'standard_quantity', 'loss_quantity',
'lost_sales', 'lost_sales_not_substituted',
'lost_sales_not_substituted_not_pr',
'shortbook_present', 'final_lost_sales',
'num_days_sold', 'last_sold',
'safety_stock', 'minimum_quantity', 'maximum_quantity',
'comp_sold_window_flag',
'drug_sold_same_day_flag', 'comp_sold_same_day_flag',
'pr_same_day_flag', 'comp_pr_same_day_flag',
'ms_same_day_flag',
'sold_or_substituted_flag', 'pr_opportunity_flag',
'pr_converted_flag', 'pr_opportunity_converted_flag',
'final_loss_flag']
data_export = data_s_zero_grp[final_cols]
# For redshift specific
# Convert int columns to int
for i in ['num_days_sold', 'safety_stock', 'minimum_quantity', 'maximum_quantity']:
data_export[i] = data_export[i].fillna(0).astype(int)
logger.info(data_export.columns)
################################
# DB WRITE
###############################
write_schema = 'prod2-generico'
write_table_name = 'cfr-searches-v2'
update_table_name = 'cfr-searches-v2-etl-update'
table_info = helper.get_table_info(db=rs_db_write, table_name=write_table_name, schema=write_schema)
################################
# Table info for update table
###############################
table_info_update = helper.get_table_info(db=rs_db_write, table_name=update_table_name, schema=write_schema)
# table_info_clean = table_info[~table_info['column_name'].isin(['id', 'created-at', 'updated-at'])]
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
# id and created-at
# Todo: remove the placeholder id later
data_export['id'] = -1
# Mandatory lines
data_export['created-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
# Deleting data for the chosen time period
if any_period_run == 'yes':
if run_type == 'delete':
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
delete_main_query = f"""
delete
from
"cfr-searches-v2"
where
date("search-date") > '{last_s_date_max}'
and date("search-date") <= '{period_end_d}'
"""
rs_db_write.execute(delete_main_query)
s3.write_df_to_db(df=data_export[table_info['column_name']], table_name=write_table_name,
db=rs_db_write, schema=write_schema)
logger.info("Uploading successful with length: {}".format(len(data_export)))
# logger.info(str('cfr-searches-v2') + ' table data deleted for specific time period')
elif run_type == 'update':
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
delete_query = f"""
delete from
"cfr-searches-v2-etl-update"
"""
rs_db_write.execute(delete_query)
s3.write_df_to_db(df=data_export[table_info_update['column_name']], table_name=update_table_name,
db=rs_db_write, schema=write_schema)
logger.info("Uploading successful with length: {}".format(len(data_export)))
rs_db_write.execute(query="begin ;")
update_main_query = f"""
update
"cfr-searches-v2" set
"rate-present" = cfe2."rate-present" ,
"attributed-rate" = cfe2."attributed-rate" ,
"num-days-sold" = cfe2."num-days-sold" ,
"last-sold" = cfe2."last-sold" ,
"sold-or-substituted-flag" = cfe2."sold-or-substituted-flag" ,
"pr-opportunity-flag" = cfe2."pr-opportunity-flag" ,
"pr-converted-flag" = cfe2."pr-converted-flag" ,
"lost-sales-not-substituted" = cfe2."lost-sales-not-substituted" ,
"lost-sales-not-substituted-not-pr" = cfe2."lost-sales-not-substituted-not-pr" ,
"final-loss-flag" = cfe2."final-loss-flag" ,
"final-lost-sales" = cfe2."final-lost-sales" ,
"search-count-clean" = cfe2."search-count-clean" ,
"loss-quantity" = cfe2."loss-quantity" ,
"lost-sales" = cfe2."lost-sales"
from
"prod2-generico"."cfr-searches-v2" cf2
inner join "cfr-searches-v2-etl-update" cfe2 on
cfe2."search-date" = cf2."search-date"
and cfe2."store-id" = cf2."store-id"
and cfe2."drug-id" = cf2."drug-id" """
rs_db_write.execute(update_main_query)
rs_db_write.execute(query="commit ;")
if upload_type == 'normal':
s3.write_df_to_db(df=data_export[table_info['column_name']], table_name=write_table_name,
db=rs_db_write, schema=write_schema)
logger.info("Uploading successful with length: {}".format(len(data_export)))
# Closing the DB Connection
rs_db_write.close_connection()
logger.info("File ends") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/cfr-searches-v2/cfr-searches-v2.py | cfr-searches-v2.py |
import argparse
import datetime
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.utils.inventory.inventory import Data
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-sd', '--start_date', default="NA", type=str, required=False,
help="Start date in IST")
parser.add_argument('-ed', '--end_date', default="NA", type=str, required=False,
help="End date in IST")
parser.add_argument('-bs', '--batch_size', default=1, type=int, required=False,
help="How many stores to process in one go")
parser.add_argument('-fr', '--is_full_run', default="NO", type=str, required=False,
help="Only one batch or all to process")
args, unknown = parser.parse_known_args()
env = args.env
start_date = args.start_date
end_date = args.end_date
batch_size = args.batch_size
is_full_run = args.is_full_run
os.environ['env'] = env
logger = get_logger()
write_schema = "prod2-generico" if env == 'prod' else 'test-generico'
""" read connection """
db = DB()
db.open_connection()
""" write connection """
w_db = DB(read_only=False)
w_db.open_connection()
s3 = S3(bucket_name=f"{env}-zeno-s3-db")
if not (start_date and end_date) or start_date == "NA" or end_date == "NA":
""" if no dates given, then run for yesterday """
end_date = datetime.datetime.now().strftime("%Y-%m-%d")
start_date = datetime.datetime.now() + datetime.timedelta(days=-1)
start_date = start_date.strftime("%Y-%m-%d")
"""
Instructions to use(README):
0. Make sure tables for both the dates (start and end) are present in public schema (eg: bills-1-mis-2022-06-11)
1. set the start date and end date
2. Set the store id if only one store changes are required, if all stores are required then don't set store id
3. Data is uploaded to s3(prod-zeno-s3-db) inside "inventory/ledger/" folder (eg: s3://dev-zeno-s3-db/inventory/ledger/2022/06/11/240.csv)
4. S3 Data can be queried using AWS Athena
Tables Required:
inventory-1,invoice-items-1,invoices-1,customer-return-items-1,customer-returns-1,stock-transfer-items-1,
stock-transfers-1,bill-items-1,bills-1,return-items-1,returns-to-dc-1,deleted-invoices,deleted-invoices-1,
inventory-changes-1
Improvements:
1. use parquet format to store the data
import pandas as pd
df = pd.read_csv('example.csv')
df.to_parquet('output.parquet')
Meaning of columns:
"o": Opening/Start
"cr": Customer Return
"xin": Stock transfer in
"xout": Stock transfer out
"sold": Sold to customer
"ret": Return to DC
"ar": Audit
"rr": Reverted Return
"del": Invoice Deleted
"c": closing
"""
""" get all the stores """
q = f"""
select
distinct "store-id" as "store-id"
from
"prod2-generico"."inventory-1" i
"""
stores = db.get_df(query=q)
""" this column order will be maintained across all csv files """
column_order = ["id", "barcode", "ptr", "o", "cr", "xin", "cin", "xout", "cout", "sold", "ret",
"ar", "rr", "del", "c", "e"]
""" clean existing records, if any """
q = f"""
delete from "{write_schema}"."inventory-ledger" where date("start-time") = '{start_date}';
"""
w_db.execute(query=q)
batch = 0
for store_id_batch in helper.batch(stores['store-id'], batch_size):
csv_store_ids = ','.join([str(s) for s in store_id_batch])
batch += 1
logger.info(f"batch: {batch}, csv store ids: {csv_store_ids}")
data = Data(db=db, csv_store_ids=csv_store_ids, start_date=start_date, end_date=end_date)
recon_df = data.concat()
uri = s3.save_df_to_s3(df=recon_df[column_order],
file_name=f"inventory/ledger/{start_date.replace('-', '/')}/batch_{batch}.csv",
index=False)
table_info = helper.get_table_info(db=w_db, table_name="inventory-ledger", schema=write_schema)
recon_df['start-time'] = data.start_ts
recon_df['end-time'] = data.end_ts
recon_df['created-at'] = datetime.datetime.now()
recon_df['updated-at'] = datetime.datetime.now()
recon_df['created-by'] = "etl-automation"
recon_df['updated-by'] = "etl-automation"
s3.write_df_to_db(df=recon_df[table_info['column_name']], table_name="inventory-ledger",
db=w_db, schema=write_schema)
logger.info(f"Uploaded successfully @ {uri}")
if is_full_run.lower() == "no":
logger.info(f"Stopping after one batch, since is_full_run: {is_full_run}")
db.close_connection()
w_db.close_connection()
break
db.close_connection()
w_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/inventory-ledger/inventory-ledger.py | inventory-ledger.py |
import argparse
import datetime
import sys
import os
import numpy as np
# from memory_profiler import profile
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
# from zeno_etl_libs.utils.inventory.inventory_2 import Data
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
def remove_duplicates(df: pd.DataFrame, f):
"""
#remove duplicates from dataframe of the form id, f, ptr
#f corresponds to quantity which is added up for duplicate ids
#ptr remains same for all
"""
print(f"Removed duplicates on column: {f}")
df1 = df.groupby('id', as_index=False)[f].sum()
df2 = df.drop_duplicates(subset='id').drop(columns=f)
df = pd.merge(left=df1, right=df2, on='id', how='left')
return df
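# Example (illustrative): two rows with the same inventory id, e.g. (id=101, sold=2,
# ptr=10.5) and (id=101, sold=3, ptr=10.5), collapse into one row (id=101, sold=5,
# ptr=10.5) when called as remove_duplicates(df, "sold").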
def getids(df: pd.DataFrame):
"""
utility function to generate string to be used in "in" query
"""
return ",".join(str(i) for i in df['id'].unique())
def combine_xin(recon_l: pd.DataFrame, xin_l: pd.DataFrame):
    """
    This handles the store's own inventory coming back (stock transferred in).
    """
return pd.concat([recon_l, xin_l], axis=0).drop_duplicates(subset='id')
class Data:
"""
class to get the inventory related data
Example:
'o', 'cr', 'xin', 'xout', 'ret', 'sold', 'del', 'ar', 'rr', 'c'
"""
def __init__(self, db, csv_store_ids, start_date, end_date, snapshot_ist_time_delta=0):
"""
:param db: database connection
:param csv_store_ids: multiple store ids in csv
:param start_date: start date in IST
:param end_date: end date in IST
"""
self.db = db
self.csv_store_ids = csv_store_ids
self.start_ts = f"{start_date} 02:00:00" # in IST
self.end_ts = f"{end_date} 03:00:00" # in IST
""" since snapshots names are in UTC so tables alias is one day back"""
start_date_utc = datetime.datetime.strptime(start_date, '%Y-%m-%d') - datetime.timedelta(
days=snapshot_ist_time_delta)
start_date_utc = start_date_utc.strftime("%Y-%m-%d")
end_date_utc = datetime.datetime.strptime(end_date, '%Y-%m-%d') - datetime.timedelta(
days=snapshot_ist_time_delta)
end_date_utc = end_date_utc.strftime("%Y-%m-%d")
self.s_alias = f"-mis-{start_date_utc}"
if start_date == "2022-06-01":
# Only for 2022-06-01 manual snapshot, since snapshot name and date are same
self.s_alias = f"-mis-{start_date}"
self.e_alias = f"-inv-{end_date_utc}"
self.s_schema = "public"
self.e_schema = "public"
""" Data frames """
self.recon_l = pd.DataFrame() # Final reconciled data frame
self.p_l = pd.DataFrame() # purchased / received
self.prd_l = pd.DataFrame() # purchased return dispatched
self.prs_l = pd.DataFrame() # purchased return settled
self.pr_l = pd.DataFrame() # purchased return cogs
self.o_l = pd.DataFrame() # opening / initial
self.cr_l = pd.DataFrame() #
self.xin_l = pd.DataFrame() #
self.xout_l = pd.DataFrame() #
self.sold_l = pd.DataFrame() #
self.ret_l = pd.DataFrame() #
self.ar_l = pd.DataFrame() #
self.rr_l = pd.DataFrame() #
self.del_l = pd.DataFrame() #
self.c_l = pd.DataFrame() #
def take_union(self):
"""
select one value of barcode from left or right data frame
"""
for col in ['barcode', 'ptr']:
self.recon_l[col] = np.where(self.recon_l[f'{col}_x'].isna(), self.recon_l[f'{col}_y'],
self.recon_l[f'{col}_x'])
self.recon_l.drop(columns=[f'{col}_x', f'{col}_y'], axis=1, inplace=True)
def opening(self):
"""
opening inventory calculation
"""
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity o,
ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is null
and quantity != 0
order by
id
"""
o_l_1 = self.db.get_df(query=q)
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity o,
ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is not null
and quantity != 0
order by
id
"""
o_l_2 = self.db.get_df(query=q)
self.o_l = pd.concat([o_l_1, o_l_2], ignore_index=True)
return self.o_l
def purchased(self):
"""
purchased inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."actual-quantity" p,
c.ptr
from
"{self.e_schema}"."invoice-items-1{self.e_alias}" a
join "{self.e_schema}"."invoices-1{self.e_alias}" b on
a."franchisee-invoice-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(c."invoice-item-id" = a.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a."actual-quantity" !=0
"""
self.p_l = self.db.get_df(query=q)
return self.p_l
def purchase_returns(self):
"""
purchase returns cogs inventory calculation
"""
q = f"""
select
e.id,
nvl("barcode-reference", 0) barcode,
sum(nvl(d."returned-quantity", 0)) pr,
e.ptr
from
"{self.e_schema}"."debit-notes-1{self.e_alias}" a
join "{self.e_schema}"."debit-note-items-1{self.e_alias}" c on
c."debit-note-id" = a."id"
join "{self.e_schema}"."return-items-1{self.e_alias}" d on
d."id" = c."item-id"
join "{self.e_schema}"."inventory-1{self.e_alias}" e on
e."id" = d."inventory-id"
where
a."store-id" in ({self.csv_store_ids})
and c."is-active" = true
and a."settled-at" >= '{self.start_ts}'
and a."settled-at" <= '{self.end_ts}'
and d."returned-quantity" !=0
group by
e.id, "barcode-reference", e.ptr
"""
self.pr_l = self.db.get_df(query=q)
return self.pr_l
def purchased_return_dispatched(self):
"""
purchased return dispatched inventory calculation
"""
q = f"""
select
d.id ,
nvl("barcode-reference", 0) barcode,
sum(nvl(c."returned-quantity", 0)) prd,
d."ptr"
from
"{self.e_schema}"."debit-notes-1{self.e_alias}" a
join "{self.e_schema}"."debit-note-items-1{self.e_alias}" b on
b."debit-note-id" = a.id
join "{self.e_schema}"."return-items-1{self.e_alias}" c on
b."item-id" = c.id
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" e on
c."return-id" = e.id
join "{self.e_schema}"."inventory-1{self.e_alias}" d on
c."inventory-id" = d.id
where
e."store-id" in ({self.csv_store_ids})
and a."dispatched-at" >= '{self.start_ts}'
and a."dispatched-at" <= '{self.end_ts}'
group by
d.id, "barcode-reference", d.ptr
"""
self.prd_l = self.db.get_df(query=q)
return self.prd_l
def purchased_return_settled(self):
"""
purchased return settled inventory calculation
"""
q = f"""
select
d.id ,
nvl("barcode-reference", 0) barcode,
sum(nvl(c."returned-quantity",0)) prs,
d."ptr"
from
"{self.e_schema}"."debit-notes-1{self.e_alias}" a
join "{self.e_schema}"."debit-note-items-1{self.e_alias}" b on
b."debit-note-id" = a.id
join "{self.e_schema}"."return-items-1{self.e_alias}" c on
b."item-id" = c.id
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" e on
c."return-id" = e.id
join "{self.e_schema}"."inventory-1{self.e_alias}" d on
c."inventory-id" = d.id
where
e."store-id" in ({self.csv_store_ids})
and a."settled-at" >= '{self.start_ts}'
and a."settled-at" <= '{self.end_ts}'
group by
d.id, "barcode-reference", d.ptr
"""
self.prs_l = self.db.get_df(query=q)
return self.prs_l
def customer_returns(self):
"""
customer return inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" cr,
c.ptr
from
"{self.e_schema}"."customer-return-items-1{self.e_alias}" a
join "{self.e_schema}"."customer-returns-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."returned-at" >= '{self.start_ts}'
and b."returned-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" cr,
c.ptr
from
"{self.e_schema}"."customer-return-items-1{self.e_alias}" a
join "{self.e_schema}"."customer-returns-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."returned-at" >= '{self.start_ts}'
and b."returned-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
"""
self.cr_l = self.db.get_df(query=q)
self.cr_l = remove_duplicates(df=self.cr_l, f="cr")
return self.cr_l
def xin(self):
"""
Stock transfer in - inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a.quantity xin,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."destination-store")
where
b."destination-store" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a.quantity xin,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."destination-store")
where
b."destination-store" in ({self.csv_store_ids})
and b."received-at" >= '{self.start_ts}'
and b."received-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.xin_l = self.db.get_df(query=q)
self.xin_l = remove_duplicates(df=self.xin_l, f="xin")
return self.xin_l
def xout(self):
"""
Stock transfer out inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" xout,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."source-store")
where
b."source-store" in ({self.csv_store_ids})
and a."transferred-at" >= '{self.start_ts}'
and a."transferred-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" xout,
c.ptr
from
"{self.e_schema}"."stock-transfer-items-1{self.e_alias}" a
join "{self.e_schema}"."stock-transfers-1{self.e_alias}" b on
a."transfer-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."source-store")
where
b."source-store" in ({self.csv_store_ids})
and a."transferred-at" >= '{self.start_ts}'
and a."transferred-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.xout_l = self.db.get_df(query=q)
self.xout_l = remove_duplicates(self.xout_l, "xout")
return self.xout_l
def sold(self):
"""
Sold inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" sold,
c.ptr
from
"{self.e_schema}"."bill-items-1{self.e_alias}" a
join "{self.e_schema}"."bills-1{self.e_alias}" b on
a."bill-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a.quantity !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."quantity" sold,
c.ptr
from
"{self.e_schema}"."bill-items-1{self.e_alias}" a
join "{self.e_schema}"."bills-1{self.e_alias}" b on
a."bill-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.sold_l = self.db.get_df(query=q)
self.sold_l = remove_duplicates(self.sold_l, "sold")
return self.sold_l
def returned_to_dc(self):
"""
Return to dc - inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" ret,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" ret,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and b."created-at" >= '{self.start_ts}'
and b."created-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
"""
self.ret_l = self.db.get_df(query=q)
self.ret_l = remove_duplicates(self.ret_l, "ret")
return self.ret_l
def deleted(self):
"""
Deleted - inventory calculation
"""
q = f"""
select
a.id,
nvl("barcode-reference", 0) barcode,
a.quantity del,
a.ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}" a
join "{self.e_schema}"."deleted-invoices{self.e_alias}" c on
a."invoice-id" = c.id
where
a."store-id" in ({self.csv_store_ids})
and c."deleted-at" >= '{self.start_ts}'
and c."deleted-at" <= '{self.end_ts}'
and a.quantity !=0
union
select
a.id,
nvl("barcode-reference", 0) barcode,
a.quantity del,
a.ptr
from
"{self.s_schema}"."inventory-1{self.s_alias}" a
join "{self.e_schema}"."deleted-invoices-1{self.e_alias}" c on
a."franchisee-invoice-id" = c.id
where
a."store-id" in ({self.csv_store_ids})
and c."deleted-at" >= '{self.start_ts}'
and c."deleted-at" <= '{self.end_ts}'
and a.quantity !=0
"""
self.del_l = self.db.get_df(query=q)
self.del_l = remove_duplicates(self.del_l, "del")
return self.del_l
def closing(self):
"""
Closing inventory calculation
"""
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity c,
ptr
from
"{self.e_schema}"."inventory-1{self.e_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is null
and quantity !=0
order by
id
"""
c_l_1 = self.db.get_df(query=q)
q = f"""
select
id,
nvl("barcode-reference", 0) barcode,
quantity c,
ptr
from
"{self.e_schema}"."inventory-1{self.e_alias}"
where
"store-id" in ({self.csv_store_ids})
and "barcode-reference" is not null
and quantity !=0
order by
id
"""
c_l_2 = self.db.get_df(query=q)
self.c_l = pd.concat([c_l_1, c_l_2], ignore_index=True)
return self.c_l
def audit_recon(self):
"""
Audit recon - inventory calculation
"""
q = f"""
select
b.id,
nvl("barcode-reference", 0) barcode,
a.change ar,
b.ptr
from
"{self.e_schema}"."inventory-changes-1{self.e_alias}" a
join "{self.e_schema}"."inventory-1{self.e_alias}" b on
(a."inventory-id" = b.id
and b."store-id" = a."store-id")
where
a."store-id" in ({self.csv_store_ids})
and a."created-at" >= '{self.start_ts}'
and a."created-at" <= '{self.end_ts}'
and a.change !=0
union all
select
b.id,
nvl("barcode-reference", 0) barcode,
a.change ar,
b.ptr
from
"{self.e_schema}"."inventory-changes-1{self.e_alias}" a
join "{self.e_schema}"."inventory-1{self.e_alias}" b on
(a."inventory-id" = b."barcode-reference"
and b."store-id" = a."store-id")
where
a."store-id" in ({self.csv_store_ids})
and a."created-at" >= '{self.start_ts}'
and a."created-at" <= '{self.end_ts}'
and a.change !=0
"""
self.ar_l = self.db.get_df(query=q)
self.ar_l = remove_duplicates(self.ar_l, "ar")
return self.ar_l
def reverted_returns(self):
"""
Reverted returns - inventory calculation
"""
q = f"""
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" rr,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c.id
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and a."reverted-at" >= '{self.start_ts}'
and a."reverted-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
union all
select
c.id,
nvl("barcode-reference", 0) barcode,
a."returned-quantity" rr,
c.ptr
from
"{self.e_schema}"."return-items-1{self.e_alias}" a
join "{self.e_schema}"."returns-to-dc-1{self.e_alias}" b on
a."return-id" = b.id
join "{self.e_schema}"."inventory-1{self.e_alias}" c on
(a."inventory-id" = c."barcode-reference"
and c."store-id" = b."store-id")
where
b."store-id" in ({self.csv_store_ids})
and a."reverted-at" >= '{self.start_ts}'
and a."reverted-at" <= '{self.end_ts}'
and a."returned-quantity" !=0
"""
self.rr_l = self.db.get_df(query=q)
self.rr_l = remove_duplicates(self.rr_l, "rr")
return self.rr_l
def get_meta_data(self):
""" extra data needed for inventory """
q = f"""
select
i.id,
i."purchase-rate" ,
d."drug-name"
from
"prod2-generico"."prod2-generico"."inventory-1" i
left join "prod2-generico"."prod2-generico".drugs d on
i."drug-id" = d.id
where
i."store-id" in ({self.csv_store_ids})
"""
return self.db.get_df(query=q)
# @profile
def start_data_fetch(self):
""" calls all the function which fetch the data from database """
print("Starting data fetch.")
self.opening()
print("opening: Successfully fetched.")
self.purchased()
print("purchased: Successfully fetched.")
self.purchase_returns()
print("purchase returns: Successfully fetched.")
self.purchased_return_dispatched()
print("purchased_return_dispatched : Successfully fetched.")
self.purchased_return_settled()
print("purchased_return_settled : Successfully fetched.")
self.customer_returns()
print("customer_returns: Successfully fetched.")
self.xin()
print("xin: Successfully fetched.")
self.xout()
print("xout: Successfully fetched.")
self.sold()
print("sold: Successfully fetched.")
self.returned_to_dc()
print("returned_to_dc: Successfully fetched.")
self.deleted()
print("deleted: Successfully fetched.")
self.closing()
print("closing: Successfully fetched.")
self.audit_recon()
print("audit_recon: Successfully fetched.")
self.reverted_returns()
print("reverted_returns: Successfully fetched.")
# @profile
def concat(self):
""" data fetching from database """
self.start_data_fetch()
"""
## combine initial and received
temp_l = select(p_l, :id, :barcode, :p => :o, :ptr)
recon_l = vcat(o_l, temp_l)
## following handles inventory lying in inventory-1 but received later
recon_l = remove_duplicates(recon_l, "o")
recon_l = combine_cr(recon_l, cr_l)
recon_l = combine_xin(recon_l, xin_l)
recon_l = combine_xout(recon_l, xout_l)
recon_l = combine_sold(recon_l, sold_l)
recon_l = combine_ret(recon_l, ret_l)
recon_l = combine_ar(recon_l, ar_l)
recon_l = combine_rr(recon_l, rr_l)
recon_l = leftjoin(recon_l, select(del_l, :id, :del), on = :id)
recon_l = leftjoin(recon_l, select(c_l, :id, :c), on = :id)
"""
# """ combine initial and received and call it opening(o) """
# self.p_l.rename(columns={'p': 'o'}, inplace=True)
# self.recon_l = remove_duplicates(self.o_l, f="o")
self.recon_l = self.o_l
# """ following handles inventory lying in inventory-1 but received later """
# self.recon_l = pd.concat([self.p_l, self.o_l], ignore_index=True)
# self.recon_l = remove_duplicates(self.recon_l, "o")
""" purchase """
self.recon_l = pd.merge(self.recon_l, self.p_l, on='id', how='outer')
self.take_union()
""" purchase returns """
self.recon_l = pd.merge(self.recon_l, self.pr_l, on='id', how='outer')
self.take_union()
""" purchase_return_deleted """
self.recon_l = pd.merge(self.recon_l, self.prd_l, on='id', how='outer')
self.take_union()
""" purchase_return_settled """
self.recon_l = pd.merge(self.recon_l, self.prs_l, on='id', how='outer')
self.take_union()
# self.recon_l['pr'] = 0
# self.recon_l['prd'] = 0
# self.recon_l['prs'] = 0
"""combine_cr: following handles the case where inventory was stock transferred,
after the start time and returned before end time """
self.recon_l = pd.merge(self.recon_l, self.cr_l, on='id', how='outer')
self.take_union()
"""combine_xin: this will take care of stores own inventory coming back"""
self.recon_l = pd.merge(self.recon_l, self.xin_l, on='id', how='outer')
self.take_union()
"""combine_xout: this will take care of stores own inventory transferred out"""
self.recon_l = pd.merge(self.recon_l, self.xout_l, on='id', how='outer')
self.take_union()
"""combine_sold: this will take care of stores inventory sold """
self.recon_l = pd.merge(self.recon_l, self.sold_l, on='id', how='outer')
self.take_union()
"""combine_ret: this will take care of stores inventory returned """
self.recon_l = pd.merge(self.recon_l, self.ret_l, on='id', how='outer')
self.take_union()
"""combine_ar: """
self.recon_l = pd.merge(self.recon_l, self.ar_l, on='id', how='outer')
self.take_union()
"""combine_rr: """
self.recon_l = pd.merge(self.recon_l, self.rr_l, on='id', how='outer')
self.take_union()
""" deleted """
self.recon_l = pd.merge(self.recon_l, self.del_l, on='id', how='left')
self.take_union()
""" closing """
self.recon_l = pd.merge(self.recon_l, self.c_l, on='id', how='left')
self.take_union()
""" calculate the error """
self.recon_l = self.recon_l.fillna(0)
for col in ['id', 'o', 'p', 'pr', 'prd', 'prs', 'cr', 'xin', 'xout', 'ret', 'sold', 'del',
'ar',
'rr', 'c', 'barcode']:
self.recon_l[col] = pd.to_numeric(self.recon_l[col])
self.recon_l[col] = self.recon_l[col].astype('int', errors='raise')
self.recon_l['e'] = self.recon_l['o'] + self.recon_l['p'] + self.recon_l['cr'] + \
self.recon_l['xin'] - \
self.recon_l['xout'] - \
self.recon_l['ret'] - self.recon_l['sold'] - self.recon_l['del'] + \
self.recon_l['ar'] + \
self.recon_l['rr'] - self.recon_l['c']
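        # 'e' is the reconciliation error and should be 0 when the ledger balances,
        # e.g. (values assumed) o=10, p=5, cr=1, xin=0, xout=2, ret=1, sold=8, del=0,
        # ar=0, rr=0 and c=5 give e = 10+5+1+0-2-1-8-0+0+0-5 = 0.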
return self.recon_l
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-sd', '--start_date', default="NA", type=str, required=False,
help="Start date in IST")
parser.add_argument('-ed', '--end_date', default="NA", type=str, required=False,
help="End date in IST")
parser.add_argument('-sitd', '--snapshot_ist_time_delta', default=0, type=int, required=False,
help="End date in IST")
parser.add_argument('-bs', '--batch_size', default=1, type=int, required=False,
help="How many stores to process in one go")
parser.add_argument('-fr', '--is_full_run', default="NO", type=str, required=False,
help="Only one batch or all to process")
args, unknown = parser.parse_known_args()
env = args.env
start_date = args.start_date
end_date = args.end_date
batch_size = args.batch_size
snapshot_ist_time_delta = args.snapshot_ist_time_delta
is_full_run = args.is_full_run
os.environ['env'] = env
logger = get_logger()
# schema = "test-generico"
schema = "public"
if env == "prod":
schema = "prod2-generico"
prefix = "-inv-2022-04-01"
""" read connection """
db = DB()
db.open_connection()
""" write connection """
w_db = DB(read_only=False)
w_db.open_connection()
s3 = S3(bucket_name=f"{env}-zeno-s3-db")
if not (start_date and end_date) or start_date == "NA" or end_date == "NA":
""" if no dates given, then run for yesterday """
end_date = datetime.datetime.now().strftime("%Y-%m-%d")
start_date = datetime.datetime.now() + datetime.timedelta(days=-1)
start_date = start_date.strftime("%Y-%m-%d")
"""
Instructions to use(README):
0. Make sure tables for both the dates (start and end) are present in public schema (eg: bills-1-mis-2022-06-11)
1. set the start date and end date
2. Set the store id if only one store changes are required, if all stores are required then don't set store id
3. Data is uploaded to s3(prod-zeno-s3-db) inside "inventory/ledger/" folder (eg: s3://dev-zeno-s3-db/inventory/ledger/2022/06/11/240.csv)
4. S3 Data can be queried using AWS Athena
Tables Required:
inventory-1,invoice-items-1,invoices-1,customer-return-items-1,customer-returns-1,stock-transfer-items-1,
stock-transfers-1,bill-items-1,bills-1,return-items-1,returns-to-dc-1,deleted-invoices,deleted-invoices-1,
inventory-changes-1
Improvements:
1. use parquet format to store the data
import pandas as pd
df = pd.read_csv('example.csv')
df.to_parquet('output.parquet')
Meaning of columns:
"o": Opening/Start
"p": Purchased/Received
"pr": Purchase Return (COGS, settled debit notes)
"prd": Purchase Return Dispatched
"prs": Purchase Return Settled
"cr": Customer Return
"xin": Stock transfer in
"xout": Stock transfer out
"sold": Sold to customer
"ret": Return to DC
"ar": Audit
"rr": Reverted Return
"del": Invoice Deleted
"c": closing
"""
""" get all the stores """
q = f"""
select
distinct "store-id" as "store-id"
from
"{schema}"."inventory-1{prefix}" i
"""
stores = db.get_df(query=q)
""" this column order will be maintained across all csv files """
column_order = ["id", "barcode", "ptr", "o", "p", "pr", "prd", "prs", "cr", "xin", "xout", "sold",
"ret", "ar", "rr", "del", "c", "e"]
inventory_ledger_table = "inventory-ledger-v2"
""" clean existing records, if any """
q = f"""
delete from "{schema}"."{inventory_ledger_table}"
-- where date("start-time") = '{start_date}';
"""
w_db.execute(query=q)
batch = 0
for store_id_batch in helper.batch(stores['store-id'], batch_size):
csv_store_ids = ','.join([str(s) for s in store_id_batch])
batch += 1
logger.info(f"batch: {batch}, csv store ids: {csv_store_ids}")
data = Data(db=db, csv_store_ids=csv_store_ids, start_date=start_date, end_date=end_date,
snapshot_ist_time_delta=snapshot_ist_time_delta)
recon_df = data.concat()
uri = s3.save_df_to_s3(df=recon_df[column_order],
file_name=f"inventory/ledger/{start_date.replace('-', '/')}/batch_{batch}.csv",
index=False)
table_info = helper.get_table_info(db=w_db, table_name=inventory_ledger_table, schema=schema)
recon_df['start-time'] = data.start_ts
recon_df['end-time'] = data.end_ts
recon_df['created-at'] = datetime.datetime.now()
recon_df['updated-at'] = datetime.datetime.now()
recon_df['created-by'] = "etl-automation"
recon_df['updated-by'] = "etl-automation"
s3.write_df_to_db(df=recon_df[table_info['column_name']], table_name=inventory_ledger_table,
db=w_db, schema=schema)
logger.info(f"Uploaded successfully @ {uri}")
if is_full_run.lower() == "no":
logger.info(f"Stopping after one batch, since is_full_run: {is_full_run}")
db.close_connection()
w_db.close_connection()
break
db.close_connection()
w_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/inventory-ledger/inventory-ledger-v2.py | inventory-ledger-v2.py |
import argparse
import datetime
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.utils.inventory.inventory import Data
from zeno_etl_libs.helper.aws.s3 import S3
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-sd', '--start_date', default="2022-06-01", type=str, required=False, help="Start date in IST")
parser.add_argument('-ed', '--end_date', default="2022-06-26", type=str, required=False, help="End date in IST")
parser.add_argument('-si', '--store_id', default="215", type=int, required=False,
help="")
parser.add_argument('-lfp', '--local_file_path', default="/Users/kuldeep/Downloads", type=str, required=False, help="")
args, unknown = parser.parse_known_args()
env = args.env
start_date = args.start_date
end_date = args.end_date
store_id = args.store_id
local_file_path = args.local_file_path
os.environ['env'] = env
logger = get_logger()
""" read connection """
db = DB()
db.open_connection()
s3 = S3(bucket_name=f"{env}-zeno-s3-db")
if not (start_date and end_date) or start_date == "NA" or end_date == "NA":
""" if no dates given, then run for yesterday """
end_date = datetime.datetime.now().strftime("%Y-%m-%d")
start_date = datetime.datetime.now() + datetime.timedelta(days=-1)
start_date = start_date.strftime("%Y-%m-%d")
"""
Instructions to use(README):
0. Make sure tables for both the dates (start and end) are present in public schema (eg: bills-1-mis-2022-06-11)
1. set the start date and end date
2. Set the store id
3. Data is uploaded to s3(ie. prod-zeno-s3-db) inside "inventory/ledger/" folder (eg: s3://prod-zeno-s3-db/inventory/ledger/adhoc/2022/06/11/240.csv)
4. S3 Data can be queried using AWS Athena
Tables Required:
inventory-1,invoice-items-1,invoices-1,customer-return-items-1,customer-returns-1,stock-transfer-items-1,
stock-transfers-1,bill-items-1,bills-1,return-items-1,returns-to-dc-1,deleted-invoices,deleted-invoices-1,
inventory-changes-1
"""
""" this column order will be maintained across all csv files """
column_order = ["id", "barcode", "ptr", "o", "cr", "xin", "xout", "sold", "ret", "ar", "rr", "del", "c", "e",
'start-time', 'end-time', 'purchase-rate', 'drug-name']
""" calculating the data """
data = Data(db=db, csv_store_ids=f"{store_id}", start_date=start_date, end_date=end_date)
recon_df = data.concat()
recon_df['start-time'] = data.start_ts
recon_df['end-time'] = data.end_ts
""" add other meta data """
meta_df = data.get_meta_data()
df = recon_df.merge(meta_df, how='left', on=['id'])
""" for testing the s3 part """
# recon_df = pd.DataFrame(data=[[1,2,3,4,5,6,7,8,9,0,1,2,3,4]], columns=column_order)
file_name = f"store-{store_id}-{start_date}-{end_date}.csv"
uri = s3.save_df_to_s3(df=df[column_order], file_name=f"inventory/ledger/adhoc/{file_name}", index=False)
if local_file_path:
local_file_path = f"{local_file_path}/{file_name}"
df.to_csv(path_or_buf=local_file_path)
logger.info(f"Uploaded successfully @ {uri}") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/inventory-ledger/inventory-ledger-one-store.py | inventory-ledger-one-store.py |
import argparse
import os
import sys
from datetime import datetime as dt
import numpy as np
import pandas as pd
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-acd', '--alternate_calc_date', default=0, type=int, required=False)
parser.add_argument('-cd', '--calculation_date', default=0, type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
alternate_calc_date = args.alternate_calc_date
calculation_date = args.calculation_date
logger = get_logger()
logger.info(f"env: {env}")
# params
status = 'Failed'
if alternate_calc_date:
calculation_date = calculation_date
else:
calculation_date = str(dt.now().date())
schema = 'prod2-generico'
table_name = 'crm-view'
rs_db = DB()
rs_db.open_connection()
s3 = S3()
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
read_schema = 'prod2-generico'
if isinstance(table_info, type(None)):
logger.info(f"table: {table_name} do not exist")
else:
truncate_query = f"""
DELETE
FROM
"{read_schema}"."{table_name}"
WHERE
"calculation-date" = '{calculation_date}';
"""
logger.info(truncate_query)
rs_db.execute(truncate_query)
# Fetching all patient recency, frequency, monetary(abv)
rfm_q = f"""
SELECT
b."patient-id",
SUM(bi.rate * bi.quantity) AS "total-cx-purchase",
SUM(bi.rate * bi.quantity)/ COUNT(DISTINCT bi."bill-id") AS monetary,
COUNT(DISTINCT bi."bill-id") AS frequency,
DATEDIFF('days', MAX(DATE(b."created-at")), '{calculation_date}') AS recency,
SUM(i."purchase-rate" * bi.quantity) AS "total-wc-purchase",
MIN(DATE(b."created-at")) AS "acq-date",
MAX(DATE(b."created-at")) AS "last-bill"
FROM
"prod2-generico"."bills-1" b
LEFT JOIN "prod2-generico"."bill-items-1" bi
ON
b.id = bi."bill-id"
LEFT JOIN "prod2-generico"."inventory-1" i ON
bi."inventory-id" = i.id
WHERE DATE(b."created-at") <= '{calculation_date}'
GROUP BY
b."patient-id";"""
# Fetching all patient promos
promo_q = f"""
SELECT
b."patient-id",
SUM(b."promo-discount") AS "promo-discount",
SUM(b."redeemed-points") AS "redeemed-points"
FROM
"{read_schema}"."bills-1" b
WHERE
DATE(b."created-at") <= '{calculation_date}'
GROUP BY
b."patient-id";"""
# Fetching all patient consumer behaviour and value segment
calc_date = calculation_date[:7] + "-01"
cbs_q = f"""
select
cbs."patient-id" ,
cbs."behaviour-segment" as "current-behaviour-segment"
from
"{read_schema}"."customer-behaviour-segment" cbs
where
cbs."segment-calculation-date" = '{calc_date}';"""
cvs_q = f"""
select
cvs."patient-id" ,
cvs."value-segment" as "current-value-segment"
from
"{read_schema}"."customer-value-segment" cvs
where
cvs."segment-calculation-date" = '{calc_date}'; """
# Fetching all patient consumer_flag
flag_q = f"""
select
rm."patient-id" ,
(case
when max(cast (rm."is-generic" as int))= 1 then 'generic'
else 'non-generic'
end) as "is-generic",
(case
when max(cast (rm."is-repeatable" as int ))= 1 then 'repetable'
else 'non-repeatable'
end) as "is-repeatable",
(case
when max(cast (rm."is-chronic" as int))= 1 then 'chronic'
else 'acute'
end) as "is-chronic" ,
(case
when max(cast (rm."hd-flag" as int))= 1 then 'hd-customer'
else 'non-hd-customer'
end) as "is-hd"
from
"{read_schema}"."retention-master" rm
where
date(rm."bill-date") < '{calculation_date}'
group by
rm."patient-id";"""
# Fetching all patient acquisition source
acq_q = f"""
select
rm."patient-id",
(case
when rm."promo-code-id" is null then 'organic'
else 'inorganic'
end) as acquisition
from
"{read_schema}"."retention-master" rm
where
rm."p-first-bill-date" = rm."created-at";"""
# Fetching patient primary stores
store_q = f"""
select
rm."patient-id",
rm.store,
rm.abo,
rm."store-manager",
rm."store-type",
rm."store-city" as "city",
rm."line-manager",
rm."store-b2b"
from
"{read_schema}"."retention-master" rm
where
rm."p-first-bill-id" = rm.id;"""
rfm = rs_db.get_df(rfm_q)
promo = rs_db.get_df(promo_q)
cbs = rs_db.get_df(cbs_q)
cvs = rs_db.get_df(cvs_q)
flags = rs_db.get_df(flag_q)
acq = rs_db.get_df(acq_q)
stores = rs_db.get_df(store_q)
# logger_info
logger.info('total number of patients : {}'.format(len(rfm)))
# data types
rfm['total-cx-purchase'] = rfm['total-cx-purchase'].astype(float)
rfm['total-wc-purchase'] = rfm['total-wc-purchase'].astype(float)
rfm['monetary'] = rfm['monetary'].astype(float)
rfm['acq-date'] = pd.to_datetime(rfm['acq-date'])
rfm['last-bill'] = pd.to_datetime(rfm['last-bill'])
promo['promo-discount'] = promo['promo-discount'].astype(float)
# Function for Customer stages
rfm['r-score'] = 1
rfm['f-score'] = 1
rfm['m-score'] = 1
rfm['r-score'] = pd.qcut(rfm['recency'], 5, labels=[5, 4, 3, 2, 1])
rfm['m-score'] = pd.qcut(rfm['monetary'], 5, labels=[1, 2, 3, 4, 5])
try:
rfm['f-score'] = pd.qcut(rfm['frequency'], 5, labels=[1, 2, 3, 4, 5])
except ValueError:
rfm['f-score'] = pd.cut(rfm['frequency'], bins=[0, 1, 3, 6, 10, np.inf], labels=[1, 2, 3, 4, 5])
rfm['stage'] = np.nan
rfm['stage'] = np.where((rfm['r-score'].isin([3, 4])) & (rfm['f-score'].isin([4, 5])), 'Loyal Customers',
rfm['stage'])
rfm['stage'] = np.where((rfm['r-score'].isin([4, 5])) & (rfm['f-score'].isin([2, 3])), 'Potential Loyalist',
rfm['stage'])
rfm['stage'] = np.where((rfm['r-score'].isin([5])) & (rfm['f-score'].isin([4, 5])), 'Champions', rfm['stage'])
rfm['stage'] = np.where((rfm['r-score'].isin([5])) & (rfm['f-score'].isin([1])), 'New Customer', rfm['stage'])
rfm['stage'] = np.where((rfm['r-score'].isin([4])) & (rfm['f-score'].isin([1])), 'Promising', rfm['stage'])
rfm['stage'] = np.where((rfm['r-score'].isin([3])) & (rfm['f-score'].isin([3])), 'Customer needing attention',
rfm['stage'])
rfm['stage'] = np.where((rfm['r-score'].isin([3])) & (rfm['f-score'].isin([1, 2])), 'About to Sleep',
rfm['stage'])
rfm['stage'] = np.where((rfm['r-score'].isin([1, 2])) & (rfm['f-score'].isin([3, 4])), 'At Risk',
rfm['stage'])
rfm['stage'] = np.where((rfm['r-score'].isin([1, 2])) & (rfm['f-score'].isin([5])), 'Can\'t Lose them',
rfm['stage'])
rfm['stage'] = np.where((rfm['r-score'].isin([1, 2])) & (rfm['f-score'].isin([1, 2])), 'Hibernating',
rfm['stage'])
rfm['stage'] = np.where((rfm['r-score'].isin([1])) & (rfm['f-score'].isin([1])), 'Lost', rfm['stage'])
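# Illustrative mapping (scores assumed): a patient with r-score 5 and f-score 1 lands in
# 'New Customer', r-score 1-2 with f-score 5 lands in 'Can't Lose them', and a patient
# with r-score 1 and f-score 1 first matches 'Hibernating' but is overwritten to 'Lost'
# by the final rule, since later assignments take precedence.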
# roi calculation at customer level
crm = pd.merge(rfm, promo, on='patient-id', how='left')
# roi = profit_gain / investment
# p = wc_purchase, s = cx_purchase, pr = promo/discount
# roi = (s-p-pr )/(p+pr)
crm['investment'] = crm['total-wc-purchase'] + crm['redeemed-points'] + crm['promo-discount']
crm['gain'] = crm['total-cx-purchase'] - crm['investment']
crm['roi'] = crm['gain'] / crm['investment']
crm = crm.drop(columns=['investment', 'gain'])
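# Worked example (numbers assumed): total-wc-purchase=800, redeemed-points=50 and
# promo-discount=150 give investment=1000; with total-cx-purchase=1300 the gain is 300,
# so roi = 300 / 1000 = 0.3.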
crm['abv-seg'] = pd.cut(crm['monetary'],
bins=[0, 200, 300, 500, 750, 1000, 1250, 1500, 2000, np.inf],
labels=['<=200', '201-300', '301-500',
'501-750', '751-1000', '1001-1250',
'1251-1500', '1501-2000', '>2000'])
crm['nob-seg'] = pd.cut(crm['frequency'],
bins=[0, 4, 10, 25, 50, 100, np.inf],
labels=['1-4', '5-10', '11-25',
'26-50', '50-100', '>100'])
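# e.g. (illustrative) a patient with monetary (ABV) of 450 falls in the '301-500' abv-seg
# bucket and one with 7 bills falls in the '5-10' nob-seg bucket.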
# consumer behaviour and value segment
crm = pd.merge(crm, cbs, on='patient-id', how='left')
crm = pd.merge(crm, cvs, on='patient-id', how='left')
# consumer flag
crm = pd.merge(crm, flags, on='patient-id', how='left')
# consumer acquisition
crm = pd.merge(crm, acq, on='patient-id', how='left')
# consumer store data
crm = pd.merge(crm, stores, on='patient-id', how='left')
crm['calculation-date'] = calculation_date
crm['segment-calculation-date'] = calc_date
# data correction
crm['r-score'] = crm['r-score'].fillna(1)
crm['f-score'] = crm['f-score'].fillna(1)
crm['m-score'] = crm['m-score'].fillna(1)
crm['r-score'] = crm['r-score'].astype(int)
crm['f-score'] = crm['f-score'].astype(int)
crm['m-score'] = crm['m-score'].astype(int)
logger.info("info :", crm.info())
# etl
crm['created-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
crm['created-by'] = 'etl-automation'
crm['updated-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
crm['updated-by'] = 'etl-automation'
# Write to csv
s3.save_df_to_s3(df=crm[table_info['column_name']], file_name='crm_view.csv')
s3.write_df_to_db(df=crm[table_info['column_name']], table_name=table_name, db=rs_db, schema=schema)
# closing the connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/crm_view/crm-view.py | crm-view.py |
import argparse
import numpy as np
import os
import pandas as pd
import sys
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from datetime import datetime as dt
from datetime import timedelta
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-fr', '--full_run', default=0, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
full_run = args.full_run
logger = get_logger()
logger.info(f"env: {env}")
# =============================================================================
# set parameters
# =============================================================================
if full_run:
pso_date1 = '2022-02-01'
pso_date2 = str(dt.today().date() - timedelta(days=1))
else:
pso_date1 = str(dt.today().date() - timedelta(days=10))
pso_date2 = str(dt.today().date() - timedelta(days=1))
schema = 'prod2-generico'
table_name = "pso-recommendation-visibility"
rs_db = DB()
rs_db.open_connection()
s3 = S3()
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
read_schema = 'prod2-generico'
if isinstance(table_info, type(None)):
logger.info(f"table: {table_name} do not exist")
else:
truncate_query = f"""
DELETE
FROM
"{read_schema}"."{table_name}"
WHERE
"created-at" BETWEEN '{pso_date1}' AND '{pso_date2}';
"""
logger.info(truncate_query)
rs_db.execute(truncate_query)
# =============================================================================
# store clusters
# =============================================================================
qc = f"""
select
sf."feature-id",
f.feature,
sf."store-id",
sf."is-active" as "pso-recomm-active",
coalesce(sc."cluster-id", 0) as "cluster-id",
coalesce(c.name, '0') as "cluster-name"
from
"{read_schema}".features f
left join "{read_schema}"."store-features" sf on
f.id = sf."feature-id"
left join "{read_schema}"."store-clusters" sc on
sc."store-id" = sf."store-id"
left join "{read_schema}".clusters c on
c.id = sc."cluster-id"
where
sf."feature-id" = 68
and sf."is-active" = 1;
"""
store_clusters = rs_db.get_df(query=qc)
orders_transfers_all = pd.DataFrame()
for cluster in store_clusters['cluster-id'].unique():
logger.info("")
logger.info("cluster {}".format(cluster))
temp = store_clusters[store_clusters['cluster-id'] == cluster]
cluster_stores = tuple(map(int, list(temp['store-id'].unique())))
# =============================================================================
# GET PRs
# =============================================================================
q1 = f"""
select
pso."order-number",
pso.id as "patient-store-order-id",
pso."patient-request-id",
pso."zeno-order-id" ,
pso."patient-id" ,
pso."order-source" ,
pso."order-type" ,
pso."slot-recommendation-status" AS "fulfillment-actions",
pso."status" as "pso-status",
pso."created-at" AS "pso-created-at",
pso."store-id" ,
s."name" as "store-name",
pso."drug-id" ,
pso."drug-name" ,
pso."requested-quantity",
pso."inventory-quantity" as "inventory-at-creation",
pr."required-quantity",
pr."quantity-to-order",
pso."bill-id",
b."created-at" AS "bill-date",
dt."delivered-at"
from
"{read_schema}"."patients-store-orders" pso
left join "{read_schema}"."patient-requests" pr
on pso."order-number" = pr."patient-request-number"
and pso."patient-request-id" = pr.id
join "{read_schema}"."stores" s on s."id" = pso."store-id"
left join "{read_schema}"."bills-1" b on b."id" = pso."bill-id"
left join "{read_schema}"."delivery-tracking" dt
on dt."patient-store-order-id" = pso."id"
where
DATE(pso."created-at") >= '{pso_date1}'
and DATE(pso."created-at") <= '{pso_date2}'
and pso."store-id" in {cluster_stores}
and b."created-at" is not null;
"""
q2 = f"""
select
distinct
pso."order-number",
pso."slot-date",
psr."recommended-slot-date",
pso."slot-id",
psr."recommended-slot-id",
ss_1."end-time" as "selected-end-time",
ss_2."end-time" as "recommended-end-time",
(case
when pso."slot-date" = psr."recommended-slot-date"
and pso."slot-id" = psr."recommended-slot-id" then 'recommended_slot'
else
'not_recommended_slot'
end) "selected-slot"
from
"{read_schema}"."patients-store-orders" pso
left join "{read_schema}"."pso-slot-recommendation" psr on
pso."order-number" = psr."order-number"
left join "{read_schema}"."store-slots" ss_1 on
pso."slot-id" = ss_1.id
left join "{read_schema}"."store-slots" ss_2 on
pso."slot-id" = ss_2.id
where
DATE(pso."created-at") >= '{pso_date1}'
and DATE(pso."created-at") <= '{pso_date2}';"""
orders = rs_db.get_df(query=q1)
recommendation = rs_db.get_df(query=q2)
orders['required-quantity'].fillna(0, inplace=True)
orders['quantity-to-order'].fillna(0, inplace=True)
orders['delivered-at'] = pd.to_datetime(orders['delivered-at'], errors='coerce')
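# Fall back to the bill timestamp when the delivery timestamp is missing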
orders['delivered-at'] = np.where(orders['delivered-at'].isnull(), orders['bill-date'], orders['delivered-at'])
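# Store-slot end times come as strings like '7:30 PM' or '7:30PM'; try both formats, else mark unparseable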
def string_to_time(x):
try:
return dt.strptime(x, "%I:%M %p").time()
except:
try:
return dt.strptime(x, "%I:%M%p").time()
except:
return "Can't convert"
recommendation['selected-end-time'] = recommendation['selected-end-time'].apply(
lambda x: string_to_time(x))
recommendation['recommended-end-time'] = recommendation['recommended-end-time'].apply(
lambda x: string_to_time(x))
recommendation['recommended-slot-date'] = recommendation['recommended-slot-date'].fillna(dt(2100, 1, 1))
recommendation['slot-date'] = pd.to_datetime(recommendation['slot-date']).dt.date
recommendation['recommended-slot-date'] = pd.to_datetime(recommendation['recommended-slot-date']).dt.date
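# Classify the chosen slot as earlier than, equal to (recommended), or later than the recommended slot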
def early_later(x):
if x['slot-date'] < x['recommended-slot-date']:
return 'early_slot'
elif (x['slot-date'] == x['recommended-slot-date']) & (x['selected-end-time'] < x['recommended-end-time']):
return 'early_slot'
elif x['selected-slot'] == 'recommended_slot':
return 'recommended_slot'
else:
return 'later_slot'
recommendation['early-later-slot'] = recommendation.apply(lambda x: early_later(x), 1)
orders = pd.merge(orders, recommendation, on='order-number', how='left')
orders['availability-tag'] = np.where(
orders['quantity-to-order'] > 0, "pr-short", np.nan)
orders['availability-tag'] = orders.fillna('').sort_values(
['order-number',
'availability-tag']).groupby('order-number')['availability-tag'].transform('last')
orders['availability-tag'] = np.where(
orders['availability-tag'] != 'pr_short',
"pr_not_short", orders['availability-tag'])
# =============================================================================
# Get transfers
# =============================================================================
trnfrs = tuple(map(int, list(orders['patient-store-order-id'].unique())))
q2 = f"""
select
pso."order-number",
pstm."patient-store-order-id",
pstm."from-store-id",
pstm."to-store-id",
pstm."item-quantity" as "to-be-transferred-qty",
sti."quantity" as "actual-transferred-qty",
st."total-items",
pstm."slot-date",
pstm."status" as "tn-status",
st."status" as "transfer-status",
st."initiated-at",
st."transferred-at",
st."received-at",
DATEDIFF(minute, st."transferred-at", st."received-at") as "transfer-minutes",
zo."created-at" as "zeno-created-at"
from
"{read_schema}"."pso-stock-transfer-mapping" pstm
left join "{read_schema}"."patients-store-orders" pso on
pso.id = pstm."patient-store-order-id"
left join "{read_schema}"."stock-transfers-1" st
on
st.id = pstm."stock-transfer-id"
left join "{read_schema}"."stock-transfer-items-1" sti
on
sti.id = pstm."stock-transfer-item-id"
left join "{read_schema}"."zeno-order" zo
on
zo.id = pso."zeno-order-id"
where
pstm."patient-store-order-id" in {trnfrs}
"""
transfers = rs_db.get_df(query=q2)
transfers['received-at'] = pd.to_datetime(transfers['received-at'],
format='%Y-%m-%d %H:%M:%S',
errors='coerce')
transfers_summ = transfers.groupby(['order-number',
'patient-store-order-id']).agg(
{'initiated-at': [np.max],
'transferred-at': [np.max],
'received-at': [np.max],
'zeno-created-at': [np.max],
'to-be-transferred-qty': [np.sum],
'actual-transferred-qty': [np.sum]}).reset_index()
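# Flatten the MultiIndex columns produced by the multi-function aggregation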
transfers_summ.columns = ["_".join(x) for x in transfers_summ.columns.ravel()]
transfers_summ.rename(columns={'initiated-at_amax': 'initiated-at',
'transferred-at_amax': 'transferred-at',
'received-at_amax': 'received-at',
'to-be-transferred-qty_sum': 'to-be-transferred-qty',
'actual-transferred-qty_sum': 'actual-transferred-qty',
'transfer-status_': 'transfer-status',
'order-number_': 'order-number',
'patient-store-order-id_': 'patient-store-order-id',
'zeno-created-at_amax': 'zeno-created-at'},
inplace=True)
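# Cluster 0 holds stores without a cluster; blank the join keys so no transfer rows attach to these orders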
if cluster == 0:
transfers_summ['order-number'] = np.nan
transfers_summ['patient-store-order-id'] = np.nan
# transfers_summ = transfers_summ.drop(columns=['index_'])
orders_transfers = pd.merge(left=orders, right=transfers_summ,
how='left', on=['order-number',
'patient-store-order-id'])
orders_transfers['to-be-transferred-qty'].fillna(0, inplace=True)
orders_transfers['actual-transferred-qty'].fillna(0, inplace=True)
orders_transfers['zeno-created-at'] = pd.to_datetime(orders_transfers['zeno-created-at'])
orders_transfers['initiated-at'] = pd.to_datetime(orders_transfers['initiated-at'])
orders_transfers['transferred-at'] = pd.to_datetime(orders_transfers['transferred-at'])
# lead to pso creation
orders_transfers['lead-to-pso-creation-hours'] = \
((orders_transfers['pso-created-at'] - orders_transfers['zeno-created-at'])
/ np.timedelta64(1, 'h'))
# PSO to transfer inititate
orders_transfers['pso-to-transfer-initiate-hours'] = \
((orders_transfers['initiated-at'] - orders_transfers['pso-created-at'])
/ np.timedelta64(1, 'h'))
# PSO to transfer transferred
orders_transfers['pso-to-transfer-transfer-hours'] = \
((orders_transfers['transferred-at'] - orders_transfers['pso-created-at'])
/ np.timedelta64(1, 'h'))
# PSO to transfer received
orders_transfers['pso-to-transfer-received-hours'] = \
((orders_transfers['received-at'] - orders_transfers['pso-created-at'])
/ np.timedelta64(1, 'h'))
# PSO to bill
orders_transfers['pso-to-bill-hours'] = \
((orders_transfers['bill-date'] - orders_transfers['pso-created-at'])
/ np.timedelta64(1, 'h'))
orders_transfers['pso-to-bill-hours'] = np.where(
orders_transfers['pso-to-bill-hours'] < 0, 0, orders_transfers['pso-to-bill-hours'])
# PSO to delivered
orders_transfers['pso-to-delivered-hours'] = \
((orders_transfers['delivered-at'] - orders_transfers['pso-created-at'])
/ np.timedelta64(1, 'h'))
orders_transfers['cluster-id'] = cluster
# =============================================================================
# Cluster Name
# =============================================================================
qc1 = f"""
select
c.id AS "cluster-id" ,
c.name AS "cluster-name"
from
"{read_schema}".clusters c
"""
cluster_info = rs_db.get_df(query=qc1)
orders_transfers = pd.merge(orders_transfers, cluster_info, on='cluster-id', how='left')
# =============================================================================
# OTIF calculation
# =============================================================================
bills = tuple(map(int,
list(orders_transfers[orders_transfers['bill-id'].notna()]['bill-id'].unique())))
qc = f"""
select
bi."bill-id",
count(distinct i."drug-id") as "drug-billed-cnt",
sum(bi.quantity) as "quantity-billed-sum"
from
"{read_schema}"."bills-1" b
join "{read_schema}"."bill-items-1" bi on
b.id = bi."bill-id"
join "{read_schema}"."inventory-1" i on
i.id = bi."inventory-id"
where
bi."bill-id" in {bills}
group by
bi."bill-id"
"""
billed = rs_db.get_df(query=qc)
orders_transfers1 = pd.merge(left=orders_transfers,
right=billed,
how='left', on=['bill-id'])
orders_transfers_d_infull1 = orders_transfers.groupby(
['order-number'])['drug-id'].nunique().reset_index().rename(
columns={'drug-id': 'drug-ordered-cnt'})
orders_transfers_q_infull1 = orders_transfers.groupby(
['order-number']).agg(
{'requested-quantity': [np.sum]}).reset_index().rename(
columns={'requested-quantity': 'requested-quantity-ordered-sum'})
orders_transfers_q_infull1.columns = ["_".join(x) for x in orders_transfers_q_infull1.columns.ravel()]
orders_transfers_q_infull1.rename(
columns={'requested-quantity-ordered-sum_sum': 'requested-quantity-ordered-sum',
'order-number_': 'order-number'},
inplace=True)
orders_transfers_infull1 = pd.merge(orders_transfers_d_infull1, orders_transfers_q_infull1,
on='order-number', how='inner')
orders_transfers2 = pd.merge(left=orders_transfers1,
right=orders_transfers_infull1,
how='left', on=['order-number'])
orders_transfers2['in-full-flag'] = np.where(
orders_transfers2['drug-billed-cnt'] >= orders_transfers2['drug-ordered-cnt'],
"in_full", "not_in_full")
orders_transfers2['qty-in-full-flag'] = np.where(
orders_transfers2['quantity-billed-sum'] >= orders_transfers2['requested-quantity-ordered-sum'],
"qty_in_full", "qty_not_in_full")
orders_transfers2['drug-billed-cnt'].fillna(0, inplace=True)
orders_transfers2['quantity-billed-sum'].fillna(0, inplace=True)
orders_transfers2['drug-ordered-cnt'].fillna(0, inplace=True)
orders_transfers2['requested-quantity-ordered-sum'].fillna(0, inplace=True)
# del orders_transfers2['drug_ordered_cnt']
# del orders_transfers2['drug_billed_cnt']
# del orders_transfers2['quantity_billed_sum']
# del orders_transfers2['requested_quantity_ordered_sum']
orders_transfers2['slot-date-time'] = orders_transfers2.apply(lambda x:
dt.combine(x['slot-date'],
x['selected-end-time']), 1)
# Different definition for PSO recommendation
orders_transfers2['otif-flag'] = np.where(
((orders_transfers2['in-full-flag'] == 'in-full') &
(orders_transfers2['delivered-at'] <= orders_transfers2['slot-date-time'])),
"otif", "not_otif")
orders_transfers2['qty-otif-flag'] = np.where(
((orders_transfers2['qty-in-full-flag'] == 'qty-in-full') &
(orders_transfers2['delivered-at'] <= orders_transfers2['slot-date-time'])),
"qty_otif", "qty_not_otif")
del orders_transfers2['slot-date-time']
logger.info("")
logger.info(
"length is same {}".format(len(orders) == len(orders_transfers2)))
logger.info("")
orders_transfers_all = orders_transfers_all.append(orders_transfers2)
pso_recommendation = orders_transfers_all
actions = pso_recommendation.groupby('order-number', as_index=False).agg({'fulfillment-actions': 'unique'})
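# Collapse to one fulfillment action per order; priority: LP > S2S > DC > SF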
def order_action_fulfillment(x):
if 'LP' in x:
return 'LP'
elif 'S2S' in x:
return 'S2S'
elif 'DC' in x:
return 'DC'
elif 'SF' in x:
return 'SF'
else:
return None
actions['final-fulfillment'] = actions['fulfillment-actions'].apply(lambda x: order_action_fulfillment(x))
actions = actions.drop(columns=['fulfillment-actions'])
pso_recommendation = pd.merge(pso_recommendation, actions, on='order-number', how='left')
# data type correction
pso_recommendation['recommended-slot-id'] = pso_recommendation['recommended-slot-id'].fillna(0)
pso_recommendation['recommended-slot-id'] = pso_recommendation['recommended-slot-id'].astype(int)
# etl
pso_recommendation['created-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
pso_recommendation['created-by'] = 'etl-automation'
pso_recommendation['updated-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
pso_recommendation['updated-by'] = 'etl-automation'
# Write to csv
s3.save_df_to_s3(df=pso_recommendation[table_info['column_name']], file_name='Shubham_G/PSO_Recomm/Pso_Recomm.csv')
s3.write_df_to_db(df=pso_recommendation[table_info['column_name']], table_name=table_name, db=rs_db, schema=schema)
logger.info('PSO Recommendation Table Uploaded Successfully')
# closing the connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/pso-recommendation/pso-recommendation.py | pso-recommendation.py |
# !/usr/bin/env python
# coding: utf-8
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from datetime import datetime
from datetime import timedelta
from dateutil.tz import gettz
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-rd', '--runtime_date_exp', default="0101-01-01", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
runtime_date_exp = args.runtime_date_exp
email_to = args.email_to
# env = 'stage'
# limit = 10
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
def get_drugs():
# Drugs
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
drugs_q = """
SELECT
`id` AS drug_id,
`composition`,
`drug-name`,
`type` AS drug_type,
`category` AS drug_category,
`repeatability-index`,
`is-repeatable`
FROM
`drugs`
"""
drugs_q = drugs_q.replace('`', '"')
logger.info(drugs_q)
drugs = rs_db.get_df(query=drugs_q)
drugs.columns = [c.replace('-', '_') for c in drugs.columns]
logger.info(len(drugs))
logger.info("Data for drugs fetched")
return drugs
def get_drugs_metadata():
#########################################
# Drug interval
#########################################
# Read drug interval per strip
# Currently purchase-interval in drug-std-info is not really a per strip interval
# (since it's at drug-id level, not drug-unit level)
# But because it's a median, so taking it divided by std-qty as best substitute for per strip interval
# But later on it must be replaced with correct column/logic for interval_per_strip
# Todo - evaluate interval per strip condition later
read_schema = 'prod2-generico'
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
drugs_metadata_q = """
SELECT
"drug-id",
"purchase-interval"/"std-qty" as interval_per_strip
FROM
"drug-std-info"
"""
drugs_metadata_q = drugs_metadata_q.replace('`', '"')
logger.info(drugs_metadata_q)
drugs_metadata = rs_db.get_df(query=drugs_metadata_q)
drugs_metadata.columns = [c.replace('-', '_') for c in drugs_metadata.columns]
logger.info("Interval per drug strip, data fetched with "
"length {}".format(len(drugs_metadata)))
drugs_metadata['interval_per_strip'] = drugs_metadata['interval_per_strip'].round(2)
logger.info("Mean value of interval per strip, before imputation {}".format(
drugs_metadata['interval_per_strip'].mean()))
# If for any drug, interval is less than 7 days, then make it 7 days
drugs_metadata['interval_per_strip'] = np.where(drugs_metadata['interval_per_strip'] < 7, 7,
drugs_metadata['interval_per_strip'])
# If for any drug, interval is more than 180 days, then make it 180 days
drugs_metadata['interval_per_strip'] = np.where(drugs_metadata['interval_per_strip'] > 180, 180,
drugs_metadata['interval_per_strip'])
logger.info("Mean value of interval per strip, after boundary imputation {}".format(
drugs_metadata['interval_per_strip'].mean()))
return drugs_metadata
def batch(store_id, drugs, drugs_metadata):
# Run date
if runtime_date_exp != '0101-01-01':
runtime_date = runtime_date_exp
else:
runtime_date = datetime.today().strftime('%Y-%m-%d')
# runtime_date = '2021-09-01'
logger.info("Running for {}".format(runtime_date))
# Period end date
# Paramatrize it
period_end_d = (datetime.strptime(runtime_date, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d')
# period_end_d = period_end_d_ts.strftime('%Y-%m-%d')
# Data to be fetched
#########################################################
# Bill data
########################################################
read_schema = 'prod2-generico'
rs_db.execute(f"set search_path to '{read_schema}'", params=None)
sales_q = """
SELECT
s."patient-id",
s."store-id",
s."year-created-at" as year_bill,
s."month-created-at" as month_bill,
s."created-date" as bill_date,
s."bill-id",
s."drug-id",
NVL(pdi."mean-interval", 0) as mean_interval_hist,
SUM(s."quantity") as "quantity"
FROM
"sales" s
left join
(
select "patient-id",
"drug-id",
"mean-interval"
from "patient-drug-interval"
where "cov" <= 0.5
) pdi on
pdi."patient-id" = s."patient-id"
and pdi."drug-id" = s."drug-id"
WHERE "store-id" = {0}
AND s."created-date" <= '{1}'
AND s."bill-flag" = 'gross'
group by
s."patient-id",
s."store-id",
s."year-created-at",
s."month-created-at",
s."created-date",
s."bill-id",
s."drug-id",
NVL(pdi."mean-interval", 0)
""".format(store_id, period_end_d)
# AND "store-id" = 2
logger.info(sales_q)
data_s = rs_db.get_df(query=sales_q)
data_s.columns = [c.replace('-', '_') for c in data_s.columns]
logger.info("Data length is : {}".format(len(data_s)))
data_s['bill_date'] = pd.to_datetime(data_s['bill_date'])
# Merge main data, with drugs metadata
data_raw = data_s.merge(drugs, how='left', on='drug_id')
data_raw['is_repeatable'] = data_raw['is_repeatable'].fillna(0)
data_raw['is_generic'] = np.where(data_raw['drug_type'] == 'generic', 1, 0)
logger.info("Raw data length - {}".format(len(data_raw)))
data = data_raw
#######################################
# Estimated due date
#######################################
# # Grp on unique columns
# data = data_raw.groupby(['patient_id', 'store_id', 'year_bill', 'month_bill',
# 'bill_date', 'bill_id', 'composition',
# 'drug_id', 'drug_name', 'drug_type',
# 'drug_category', 'repeatability_index',
# 'is_repeatable', 'is_generic'])[['quantity']].sum().reset_index()
logger.info("Data length after grouping at unique level - {}".format(len(data)))
# Impute mean at composition level
drugs_metadata_merge = drugs[['drug_id', 'composition']].merge(drugs_metadata,
how='inner', on=['drug_id'])
drugs_metadata_merge['interval_per_strip'] = drugs_metadata_merge['interval_per_strip'].fillna(
drugs_metadata_merge.groupby('composition')['interval_per_strip'].transform('mean')).astype(int)
logger.info("After imputation - Interval per drug strip, data fetched with "
"length {}".format(len(drugs_metadata_merge)))
logger.info("Mean value of interval per strip, after composotion imputation {}".format(
drugs_metadata_merge['interval_per_strip'].mean()))
# Merge with refill data
data = data.merge(drugs_metadata_merge[['drug_id', 'interval_per_strip']],
how='left', on=['drug_id'])
# Don't impute for now, but if were to impute for those not having interval
# Useful when customer buys new drug label, which is not popular
# Should it be imputed by avg of other drugs in same comp?
# Imputed
# data['interval_per_strip'] = data['interval_per_strip'].fillna(15)
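# Expected gap (in days) before the next purchase of this line = strips bought * per-strip interval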
data['expected_next_interval_drug'] = data['quantity'] * data['interval_per_strip']
data['expected_next_interval_drug'] = data['expected_next_interval_drug'].round(2)
logger.info("Mean value of expected interval, at drug level impute {}".format(
data['expected_next_interval_drug'].mean()))
logger.info("Data length after merging refill data - {}".format(len(data)))
###################################
# Patient-drug-interval
###################################
# Impute for patient drug id's where it's already consistent
# If for any drug, interval is less than 7 days, then make it 7 days
data['mean_interval_hist'] = np.where(((data['mean_interval_hist'] > 0) & (data['mean_interval_hist'] < 7)), 7,
data['mean_interval_hist'])
# If for any drug, interval is more than 180 days, then make it 180 days
data['mean_interval_hist'] = np.where(data['mean_interval_hist'] > 180, 180,
data['mean_interval_hist'])
data['mean_interval_hist'] = data['mean_interval_hist'].round(2)
logger.info("Mean value of interval, of patient drug interval after boundary imputation {}".format(
data['mean_interval_hist'].mean()))
# Number of cases where it will be imputed
pd_impute_length = len(data[data['mean_interval_hist'] > 0])
logger.info("Impute to be done for length {}".format(pd_impute_length))
# Finally impute
data['expected_next_interval'] = np.where(data['mean_interval_hist'] > 0,
data['mean_interval_hist'],
data['expected_next_interval_drug'])
logger.info("Mean value of interval, after patient drug level imputation {}".format(
data['expected_next_interval'].mean()))
# Remove any nulls
# Todo - evaluate null interval exclusion condition later
data = data[data['expected_next_interval'] > 0]
data['expected_next_interval'] = data['expected_next_interval'].astype(int)
data = data[data['expected_next_interval'] > 0]
logger.info("Data length after removing any nulls - {}".format(len(data)))
# If for any drug, interval is more than 180 days, then make it 180 days
data['expected_next_interval'] = np.where(data['expected_next_interval'] > 180, 180,
data['expected_next_interval'])
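# Projected refill date = bill date + expected interval (in days)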
data['refill_date'] = data['bill_date'] + pd.to_timedelta(data['expected_next_interval'], unit='D')
data['refill_date'] = pd.to_datetime(data['refill_date'].dt.date)
data['year_refill'] = data['refill_date'].dt.year
data['month_refill'] = data['refill_date'].dt.month
# Refill relevancy flag - for ops oracle
# Todo write custom logic for refill relevancy for front-end, check confluence documentation
data['refill_relevancy_flag'] = 1
# DB upload columns
final_cols = ['patient_id', 'store_id', 'year_bill', 'month_bill',
'bill_date', 'bill_id', 'composition',
'drug_id', 'drug_name', 'drug_type', 'drug_category',
'repeatability_index', 'is_repeatable', 'is_generic',
'quantity', 'interval_per_strip', 'expected_next_interval_drug',
'mean_interval_hist', 'expected_next_interval',
'refill_date', 'year_refill', 'month_refill',
'refill_relevancy_flag']
data_export = data[final_cols]
# For redshift specific
# Convert int columns to int
for i in ['bill_id', 'drug_id', 'repeatability_index', 'is_repeatable', 'quantity']:
data_export[i] = data_export[i].fillna(0).astype(int)
logger.info(data_export.columns)
################################
# DB WRITE
###############################
write_schema = 'prod2-generico'
write_table_name = 'retention-refill'
table_info = helper.get_table_info(db=rs_db_write, table_name=write_table_name, schema=write_schema)
# table_info_clean = table_info[~table_info['column_name'].isin(['id', 'created-at', 'updated-at'])]
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
# Mandatory lines
data_export['created-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['created-by'] = 'etl-automation'
data_export['updated-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['updated-by'] = 'etl-automation'
# Truncate and append
rs_db_write.execute(f"set search_path to '{write_schema}'", params=None)
truncate_q = """
DELETE FROM
"{0}"
WHERE "store-id" = {1}
"""
rs_db_write.execute(truncate_q.format(write_table_name, store_id))
# Write to DB
s3.write_df_to_db(df=data_export[table_info['column_name']], table_name=write_table_name,
db=rs_db_write, schema=write_schema)
logger.info("Uploading successful with length: {}".format(len(data_export)))
def get_store_ids():
query = """
select
"store-id"
from
"prod2-generico"."bills-1"
group by
"store-id"
"""
store_list = rs_db.get_df(query=query)['store-id'].drop_duplicates().to_list()
return store_list
def main():
store_list = get_store_ids()
drugs = get_drugs()
drugs_metadata = get_drugs_metadata()
for store_id in store_list:
# if store_id not in (2, 4):
# continue
logger.info("running for store id: {}".format(store_id))
batch(store_id, drugs, drugs_metadata)
if __name__ == '__main__':
try:
main()
except Exception as err:
logger.exception(err)
finally:
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/retention-refill/retention-refill.py | retention-refill.py |
# !/usr/bin/env python
# coding: utf-8
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB, MongoDB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from datetime import datetime
from datetime import timedelta
import dateutil.parser
from dateutil.tz import gettz
import pandas as pd
import numpy as np
# Import custom functions
from zeno_etl_libs.utils.consumer.crm_campaigns import CrmCampaigns
from zeno_etl_libs.utils.general_funcs import hms_to_seconds
parser = argparse.ArgumentParser(description="This is ETL custom script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-rd', '--runtime_date_exp', default="0101-01-01", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
runtime_date_exp = args.runtime_date_exp
email_to = args.email_to
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
# Instantiate the CRM campaigns class
# This imports connections also
cc = CrmCampaigns()
# Write connection instantiated, because
# to check data upload sync, same connection needed
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
# MongoDB Client
mg_db = MongoDB()
mg_client = mg_db.open_connection("generico-crm")
s3 = S3()
# Run date
if runtime_date_exp != '0101-01-01':
run_date = runtime_date_exp
else:
run_date = datetime.today().strftime('%Y-%m-%d')
# run_date = '2021-09-01'
logger.info("Running for {}".format(run_date))
run_date_minus1 = (pd.to_datetime(run_date) - timedelta(days=1)).strftime('%Y-%m-%d')
logger.info(f"Run date is {run_date} and previous date is {run_date_minus1}")
run_date_minus1_ts = dateutil.parser.parse(f"{run_date_minus1} 00:00:00")
run_date_ts = dateutil.parser.parse(f"{run_date} 23:59:59")
logger.info(f"Look up period is {run_date_minus1_ts} to {run_date_ts}")
#########################################################
# Read from Mongo-DB
########################################################
# Pick only for period mentioned
# date_time is the timestamp column
# But since it's string, so have to convert to date, before processing
# Read Generico crm table
db = mg_client['generico-crm']
# Query
collection = db['callLogs'].find({"type": "STORE_CALL_CONNECT",
"$expr": {
"$and": [
{
"$gte": [{"$dateFromString": {"dateString": "$date_time"}},
run_date_minus1_ts]
},
{
"$lte": [{"$dateFromString": {"dateString": "$date_time"}},
run_date_ts]
}
]
}
})
# Get into pandas data-frame
data_raw = pd.DataFrame(list(collection))
logger.info("Data fetched is with length {}".format(len(data_raw)))
# List data columns
logger.info("Column names in data are {}".format(data_raw.columns))
####################################################
# Filters on rows or columns
###################################################
"""
# Doing it in later blocks, so ignore for now
# Exclude patient id 0
# data = data_raw[data_raw['patient_id'] > 0]
"""
data = data_raw.copy()
unique_cols = ['call_type', 'status', 'type', 'is_active', 'to_number', 'from_number',
'store_id', 'duration', 'date_time']
# Find unique entries
data_unique = data[unique_cols].drop_duplicates()
logger.info("Unique data length is - {}".format(len(data_unique)))
# Convert to object
data_unique['is_active'] = data_unique['is_active'].astype(str)
# Convert to time-stamp
data_unique['date_time'] = pd.to_datetime(data_unique['date_time'])
############################################
# Upload to DSS unique table
############################################
read_schema = 'prod2-generico'
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
last_dss_q = """
SELECT
"call-type",
"status",
"type",
"is-active",
"to-number",
"from-number",
"store-id",
"duration",
"date-time"
FROM
"store-call-logs-entries"
"""
logger.info(last_dss_q)
last_data_dss = rs_db_write.get_df(query=last_dss_q)
last_data_dss.columns = [c.replace('-', '_') for c in last_data_dss.columns]
logger.info("Last data in DSS length {}".format(len(last_data_dss)))
# Convert to date-time
last_data_dss['date_time'] = pd.to_datetime(last_data_dss['date_time'])
# Join and check which to insert and which to update
# Data match with mySQL
data_export_dss = data_unique.merge(
last_data_dss, how='outer', on=unique_cols, indicator=True)
# To upload
data_insert_dss = data_export_dss[data_export_dss['_merge'] == 'left_only']
data_insert_dss.drop(['_merge'], axis=1, inplace=True)
logger.info("Length in left dataset after outer join {}".format(len(data_insert_dss)))
# Upload to DSS
################################
# DB WRITE
###############################
write_schema = 'prod2-generico'
write_table_name = 'store-call-logs-entries'
table_info = helper.get_table_info(db=rs_db_write, table_name=write_table_name,
schema=write_schema)
data_export = data_insert_dss.copy()
data_export.columns = [c.replace('_', '-') for c in data_export.columns]
# Mandatory lines
data_export['created-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['created-by'] = 'etl-automation'
data_export['updated-at'] = datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
data_export['updated-by'] = 'etl-automation'
logger.info("Insert DSS started")
# Write to DB
s3.write_df_to_db(df=data_export[table_info['column_name']], table_name=write_table_name,
db=rs_db_write, schema=write_schema)
logger.info("Uploading successful with length: {}".format(len(data_export)))
logger.info("Insert DSS Done")
###############################################################
# Processing for calling-dashboard
##############################################################
# Remove dummy stores
# Now fetch from unique table, and process for calling dashboard
read_schema = 'prod2-generico'
rs_db_write.execute(f"set search_path to '{read_schema}'", params=None)
dss_q = """
SELECT
"call-type",
"status",
"type",
"is-active",
"to-number",
"from-number",
"store-id",
"duration",
"date-time"
FROM
"store-call-logs-entries"
WHERE
"store-id" not in (52, 60, 92, 111, 149)
and "date-time" >= '{}'
""".format(run_date_minus1)
logger.info(dss_q)
data = rs_db_write.get_df(query=dss_q)
data.columns = [c.replace('-', '_') for c in data.columns]
# Convert to time-stamp
data['date_time'] = pd.to_datetime(data['date_time'])
logger.info("Data last 2-days length is - {}".format(len(data)))
# First make customer number final
# If Missed or incoming, then use from_number,
# If outgoing then used to_number
data['patient_number'] = np.where(data['call_type'].isin(['MISSED', 'INCOMING']), data['from_number'],
np.where(data['call_type'] == 'OUTGOING', data['to_number'], np.nan))
# Now see how many of them actually have patient id's mapped
# Remove those who don't
phones = tuple(data['patient_number'].dropna().drop_duplicates().to_list())
logger.info("Phones to be searched in patients table - length is {}".format(len(phones)))
##############################
# MySQL patients table
##############################
""" When number of phones > 200, in query will slows down the query performance,
try using "inner join" with the "store-call-logs-entries" table to apply the filter """
# Todo Patients info needed from real-time table, and not from replication,
# challenge to be addressed later
patients_info_q = """
SELECT
phone as patient_number,
id as patient_id
FROM
patients
WHERE
phone in {}
""".format(phones)
data_p = pd.read_sql_query(patients_info_q, cc.ms_connection_read.connection)
data_p.columns = [c.replace('-', '_') for c in data_p.columns]
logger.info("Patient id found for phones - length {}".format(len(data_p)))
# Merge
data = data.merge(data_p, how='inner', on=['patient_number'])
logger.info("Length for calling data for patient id present - {}".format(len(data)))
data = data.sort_values(by=['date_time'])
# Now filters come
# Divide into 3-3hrs windows. And then put year, month, day, window index
data['year_call'] = data['date_time'].dt.year
data['month_call'] = data['date_time'].dt.month
data['day_call'] = data['date_time'].dt.day
data['hour_call'] = data['date_time'].dt.hour
# For now, bucket calls by day and make sure a patient is not counted more than once for the same day
# Next, drop missed calls that were already resolved by a later connected call on the same day;
# keep missed calls with no follow-up call, or with only a very short (< 30 seconds) follow-up
# Sort data by date_time alone
data = data.sort_values(by=['date_time'])
# Change HH:MM:SS to seconds
data['duration_sec'] = data.apply(lambda row: hms_to_seconds(row['duration']), axis=1)
logger.info("Avg value of duration is {}".format(data['duration_sec'].mean()))
# Cases can be
# Missed call, incoming
# Missed call, missed call, incoming
# Missed call, incoming, missed call
# First remove the successive missed calls and keep only latest of them
# For that, shift lag -1 and see if successive call is missed call or not
# GROUP CALL TYPE into MISSED, 'OTHERS'
data['call_type_grp'] = np.where(data['call_type'] != 'MISSED', 'OTHERS',
data['call_type'])
logger.info("Unique call type groups are {}".format(data['call_type_grp'].unique()))
logger.info("Current df head is {}".format(data.head()))
# Assign 1 to MISSED and 0 for others, and start from first instance of Missed call
data['missed_call_flag'] = np.where(data['call_type_grp'] == 'MISSED', 1, 0)
# Sort data by date_time
data = data.sort_values(by=['patient_id', 'date_time'])
logger.info("Data length before applying missed call filter is {}".format(len(data)))
# MISSED CALL CUM-FLAG
data['missed_call_cumsum'] = data.groupby(['patient_id'])['missed_call_flag'].cumsum()
logger.info("Current df head is {}".format(data.head()))
# FILTER FOR >=1 MISSED CALL CUMSUM
data = data[data['missed_call_cumsum'] >= 1].copy()
logger.info("Data length AFTER applying missed call filter is {}".format(len(data)))
logger.info("Current df head is {}".format(data.head()))
# Grouping call durations across calls
data = data.groupby(['year_call', 'month_call', 'day_call', 'store_id',
'patient_id', 'call_type_grp'])['duration_sec'].sum().reset_index()
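# For each patient-day, capture the previous/next call-type group and the next call's duration to detect whether a missed call got a real callback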
data['same_day_prev_call_type'] = data.groupby(['year_call', 'month_call', 'day_call',
'patient_id'])['call_type_grp'].shift(1)
data['same_day_next_call_type'] = data.groupby(['year_call', 'month_call', 'day_call',
'patient_id'])['call_type_grp'].shift(-1)
data['same_day_next_call_duration_sec'] = data.groupby(['year_call', 'month_call', 'day_call',
'patient_id'])['duration_sec'].shift(-1)
# Keep only unresolved missed calls: no later call that day, or a follow-up call totalling under 30 seconds
data_f = data[((data['call_type_grp'] == 'MISSED') & (data['same_day_next_call_type'].isnull()))
| ((data['call_type_grp'] != 'MISSED') & (data['same_day_prev_call_type'] == 'MISSED') &
(data['duration_sec'] < 30))].copy()
logger.info("length of missed call data is {}".format(len(data_f)))
logger.info("Current df head is {}".format(data_f.head()))
# Unique
data_f_unique = data_f.drop_duplicates(subset=['year_call',
'month_call',
'day_call',
'patient_id'])
logger.info("Unique (per day) length {}".format(len(data_f_unique)))
################################################
# Insert in mySQL - mandatory steps
################################################
# Remove Last 7 days billed already
data_f_unique = cc.no_bill_in_last_n_days(data_f_unique, run_date, last_n_days_param=7)
# Should not have been called in last 7-days thru calling dashboard
# Can be paramatrized, or changed later to 2days
data_f_unique = cc.no_call_in_last_n_days(data_f_unique, run_date, last_n_days_param=7)
# Read DND list
data_f_unique = cc.remove_dnd(data_f_unique)
#################################################
# MySQL insert
# Upload to mySQL DB
#################################################
# Output to calling-dashboard, ensure unique
data_c = data_f_unique[['store_id', 'patient_id']].copy()
data_c['campaign_id'] = 19
data_c['callback_reason'] = 'For callback'
data_c['list_date'] = run_date
data_c['call_date'] = data_c['list_date']
# Remove any duplicates
data_c = data_c.drop_duplicates(subset='patient_id')
logger.info("Unique list to be considered for calling dashboard - length {}".format(len(data_c)))
logger.info("mySQL - Insert starting")
data_c.columns = [c.replace('_', '-') for c in data_c.columns]
data_c.to_sql(name='calling-dashboard', con=cc.ms_connection_write.engine, if_exists='append',
index=False, method='multi', chunksize=500)
logger.info("mySQL - Insert ended")
logger.info("Missed call - Calling list data inserted/uploaded to mySQL for run date {}"
" with length : {}".format(run_date, len(data_c)))
# Closing the DB Connections
rs_db_write.close_connection()
cc.close_connections()
logger.info("File ends") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/crm-missed-call/crm-missed-call.py | crm-missed-call.py |
import os
import sys
sys.path.append('./../../../..')
import pandas as pd
import argparse
from datetime import datetime as dt
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
def main(ms_db, rs_db, s3, year_month_input, test_records_limit):
logger = get_logger()
ms_cursor = ms_db.open_connection()
# year_month_input = "2021-08" # eg. "2022-01"
year_month = year_month_input or dt.now().strftime("%Y-%m")
plain_year_month = year_month.replace("-", "")
year, month = int(year_month.split("-")[0]), int(year_month.split("-")[1])
# default_db = "test-generico" if env == EnvNames.development else "prod2-generico"
default_db = "prod2-generico"
db_name = f"generico_{plain_year_month}" if year_month_input else default_db
ms_cursor.execute(query=f"use `{db_name}`;")
# test_records_limit = 1 # None if running for all records
limit_str = f"LIMIT {test_records_limit} ;" if test_records_limit else ";"
# Data 1
query = """
select
'2020-09-01' date,
'' `return-item-id`,
f.id `invoice-item-id`,
d.`id` `invoice-id`,
c.id `inventory-id`,
'' `return-id`,
e.`id` `drug-id`,
e.`drug-name`,
f.vat gst,
c.`purchase-rate`,
c.`expiry`,
case
d.`received-at`
when '0000-00-00 00:00:00' then '1971-01-01 00:00:00'
else d.`received-at`
end `received-at`,
d.`dispatch-status`,
case
d.`dispatched-at`
when '0000-00-00 00:00:00' then '1971-01-01 00:00:00'
else d.`dispatched-at`
end `invoice-dispatched-at`,
d.`franchisee-invoice-number`,
d.`invoice-number`,
d.`invoice-date`,
h.name `distributor`,
g.id `store-id`,
g.name `store-name`,
e.`type`,
c.`locked-quantity` quantity,
'' `return-reason`,
'' serial,
'' `debit-note-status`,
'1971-01-01 00:00:00' `debit-note-created-at`,
'1971-01-01 00:00:00' `debit-note-dispatched-at`,
'1971-01-01 00:00:00' `discarded-at`,
'' `discard-reason`,
'' `return-status`,
'' `return_created_at`,
'' `returns_created_by`,
d.`created-by` 'invoice_created_by',
c.mrp,
e.pack
from
`inventory` c
join `invoices` d on
c.`invoice-id` = d.id
join `drugs` e on
c.`drug-id` = e.id
join `invoice-items` f on
c.`invoice-item-id` = f.id
join `stores` g on
c.`store-id` = g.id
join `distributors` h on
d.`distributor-id` = h.id
where
c.`locked-quantity` > 0
%s
""" % limit_str
df_1 = pd.read_sql_query(query, ms_db.connection)
logger.info("Data 1, fetched.")
# Data: 2
query = """
select
'2020-09-01' date,
'' `return-item-id`,
f.`invoice-item-reference` `invoice-item-id`,
d.`id` `invoice-id`,
c.id `inventory-id`,
'' `return-id`,
e.`id` `drug-id`,
e.`drug-name`,
f.vat gst,
k.`purchase-rate`,
c.`expiry`,
case
d.`received-at` when '0000-00-00 00:00:00' then '1971-01-01 00:00:00'
else d.`received-at`
end `received-at`,
d.`dispatch-status`,
case
d.`dispatched-at` when '0000-00-00 00:00:00' then '1971-01-01 00:00:00'
else d.`dispatched-at`
end `invoice-dispatched-at`,
d.`franchisee-invoice-number`,
d.`invoice-number`,
d.`invoice-date`,
h.name `distributor`,
g.id `store-id`,
g.name `store-name`,
e.`type`,
c.`locked-quantity` quantity,
'' `return-reason`,
'' serial,
'' `debit-note-status`,
'1971-01-01 00:00:00' `debit-note-created-at`,
'1971-01-01 00:00:00' `debit-note-dispatched-at`,
'1971-01-01 00:00:00' `discarded-at`,
'' `discard-reason`,
'' `return-status`,
'' `return_created_at`,
'' `returns_created_by`,
d.`created-by` 'invoice_created_by',
c.mrp,
e.pack
from
`inventory-1` c
join `invoices` d on
c.`invoice-id` = d.id
join `drugs` e on
c.`drug-id` = e.id
join `invoice-items-1` f on
c.`invoice-item-id` = f.id
join `stores` g on
c.`store-id` = g.id
join `distributors` h on
d.`distributor-id` = h.id
join `inventory` k on
c.id = k.id
where
c.`locked-quantity` > 0
%s
""" % limit_str
df_2 = pd.read_sql_query(query, ms_db.connection)
logger.info("Data 2, fetched.")
# # Data: 3
query = """
select
'2020-09-01' date,
a.id `return-item-id`,
f.id `invoice-item-id`,
d.`id` `invoice-id`,
c.id `inventory-id`,
b.id `return-id`,
e.`id` `drug-id`,
e.`drug-name`,
f.vat gst,
c.`purchase-rate`,
c.`expiry`,
case
d.`received-at`
when '0000-00-00 00:00:00' then '1971-01-01 00:00:00'
else d.`received-at`
end `received-at`,
d.`dispatch-status`,
case
d.`dispatched-at`
when '0000-00-00 00:00:00' then '1971-01-01 00:00:00'
else d.`dispatched-at`
end `invoice-dispatched-at`,
d.`franchisee-invoice-number`,
d.`invoice-number`,
d.`invoice-date`,
h.name `distributor`,
g.id `store-id`,
g.name `store-name`,
e.`type`,
a.`returned-quantity` quantity,
a.`return-reason`,
i.serial,
i.`status` `debit-note-status`,
case
i.`created-at`
when '0000-00-00 00:00:00' then '1971-01-01 00:00:00'
else i.`created-at`
end `debit-note-created-at`,
case
i.`dispatched-at`
when '0000-00-00 00:00:00' then '1971-01-01 00:00:00'
else i.`dispatched-at`
end `debit-note-dispatched-at`,
case
a.`discarded-at`
when '0000-00-00 00:00:00' then '1971-01-01 00:00:00'
else a.`discarded-at`
end `discarded-at`,
a.`discard-reason`,
a.status `return-status`,
b.`created-at` `return_created_at`,
b.`created-by` `returns_created_by`,
d.`created-by` 'invoice_created_by',
c.mrp,
e.pack
from
`return-items` a
join `returns-to-dc` b on
a.`return-id` = b.id
join `inventory` c on
a.`inventory-id` = c.id
join `invoices` d on
c.`invoice-id` = d.id
join `drugs` e on
c.`drug-id` = e.id
join `invoice-items` f on
c.`invoice-item-id` = f.id
join `stores` g on
b.`store-id` = g.id
join `distributors` h on
d.`distributor-id` = h.id
left join `debit-notes` i on
a.`debit-note-reference` = i.id
where
a.`status` in ('saved', 'approved')
%s
""" % limit_str
df_3 = pd.read_sql_query(query, ms_db.connection)
logger.info("Data 3, fetched.")
# Updating the columns
df_1.columns = [c.replace('-', '_') for c in df_1.columns]
df_2.columns = [c.replace('-', '_') for c in df_2.columns]
df_3.columns = [c.replace('-', '_') for c in df_3.columns]
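# Combine the three pulls: locked inventory, locked inventory-1, and saved/approved return items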
df = df_1.append([df_2, df_3])
df['year'] = year
df['month'] = month
# # Insert the data
table_name = "wc-inventory"
schema = "prod2-generico"
# # Clean the old data for the same month if any
# query = f"""
# delete
# from
# "%s"."%s"
# where
# year = %s
# and month = %s;
# """ % (schema, table_name, year, month)
#
# rs_db.execute(query=query)
inventory_table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
df['date'] = pd.to_datetime(df['date']).dt.date
file_name = f"{table_name}_{year}{month}.csv"
s3.save_df_to_s3(df=df[list(dict.fromkeys(inventory_table_info['column_name']))], file_name=file_name)
file_s3_uri = f's3://aws-glue-temporary-921939243643-ap-south-1/{file_name}'
s3.write_to_db_from_s3_csv(
db=rs_db,
file_s3_uri=file_s3_uri,
schema=schema,
table_name=table_name
)
logger.info(f"Data uploaded to s3 successfully, at {file_s3_uri}")
# s3.write_df_to_db(
# df=df[list(dict.fromkeys(inventory_table_info['column_name']))],
# table_name=table_name, db=rs_db, schema=schema
# )
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-ym', '--year_month', default=None, type=str, required=False, help="Year Month eg. 2022-01")
parser.add_argument('-l', '--limit', default=None, type=int, required=False, help="test records")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
year_month = args.year_month
limit = args.limit
print(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
ms_db = MySQL()
_s3 = S3()
""" calling the main function """
main(ms_db=ms_db, rs_db=rs_db, s3=_s3, year_month_input=year_month, test_records_limit=limit)
# Closing the DB Connection
rs_db.close_connection()
ms_db.close() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/wc-inventory/wc-inventory-data-to-s3.py | wc-inventory-data-to-s3.py |
# this is include zeno_etl_libs in the python search path on the run time
import argparse
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from datetime import datetime as dt
from zeno_etl_libs.logger import get_logger
def main(rs_db, s3, year_month):
logger = get_logger()
# year_month = "2021-05" # eg. "2022-01"
year_month = year_month or dt.now().strftime("%Y-%m")
# plain_year_month = year_month.replace("-", "")
year, month = int(year_month.split("-")[0]), int(year_month.split("-")[1])
# default_db = "prod2-generico"
# db_name = f"generico_{plain_year_month}" if year_month_input else default_db
table_name = "wc-inventory"
schema = "prod2-generico"
# Clean the old data for the same month if any
query = f"""
delete
from
"%s"."%s"
where
year = %s
and month = %s;
""" % (schema, table_name, year, month)
rs_db.execute(query=query)
logger.info(f"Old data cleaned from table: {table_name}, year-month: {year_month}")
file_name = f"{table_name}_{year}{month}.csv"
file_s3_uri = f's3://aws-glue-temporary-921939243643-ap-south-1/{file_name}'
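# Load the month's CSV (written to S3 earlier by the companion upload script) into the Redshift table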
s3.write_to_db_from_s3_csv(
db=rs_db,
file_s3_uri=file_s3_uri,
schema=schema,
table_name=table_name
)
logger.info("Data pushed to table successfully.")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-ym', '--year_month', default=None, type=str, required=False, help="YYYY-MM eg. 2022-01")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
year_month = args.year_month
print(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
_s3 = S3()
""" calling the main function """
main(rs_db=rs_db, s3=_s3, year_month=year_month)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/wc-inventory/wc-inventory-data-to-table.py | wc-inventory-data-to-table.py |
echo "start time: $(date)"
year_month="2021-09"
#echo "snapshot to be restored: prod2-$year_month-06-02-00-generico.sql.gz"
# shellcheck disable=SC2164
# go to vol3 because snapshots are big files
#cd /vol3/database/
# switch to super doer(sudo)
# sudo su
# Download the snapshot from s3 to database folder
snapshot_name=prod2-"$year_month"-01-02-00-generico.sql
s3cmd get s3://prod-generico-storage/mysql/"$snapshot_name".gz
echo "S3 Download done at: $(date)"
# extracting the zip
#zcat "$snapshot_name".gz > "$snapshot_name"
# replacing the prod2-generico DB with yearmonth-generico
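# strip the hyphen so e.g. 2021-09 becomes 202109 (used in the per-month database name generico_YYYYMM)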
# shellcheck disable=SC2039
plain_year_month=$(echo "$year_month" | sed 's:[!@#$%^&*()=-]::g')
echo "$plain_year_month"
#sed -i "s#prod2\-generico#'$plain_year_month'\-generico#" "$snapshot_name"
# split the dump for specific tables
# shellcheck disable=SC2006
echo "Start: table spliting"
sh mysqldumpsplitter.sh --source "$snapshot_name".gz --extract DBTABLE --match_str "prod2-generico.(inventory|invoices|drugs|invoice-items|stores|distributors|store-dc-mapping|inventory-1|invoice-items-1|return-items|returns-to-dc|debit-notes)" --decompression gzip --compression none --output_dir ./out/tables/"$plain_year_month"/
echo "End: table spliting at: $(date)"
db_password="dev-server"
db_name="generico_$plain_year_month"
echo "$db_name", "$db_password"
mysql -u admin -h stag-mysql.cdaq46oaug4x.ap-south-1.rds.amazonaws.com -p"$db_password" -e "drop database IF EXISTS $db_name;"
mysql -u admin -h stag-mysql.cdaq46oaug4x.ap-south-1.rds.amazonaws.com -p"$db_password" -e "create database IF NOT EXISTS $db_name;"
# mysql -u server -p"$db_password" -e "drop database IF EXISTS $db_name;"
# mysql -u server -p"$db_password" -e "create database IF NOT EXISTS $db_name;"
# import all the tables
echo "Start: table import to DB"
for filename in ./out/tables/"$plain_year_month"/*.sql; do
echo "$filename start at: $(date)"
mysql -u admin -h stag-mysql.cdaq46oaug4x.ap-south-1.rds.amazonaws.com -p"$db_password" "$db_name" < "$filename"
# mysql -u server -p"$db_password" "$db_name" < "$filename"
done | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/wc-inventory/restore_db_tables.sh | restore_db_tables.sh |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
start_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
# getting inventory snapshot data
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata')).date()
cur_year = datetime.datetime.now(tz=gettz('Asia/Kolkata')).year
prev_date = cur_date - datetime.timedelta(1)
logger.info('Inventory snapshot for date ' + str(cur_date))
stores_query = """
select
distinct id as "store-id"
from
"prod2-generico"."stores"
"""
stores = rs_db.get_df(stores_query)
# expiry_loss_zp
status1 = False
try:
exipry_loss_query = """
select
d.*,
e."name",
s."name" as "cost-centre",
e."gstn",
r."id" as "return-item-id",
r."taxable" as "taxable-value",
r."gst" as "tax-rate",
r."gst-amount" as "tax-value",
r."net" as "return-net-value",
e."id" as "distributor-id",
"drug-id",
"invoice-number",
"invoice-date",
f."type",
e.type as "dist-type",
f."drug-name",
x.id as "invt-id",
"batch-number",
x."expiry"
from
"prod2-generico"."return-items" r
join "prod2-generico"."debit-notes" d on
r."debit-note-reference" = d."id"
join "prod2-generico"."inventory" x on
r."inventory-id" = x."id"
join "prod2-generico"."invoices" i on
i."id" = x."invoice-id"
join "prod2-generico"."distributors" e on
d."dist-id" = e."id"
join "prod2-generico"."stores" s on
d."store-id" = s."id"
join "prod2-generico"."drugs" f on
f."id" = x."drug-id"
where
d."dist-id" != 64
and d."status" in ('accounted', 'settled')
and d."category" = 'product-expired'
and (date(d."settled-at") = date(Dateadd(d,-1,current_date)) or date(d."accounted-at") = date(Dateadd(d,-1,current_date)) )
"""
expiry_loss = rs_db.get_df(expiry_loss_query)
logger.info('fetched expiry_loss data for returns whose debit note is settled/accounted yesterday')
schema = 'prod2-generico'
table_name = 'expiry-loss-accounts'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
truncate_query = '''
delete
from
"prod2-generico"."expiry-loss-accounts"
where
(date("settled-at") = date(Dateadd(d,-1,current_date))
or date("accounted-at") = date(Dateadd(d,-1,current_date)) )
'''
rs_db_write.execute(truncate_query)
logger.info(str(table_name) + ' table deleted for yesterday data to avoid data duplication in case of multiple runs')
s3.write_df_to_db(df=expiry_loss[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
logger.info(str(table_name) + ' table appended')
status1 = True
except Exception as error:
logger.info('expiry_loss load failed: ' + str(error))
# WC_inventory_invoices
status2 = False
try:
# Keeping Audit loss data for current year only
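# Audit loss compares expected vs accounted quantities per inventory-check item; post-audit
# corrections (audit-reconciliation, found-later, multiple-scanned, pack-size-error) are summed
# into sum-changes so qty-diff = expected - (accounted + sum-changes), valued at ZP PTR.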
audit_loss_query = """
SELECT
b."id" AS "inventory-check-item-id",
b."check-id",
a."type" AS "audit-type",
a."created-at" AS "audit-date",
a."store-id",
s."name" AS "store-name",
b."drug-id",
d."drug-name",
d."type",
d."category",
b."inventory-id",
b."expected",
b."accounted",
b."status",
COALESCE(t."sum-changes", 0) AS "sum-changes",
COALESCE((b."accounted" + t."sum-changes"), 0) AS "final-accounted",
i."ptr" AS "zp-ptr",
i."purchase-rate" AS "wc-ptr",
COALESCE((b."expected" - (b."accounted" + t."sum-changes")),
0) AS "qty-diff",
COALESCE((b."expected" * i."ptr"), 0) AS "expected-value",
COALESCE(((b."accounted" + t."sum-changes") * i."ptr"),
0) AS "accounted-value"
FROM
"prod2-generico"."inventory-check-1" a
JOIN
"prod2-generico"."inventory-check-items-1" b ON a."id" = b."check-id"
LEFT JOIN
(SELECT
y."store-id",
y."inventory-id",
y."inventory-check-item-id",
SUM(y."change") AS "sum-changes"
FROM
"prod2-generico"."inventory-changes-1" y
WHERE
y."change-reason" IN ('audit-reconciliation' , 'found-later', 'multiple-scanned', 'pack-size-error')
GROUP BY y."store-id" , y."inventory-id" , y."inventory-check-item-id") t ON b."id" = t."inventory-check-item-id"
LEFT JOIN
"prod2-generico"."stores" s ON s."id" = a."store-id"
LEFT JOIN
"prod2-generico"."drugs" d ON d."id" = b."drug-id"
LEFT JOIN
"prod2-generico"."inventory-1" i ON i."id" = b."inventory-id"
WHERE
date(a."created-at") = date(Dateadd(d,-1,current_date))
"""
audit_loss = rs_db.get_df(audit_loss_query)
logger.info('fetched audit_loss data for yesterday')
schema = 'prod2-generico'
table_name2 = 'audit-loss-accounts'
table_info2 = helper.get_table_info(db=rs_db_write, table_name=table_name2, schema=schema)
truncate_query = '''
delete
from
"prod2-generico"."audit-loss-accounts"
where
date("audit-date") = date(Dateadd(d,-1,current_date))
'''
rs_db_write.execute(truncate_query)
logger.info(str(table_name2) + ' table deleted for yesterday data to avoid duplicate data in case of multiple runs')
truncate_query = '''
delete
from
"prod2-generico"."audit-loss-accounts"
where
extract (y from "audit-date") != {current_year}
'''.format(current_year= cur_year)
rs_db_write.execute(truncate_query)
logger.info(str(table_name2) + ' table deleted for previous years data')
s3.write_df_to_db(df=audit_loss[table_info2['column_name']], table_name=table_name2, db=rs_db_write,
schema=schema)
logger.info(str(table_name2) + ' table uploaded')
status2 = True
except Exception as error:
logger.info('audit_loss load failed: ' + str(error))
if status1 and status2:
status = 'Success'
else:
status = 'Failed'
end_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject=f"{env}-{status} : {table_name} & {table_name2} table updated",
mail_body=f"{table_name} & {table_name2} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/expiry-audit-loss/expiry-audit-loss.py | expiry-audit-loss.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
import pandas as pd
import dateutil
import datetime as dt
from dateutil.tz import gettz
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
rs_db = DB(read_only=False)
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'pmf-conversion-table'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema='prod2-generico')
# max session id
try:
max_session_q = """
select max("session-id") max_session_id
FROM
"prod2-generico"."pmf-conversion-table"
"""
max_session = rs_db.get_df(max_session_q)
max_session.columns = [c.replace('-', '_') for c in max_session.columns]
max_session = max_session.max_session_id[0]
base_q = f"""
select
ad."store-id" ,
ad."unique-id" as "session-id",
ad."patients-store-orders-id" ,
ad."requested-drug-id" ,
ad."requested-drug-name" ,
ad."suggested-drug-id",
ad."suggested-drug-name",
ad."patient-phone" ,ad."patient-id",
d2."type" as suggested_drug_type,
d."type" as requested_drug_type,
prm."bill-id" as bill_id_thru_pso ,sda."drug-id" as ass_drug,
ad."created-by" as "session-created-by",
ad."created-at" as "session-date",
sda."is-active" as assortment_active,
plt."tag" as session_source,
nvl(ga."generic-affinity-score",0)"generic-affinity-score" ,
cl."patient-phone" as called_patient,
max(sgdp."requested-selling-rate")"requested-drug-rate",
max(ad."suggested-drug-rate")"suggested-drug-rate",
max(ad."requested-drug-quantity")"requested-drug-quantity",
max(ad."suggested-drug-quantity")"suggested-drug-quantity",
max(ad."required-drug-quantity")"required-drug-quantity",
max(prm."bill-id") over (partition by ad."unique-id") as max_bill,
max(prm."gross-quantity") as "drug-sold",
max(case when b."net-payable">0 then 1 else 0 end) as "billed-same-day",
max(case when sa."gross-quantity">0 then 1 else 0 end) as "drug-sold-same-day",
max(ad."suggested-drug-inventory-quantity") "suggested-drug-inventory-quantity",
max(ad."requested-drug-inventory-quantity") "requested-drug-inventory-quantity",
max(prm."created-by") over (partition by ad."unique-id") as "max-pso-created-by" from
"prod2-generico"."alternate-drugs" ad
left join "prod2-generico"."patient-requests-metadata" prm
on
prm.id = ad."patients-store-orders-id"
left join "prod2-generico".patients p
on right(REPLACE(REPLACE(p."phone",'.',''),' ',''), 10) =
right(REPLACE(REPLACE(ad."patient-phone",'.',''),' ',''), 10)
left join "prod2-generico"."bills-1" b
on
b."patient-id" = (case
when ad."patient-id" is null then prm."patient-id"
when ad."patient-id" is null and prm."patient-id" is null then p.id
else ad."patient-id"
end)
and date(ad."created-at") = date(b."created-at")
left join "prod2-generico"."sales-agg" sa
on
sa."patient-id" = (case
when ad."patient-id" is null then prm."patient-id"
when ad."patient-id" is null and prm."patient-id" is null then p.id
else ad."patient-id"
end)
and sa."drug-id" = prm."drug-id"
and date(sa."created-date") = date(ad."created-at")
left join "prod2-generico"."store-drug-assortment" sda
on sda."store-id" = ad."store-id" and sda."drug-id" = ad."requested-drug-id"
and sda."is-active" =1
left join "prod2-generico".drugs d
on d.id = ad."requested-drug-id"
left join "prod2-generico".drugs d2
on d2.id = ad."suggested-drug-id"
inner join "prod2-generico"."pmf-login-tag" plt
on ad."created-by" = plt."login-id"
left join "prod2-generico"."generic-affinity" ga
on ga."patient-id"=(case
when ad."patient-id" is null then prm."patient-id"
when ad."patient-id" is null and prm."patient-id" is null then p.id
else ad."patient-id"
end)
left join (
select
"drug-id" ,
max("selling-rate") as "requested-selling-rate"
from
"prod2-generico"."store-group-drug-price" i
group by
1
) sgdp on
ad."requested-drug-id" = sgdp."drug-id"
left join (
select
right(callfrom, 10) as "patient-phone",
min(starttime) as "call-time"
from
"prod2-generico"."prod2-generico".exotelincomingcalllogs e
where
calltype = 'completed'
group by
1,
date(starttime)) cl on
ad."patient-phone" = cl."patient-phone"
and date(ad."created-at") = date(cl."call-time")
where ad."unique-id" > {max_session}
group by
ad."store-id" ,
ad."unique-id" ,
ad."patients-store-orders-id" ,
ad."requested-drug-id" ,
ad."requested-drug-name" ,
prm."drug-id",
prm."drug-name" ,
prm."bill-id",sda."drug-id",ga."generic-affinity-score",d."type",d2."type",
ad."created-by",
ad."suggested-drug-id",
ad."suggested-drug-name",
ad."patient-phone",
ad."created-at",
sda."is-active",
plt."tag",
cl."patient-phone" ,ad."patient-id",prm."created-by"
"""
base = rs_db.get_df(base_q)
base.columns = [c.replace('-', '_') for c in base.columns]
base['assortment_active'].fillna(0, inplace=True)
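# Derive per-row diagnosis flags used downstream:
# - flag_not_assort: no suggestion returned and the requested drug is not in the store's active assortment
# - flag_unavailability: drug not in assortment, or the suggested drug's inventory cannot cover the required quantity
# - conversion flags: whether the session (any drug) or this specific drug ended up billed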
# assortment flag
conditions = [(
(base.suggested_drug_id.isnull()) &
(base['assortment_active'] == 0)
),
(
(base.suggested_drug_id.isnull()) &
(base['assortment_active'] == 1)
),
(base.suggested_drug_id.notnull())
]
choices = [1, 0, 0]
base['flag_not_assort'] = np.select(conditions, choices)
# base2 = base[(base['session_id'] == '1663646353324')]  # debug: inspect a single session
# unavailability flag
conditions = [
(base['flag_not_assort'] == 1),
(base['required_drug_quantity'] > base[
'suggested_drug_inventory_quantity']),
(base['required_drug_quantity'] <= base[
'suggested_drug_inventory_quantity'])
]
choices = [1, 1, 0]
base['flag_unavailability'] = np.select(conditions, choices)
# sessions where patient had called
base['patient_called_flag'] = np.where(base['called_patient'].isnull(), 0, 1)
# session converted (anyone drug converted then converted)
base['session_conv_flag'] = np.where(base['max_bill'].isnull(), 0, 1)
# session drug converted
base['session_drug_conv_flag'] = np.where(base['bill_id_thru_pso'].isnull(), 0, 1)
# drug level expensive or not
base['drug_expensive_flag'] = np.where((base['suggested_drug_id'] != base['requested_drug_id'])
& (base['suggested_drug_rate'] > base['requested_drug_rate']), 1, 0)
session_expensive_count = base.groupby('session_id')['drug_expensive_flag'].sum().reset_index()
session_expensive_count = session_expensive_count.rename(columns={'drug_expensive_flag': 'expensive_drugs'})
session_not_in_assortment_count = base.groupby('session_id')['flag_not_assort'].sum().reset_index()
session_not_in_assortment_count = session_not_in_assortment_count.rename(
columns={'flag_not_assort': 'not_in_assortment_drugs'})
session_drug_count = base[(base['suggested_drug_id'] != base['requested_drug_id'])].groupby('session_id')[
'suggested_drug_id'].nunique().reset_index()
session_drug_count = session_drug_count.rename(columns={'suggested_drug_id': 'suggested_drug_cnt'})
session_ethical_count = base[base['requested_drug_type'] == 'ethical'].groupby('session_id')[
'requested_drug_id'].nunique().reset_index()
session_ethical_count = session_ethical_count.rename(columns={'requested_drug_id': 'eth_drugs'})
session_generic_count = base[base['requested_drug_type'] == 'generic'].groupby('session_id')[
'requested_drug_id'].nunique().reset_index()
session_generic_count = session_generic_count.rename(columns={'requested_drug_id': 'gen_drugs'})
session_unavailable_count = base.groupby('session_id')['flag_unavailability'].sum().reset_index()
session_unavailable_count = session_unavailable_count.rename(columns={'flag_unavailability': 'unavailable_drugs'})
base = pd.merge(base, session_ethical_count, how='left', on=['session_id'])
base = pd.merge(base, session_generic_count, how='left', on=['session_id'])
base = pd.merge(base, session_expensive_count, how='left', on=['session_id'])
base = pd.merge(base, session_drug_count, how='left', on=['session_id'])
base = pd.merge(base, session_not_in_assortment_count, how='left', on=['session_id'])
base = pd.merge(base, session_unavailable_count, how='left', on=['session_id'])
# base2=base[(base['session_id']== '1663646353324')]
# ethical-preferring sessions
base['ethical_preference_issue'] = np.where(
(base['eth_drugs'] > base['gen_drugs']) & (base['generic_affinity_score'] <= 2), 1, 0)
# drug rate comparison
base['rate_issue'] = np.where((base['expensive_drugs'] > 0), 1, 0)
# assortment issue
base['assortment_issue'] = np.where((base['not_in_assortment_drugs'] > 0), 1, 0)
# availability issue
base['availability_issue'] = np.where((base['unavailable_drugs'] > 0), 1, 0)
# issue
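# np.select returns the first matching condition, so the label follows a fixed priority:
# assortment > availability > rate > ethical preference; rows matching none default to 0 (no issue)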
conditions = [
(base['assortment_issue'] == 1),
(base['availability_issue'] == 1),
(base['rate_issue'] == 1),
(base['ethical_preference_issue'] == 1)
]
choices = ['assortment', 'availability', 'rate', 'ethical preference']
base['issue'] = np.select(conditions, choices)
base[['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'patient_id', 'drug_sold']] = \
base[['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'patient_id',
'drug_sold']].fillna(0)
base[
['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'assortment_active', 'patient_id',
'drug_sold']] = base[
['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'assortment_active', 'patient_id',
'drug_sold']] \
.apply(np.int64)
base[['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'patient_id', 'drug_sold']] = \
base[['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'patient_id',
'drug_sold']].replace({0: None})
base.columns = [c.replace('_', '-') for c in base.columns]
base['etl-created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
# To Avoid Duplication
truncate_query = f"""
DELETE
FROM
"prod2-generico"."pmf-conversion-table"
WHERE
"session-id" > {max_session};
"""
logger.info(truncate_query)
rs_db.execute(truncate_query)
s3.write_df_to_db(df=base[table_info['column_name']],
table_name='pmf-conversion-table',
db=rs_db, schema='prod2-generico')
except Exception as e:
logger.exception(e)
finally:
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/pmf-conversion-table/pmf-conversion-table.py | pmf-conversion-table.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
import pandas as pd
import dateutil
import datetime as dt
from dateutil.tz import gettz
import numpy as np
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'pmf-conversion-table'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema='prod2-generico')
table_name1 = 'pmf-conversion-temp'
table_info1 = helper.get_table_info(db=rs_db, table_name=table_name1, schema='prod2-generico')
try:
# update session id
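# pick only sessions whose PSO -> bill mapping has changed since the last load
# (bill-id recorded in pmf-conversion-table no longer matches patient-requests-metadata);
# those sessions are rebuilt below and replace their old rows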
update_session_q = """
select
"session-id"
FROM
"prod2-generico"."pmf-conversion-table" pmf
left join "prod2-generico"."patient-requests-metadata" prm on
pmf."patients-store-orders-id" = prm.id
where
nvl(pmf."bill-id-thru-pso",
0) != nvl(prm."bill-id",
0)
group by
1
"""
update_session = rs_db.get_df(update_session_q)
truncate_query = f"""
DELETE
FROM
"prod2-generico"."pmf-conversion-temp"
"""
logger.info(truncate_query)
rs_db.execute(truncate_query)
s3.write_df_to_db(df=update_session[table_info1['column_name']],
table_name='pmf-conversion-temp',
db=rs_db, schema='prod2-generico')
base_q = f"""
select
ad."store-id" ,
ad."unique-id" as "session-id",
ad."patients-store-orders-id" ,
ad."requested-drug-id" ,
ad."requested-drug-name" ,
ad."suggested-drug-id",
ad."suggested-drug-name",
ad."patient-phone" ,ad."patient-id",
d2."type" as suggested_drug_type,
d."type" as requested_drug_type,
prm."bill-id" as bill_id_thru_pso ,sda."drug-id" as ass_drug,
ad."created-by" as "session-created-by",
ad."created-at" as "session-date",
sda."is-active" as assortment_active,
plt."tag" as session_source,
nvl(ga."generic-affinity-score",0)"generic-affinity-score" ,
cl."patient-phone" as called_patient,
max(sgdp."requested-selling-rate")"requested-drug-rate",
max(ad."suggested-drug-rate")"suggested-drug-rate",
max(ad."requested-drug-quantity")"requested-drug-quantity",
max(ad."suggested-drug-quantity")"suggested-drug-quantity",
max(ad."required-drug-quantity")"required-drug-quantity",
max(prm."bill-id") over (partition by ad."unique-id") as max_bill,
max(prm."gross-quantity") as "drug-sold",
max(case when b."net-payable">0 then 1 else 0 end) as "billed-same-day",
max(case when sa."gross-quantity">0 then 1 else 0 end) as "drug-sold-same-day",
max(ad."suggested-drug-inventory-quantity") "suggested-drug-inventory-quantity",
max(ad."requested-drug-inventory-quantity") "requested-drug-inventory-quantity",
max(prm."created-by") over (partition by ad."unique-id") as "max-pso-created-by"
from
"prod2-generico"."alternate-drugs" ad
inner join "prod2-generico"."pmf-conversion-temp" pct
on ad."unique-id"= pct."session-id"
left join "prod2-generico"."patient-requests-metadata" prm
on
prm.id = ad."patients-store-orders-id"
left join "prod2-generico".patients p
on right(REPLACE(REPLACE(p."phone",'.',''),' ',''), 10) =
right(REPLACE(REPLACE(ad."patient-phone",'.',''),' ',''), 10)
left join "prod2-generico"."bills-1" b
on
b."patient-id" = (case
when ad."patient-id" is null then prm."patient-id"
when ad."patient-id" is null and prm."patient-id" is null then p.id
else ad."patient-id"
end)
and date(ad."created-at") = date(b."created-at")
left join "prod2-generico"."sales-agg" sa
on sa."patient-id" = (case
when ad."patient-id" is null then prm."patient-id"
when ad."patient-id" is null and prm."patient-id" is null then p.id
else ad."patient-id"
end)
and sa."drug-id" = prm."drug-id"
and date(sa."created-date") = date(ad."created-at")
left join "prod2-generico"."store-drug-assortment" sda
on sda."store-id" = ad."store-id" and sda."drug-id" = ad."requested-drug-id"
and sda."is-active" =1
left join "prod2-generico".drugs d
on d.id = ad."requested-drug-id"
left join "prod2-generico".drugs d2
on d2.id = ad."suggested-drug-id"
inner join "prod2-generico"."pmf-login-tag" plt
on ad."created-by" = plt."login-id"
left join "prod2-generico"."generic-affinity" ga
on ga."patient-id"=(case
when ad."patient-id" is null then prm."patient-id"
when ad."patient-id" is null and prm."patient-id" is null then p.id
else ad."patient-id"
end)
left join (
select
"drug-id" ,
max("selling-rate") as "requested-selling-rate"
from
"prod2-generico"."store-group-drug-price" i
group by
1
) sgdp on
ad."requested-drug-id" = sgdp."drug-id"
left join (
select
right(callfrom, 10) as "patient-phone",
min(starttime) as "call-time"
from
"prod2-generico"."prod2-generico".exotelincomingcalllogs e
where
calltype = 'completed'
group by
1,
date(starttime)) cl on
ad."patient-phone" = cl."patient-phone"
and date(ad."created-at") = date(cl."call-time")
group by
ad."store-id" ,
ad."unique-id" ,
ad."patients-store-orders-id" ,
ad."requested-drug-id" ,
ad."requested-drug-name" ,
prm."drug-id",
prm."drug-name" ,
prm."bill-id",sda."drug-id",ga."generic-affinity-score",d."type",d2."type",
ad."created-by",
ad."suggested-drug-id",
ad."suggested-drug-name",
ad."patient-phone",
ad."created-at",
sda."is-active",
plt."tag",
cl."patient-phone" ,
ad."patient-id" ,
prm."created-by"
"""
base = rs_db.get_df(base_q)
base.columns = [c.replace('-', '_') for c in base.columns]
base['assortment_active'].fillna(0, inplace=True)
# assortment flag
conditions = [(
(base.suggested_drug_id.isnull()) &
(base['assortment_active'] == 0)
),
(
(base.suggested_drug_id.isnull()) &
(base['assortment_active'] == 1)
),
(base.suggested_drug_id.notnull())
]
choices = [1, 0, 0]
base['flag_not_assort'] = np.select(conditions, choices)
# base2 = base[(base['session_id'] == '1663646353324')]  # debug: inspect a single session
# unavailability flag
conditions = [
(base['flag_not_assort'] == 1),
(base['required_drug_quantity'] > base[
'suggested_drug_inventory_quantity']),
(base['required_drug_quantity'] <= base[
'suggested_drug_inventory_quantity'])
]
choices = [1, 1, 0]
base['flag_unavailability'] = np.select(conditions, choices)
# sessions where patient had called
base['patient_called_flag'] = np.where(base['called_patient'].isnull(), 0, 1)
# session converted (anyone drug converted then converted)
base['session_conv_flag'] = np.where(base['max_bill'].isnull(), 0, 1)
# session drug converted
base['session_drug_conv_flag'] = np.where(base['bill_id_thru_pso'].isnull(), 0, 1)
# drug level expensive or not
base['drug_expensive_flag'] = np.where((base['suggested_drug_id'] != base['requested_drug_id'])
& (base['suggested_drug_rate'] > base['requested_drug_rate']), 1, 0)
session_expensive_count = base.groupby('session_id')['drug_expensive_flag'].sum().reset_index()
session_expensive_count = session_expensive_count.rename(columns={'drug_expensive_flag': 'expensive_drugs'})
session_not_in_assortment_count = base.groupby('session_id')['flag_not_assort'].sum().reset_index()
session_not_in_assortment_count = session_not_in_assortment_count.rename(
columns={'flag_not_assort': 'not_in_assortment_drugs'})
session_drug_count = base[(base['suggested_drug_id'] != base['requested_drug_id'])].groupby('session_id')[
'suggested_drug_id'].nunique().reset_index()
session_drug_count = session_drug_count.rename(columns={'suggested_drug_id': 'suggested_drug_cnt'})
session_ethical_count = base[base['requested_drug_type'] == 'ethical'].groupby('session_id')[
'requested_drug_id'].nunique().reset_index()
session_ethical_count = session_ethical_count.rename(columns={'requested_drug_id': 'eth_drugs'})
session_generic_count = base[base['requested_drug_type'] == 'generic'].groupby('session_id')[
'requested_drug_id'].nunique().reset_index()
session_generic_count = session_generic_count.rename(columns={'requested_drug_id': 'gen_drugs'})
session_unavailable_count = base.groupby('session_id')['flag_unavailability'].sum().reset_index()
session_unavailable_count = session_unavailable_count.rename(columns={'flag_unavailability': 'unavailable_drugs'})
base = pd.merge(base, session_ethical_count, how='left', on=['session_id'])
base = pd.merge(base, session_generic_count, how='left', on=['session_id'])
base = pd.merge(base, session_expensive_count, how='left', on=['session_id'])
base = pd.merge(base, session_drug_count, how='left', on=['session_id'])
base = pd.merge(base, session_not_in_assortment_count, how='left', on=['session_id'])
base = pd.merge(base, session_unavailable_count, how='left', on=['session_id'])
# base2=base[(base['session_id']== '1663646353324')]
# ethical-preferring sessions
base['ethical_preference_issue'] = np.where(
(base['eth_drugs'] > base['gen_drugs']) & (base['generic_affinity_score'] <= 2), 1, 0)
# drug rate comparison
base['rate_issue'] = np.where((base['expensive_drugs'] > 0), 1, 0)
# assortment issue
base['assortment_issue'] = np.where((base['not_in_assortment_drugs'] > 0), 1, 0)
# availability issue
base['availability_issue'] = np.where((base['unavailable_drugs'] > 0), 1, 0)
# issue
conditions = [
(base['assortment_issue'] == 1),
(base['availability_issue'] == 1),
(base['rate_issue'] == 1),
(base['ethical_preference_issue'] == 1)
]
choices = ['assortment', 'availability', 'rate', 'ethical preference']
base['issue'] = np.select(conditions, choices)
base[['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'patient_id', 'drug_sold']] = \
base[['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'patient_id',
'drug_sold']].fillna(0)
base[
['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'assortment_active', 'patient_id',
'drug_sold']] = base[
['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'assortment_active', 'patient_id',
'drug_sold']] \
.apply(np.int64)
base[['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'patient_id', 'drug_sold']] = \
base[['patients_store_orders_id', 'max_bill', 'suggested_drug_id', 'bill_id_thru_pso', 'patient_id',
'drug_sold']].replace({0: None})
base.columns = [c.replace('_', '-') for c in base.columns]
base['etl-created-at'] = dt.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
# To Avoid Duplication
truncate_query = f"""
DELETE
FROM
"prod2-generico"."pmf-conversion-table"
where "session-id" in
(select "session-id" from "prod2-generico"."pmf-conversion-temp" group by 1)
"""
logger.info(truncate_query)
rs_db.execute(truncate_query)
s3.write_df_to_db(df=base[table_info['column_name']],
table_name='pmf-conversion-table',
db=rs_db, schema='prod2-generico')
except Exception as e:
logger.exception(e)
finally:
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/pmf-conversion-table/pmf-update.py | pmf-update.py |
""
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import pandas as pd
import dateutil
import datetime
from dateutil.tz import gettz
import numpy as np
from datetime import timedelta
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
rs_db = DB()
rs_db.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'system-drug-assortment-count'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
try:
# count of drugs in assortment
df_q = """
select "type",count(distinct d.id) "drug-id-count",
count(distinct case when "is-validated" ='Yes' then d.id end) "verified-drug-count"
from "prod2-generico".drugs d
group by 1
"""
df = rs_db.get_df(df_q)
# etl
df['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df['created-by'] = 'etl-automation'
df['updated-by'] = 'etl-automation'
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where date("created-at")=current_date'''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=df[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema)
except Exception as error:
raise error
finally:
rs_db.close_connection()
""
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
import pandas as pd
import dateutil
import datetime
from dateutil.tz import gettz
import numpy as np
from datetime import timedelta
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-d', '--full_run', default=0, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
full_run = args.full_run
os.environ['env'] = env
logger = get_logger()
logger.info(f"full_run: {full_run}")
rs_db = DB()
rs_db_write = DB(read_only=False)
rs_db.open_connection()
rs_db_write.open_connection()
s3 = S3()
schema = 'prod2-generico'
table_name = 'cash-burn-tableau-health-board'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
# max of data
burn_q = """
select
date(max("burn-date")) max_exp
from
"prod2-generico"."cash-burn-tableau-health-board"
"""
rs_db.execute(burn_q, params=None)
max_exp_date: pd.DataFrame = rs_db.cursor.fetch_dataframe()
max_exp_date['max_exp'].fillna(np.nan, inplace=True)
logger.info(f"max burn-date dtypes: {max_exp_date.dtypes.to_dict()}")
max_exp_date = max_exp_date['max_exp'].to_string(index=False)
logger.info(f"max burn-date already loaded: {max_exp_date}")
# Read from gsheet
gs = GoogleSheet()
burn_data = gs.download(data={
"spreadsheet_id": "1WR5VeO1OyBqwMp3xXF2hn9ZIA5BxmVaqqEUBbOv4tzk",
"sheet_name": "cash burn",
"listedFields": []
})
df = pd.DataFrame(burn_data)
logger.info(f"date column dtype before parsing: {df['date'].dtype}")
df[['date']] = df[
['date']] \
.apply(pd.to_datetime, errors='coerce')
logger.info(f"date column dtype after parsing: {df['date'].dtype}")
burn = df.copy()
# params
if full_run or max_exp_date == 'NaN':
start = '2017-05-13'
else:
start = max_exp_date
start = dateutil.parser.parse(start)
startminus2 = start - timedelta(days=2)
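# keep a 2-day overlap window: the delete below clears burn-date >= startminus2, so the sheet
# data is re-filtered from the same date to pick up late corrections without losing rows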
burn = burn[(burn['date'] >= startminus2)]
# etl
burn['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
burn['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
burn['created-by'] = 'etl-automation'
burn['updated-by'] = 'etl-automation'
burn.columns = [c.replace('_', '-') for c in burn.columns]
rename_map = {'date': 'burn-date'}
burn.rename(columns=rename_map, inplace=True)
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
else:
logger.info(f"Table:{table_name} exists")
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where "burn-date">='{startminus2}' '''
rs_db_write.execute(truncate_query)
s3.write_df_to_db(df=burn[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/cash-burn-tableau-health-board/cash-burn-tableau-health-board.py | cash-burn-tableau-health-board.py |
import argparse
import os
import sys
from datetime import datetime as dt
from datetime import timedelta
from warnings import filterwarnings as fw
import numpy as np
import pandas as pd
fw('ignore')
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper.parameter.job_parameter import parameter
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
job_params = parameter.get_params(job_id=131)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = job_params['email_to']
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
read_schema = 'prod2-generico'
#################################################################
# ABV Module
#################################################################
# Last Monday :
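# weekday() is 0 for Monday, so subtracting it snaps to the most recent Monday (today itself
# when run on a Monday); this is the calculation-date the weekly crm-view snapshot is filtered on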
today = dt.today().date()
last_monday = today - timedelta(days=(today.weekday()))
logger.info(last_monday)
abv_q = f"""
select
pm."primary-store-id",
cv."abv-seg",
cv."patient-id",
recency,
frequency,
stage,
p.phone,
cv."last-bill"
from
"{read_schema}"."crm-view" cv
left join "{read_schema}"."patients-metadata-2" pm
on
cv."patient-id" = pm.id
left join "{read_schema}".patients p on
pm.id = p.id
where
cv."calculation-date" = '{last_monday}'
and cv."r-score" >= 4
and cv."f-score" >= 3;"""
logger.info(f"query for abv module data : {abv_q}")
abv_driver = rs_db.get_df(abv_q)
# Creating identity for clevertap
abv_driver['phone'] = abv_driver['phone'].apply(lambda x: '91' + x)
logger.info(f"Campaign will run for : {abv_driver.stage.unique()}")
logger.info(f"recency min : {abv_driver.recency.min()}")
logger.info(f"recency max : {abv_driver.recency.max()}")
logger.info(f"frequency min : {abv_driver.frequency.min()}")
logger.info(f"frequency max : {abv_driver.frequency.max()}")
abv_dis = pd.crosstab(index=abv_driver['primary-store-id'],
columns=abv_driver['abv-seg'],
values=abv_driver['patient-id'],
aggfunc='nunique',
normalize='index') * 100
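# share (%) of each store's high-RFM patients by ABV segment; idxmax then tags every store with
# the segment contributing the largest share, and stores are grouped by that segment for targeting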
abv_dis['major_contributor'] = abv_dis.idxmax(axis=1)
abv_dis = abv_dis.reset_index()
store_abv_campaign = abv_dis.groupby('major_contributor')['primary-store-id'].apply(lambda x: list(np.unique(x)))
logger.info(store_abv_campaign)
patient_abv = abv_dis[['primary-store-id', 'major_contributor']]
patient_abv = pd.merge(patient_abv,
abv_driver,
how='left',
left_on=['primary-store-id', 'major_contributor'],
right_on=['primary-store-id', 'abv-seg'])
abv_patient_count = patient_abv.groupby('major_contributor')['patient-id'].nunique()
logger.info(f"ABV driver campaign patient count {abv_patient_count}", )
patient_abv = patient_abv[['primary-store-id', 'abv-seg', 'patient-id']]
abv_driver_mail_body = f"""
Hey Team,
For the ABV Driver campaign (targeted), the patients list is attached to this mail
Campaign will run for patients where NOB is at least {abv_driver.frequency.min()}
and recency of customer is at most {abv_driver.recency.max()} days
Customers will fall in stages : {abv_driver.stage.unique()}
Store-wise campaign : list attached
Please follow targeted customers for their ABV segments (neglect if segment size < 1000)
Code will be open but sms campaign will run for mentioned patients only.
Test/Control split - 90/10
Code : Open
Min purchase condition : based on targeted abv segment
Max per patients : 1
Conversion condition : Charged (Next 30 Days) & total-amount > abv-seg(max)
*Marketing team : suggest/create promo-codes
*Pooja : send SMS-draft for campaigns
"""
sac = s3.save_df_to_s3(df=pd.DataFrame(store_abv_campaign),
file_name="Store_ABV_Campaigns.csv",
index_label='ABV Segment',
index=True)
acpl = s3.save_df_to_s3(df=pd.DataFrame(patient_abv), file_name="ABV_Campaign_Patients_list.csv")
# Sending email
subject = 'ABV Driver Campaign'
mail_body = abv_driver_mail_body
email = Email()
files = [sac, acpl]
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=files)
#################################################################
# Lost Customers Module
#################################################################
lost_q = f"""
select
pm."primary-store-id",
cv."patient-id",
recency,
frequency,
stage,
p.phone,
"last-bill"
from
"{read_schema}"."crm-view" cv
left join "{read_schema}"."patients-metadata-2" pm
on
cv."patient-id" = pm.id
left join "{read_schema}".patients p on
pm.id = p.id
where
cv."calculation-date" = '{last_monday}'
and stage in ('Hibernating', 'At Risk', 'Can\\\'t Lose them');"""
logger.info(f"query for lost customer module data : {lost_q}")
lost_customers = rs_db.get_df(lost_q)
store_wise_segment = pd.pivot_table(data=lost_customers,
index='primary-store-id',
columns='stage',
values='patient-id',
aggfunc='nunique').reset_index()
segment_properties = pd.pivot_table(data=lost_customers,
index='stage',
values=['recency', 'frequency', 'last-bill'],
aggfunc=['min', 'max']).reset_index()
segment_size = pd.pivot_table(data=lost_customers,
index='stage',
values='patient-id',
aggfunc=['nunique']).reset_index()
lost_customers_data = lost_customers[['primary-store-id', 'stage', 'patient-id']].drop_duplicates()
lost_customer_mail_body = """
Hey Team,
Properties for lost customer segments is attached,
Please create campaign using those properties
Can't lose them : High priority
At risk : Medium priority
Hibernating : low priority
Suggest promo-codes accordingly
Segment size is also attached for reference
Test/Control split - 90/10
Code : Restricted
Min purchase condition : 1
Max per patients : 1
Discount : based on priority
Conversion condition : Charged (Next 30 Days)
*Marketing team : suggest/create promo-codes
*Pooja : send SMS-draft for campaigns
Thanks & Regards
"""
sws = s3.save_df_to_s3(df=store_wise_segment,
file_name="Store_Lost_Customers.csv")
lcsp = s3.save_df_to_s3(df=segment_properties,
file_name="Lost_Customer_Segment_Properties.csv")
ss = s3.save_df_to_s3(df=segment_size,
file_name="Lost_Customer_Segment_Size.csv")
lsd = s3.save_df_to_s3(df=lost_customers_data,
file_name="Lost_Customer_Data.csv")
# Sending email
subject = 'Lost Customer Campaign'
mail_body = lost_customer_mail_body
email = Email()
files = [sws, lcsp, ss, lsd]
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=files)
#################################################################
# Retention Module
#################################################################
retention_q = f"""
select
pm."primary-store-id",
cv."patient-id",
recency,
frequency,
stage,
p.phone,
"last-bill"
from
"{read_schema}"."crm-view" cv
left join "{read_schema}"."patients-metadata-2" pm
on
cv."patient-id" = pm.id
left join "{read_schema}".patients p on
pm.id = p.id
where
cv."calculation-date" = '{last_monday}'
and stage in ('Promising')
and "m-score" >= 4;"""
logger.info(f"query for retention module data : {retention_q}")
retention = rs_db.get_df(retention_q)
retention_data = retention[['primary-store-id', 'stage', 'patient-id']].drop_duplicates()
store_wise_segment = pd.pivot_table(data=retention,
index='primary-store-id',
columns='stage',
values='patient-id',
aggfunc='nunique').reset_index()
segment_properties = pd.pivot_table(data=retention,
index='stage',
values=['recency', 'frequency', 'last-bill'],
aggfunc=['min', 'max']).reset_index()
segment_size = pd.pivot_table(data=retention,
index='stage',
values='patient-id',
aggfunc=['nunique']).reset_index()
retention_mail_body = """
Hey Team,
Properties for retention campaign is attached,
Focus is to increase quarter on quarter retention
Please create campaign using those properties
Suggest promo-codes accordingly
Segment size is also attached for reference
Code will be open but sms campaign will run for mentioned patients only.
Test/Control split - 90/10
Code : Open
Min purchase condition : minimum monetary value of segment
Max per patients : 4 (NOB2-4)
NOB : 2-4
Conversion condition : Charged (Next 30 Days)
*Marketing team : suggest/create promo-codes
*Pooja : send SMS-draft for campaigns
Thanks & Regards
"""
sws = s3.save_df_to_s3(df=store_wise_segment,
file_name="Store_Retention.csv")
lcsp = s3.save_df_to_s3(df=segment_properties,
file_name="Retention_Segment_Properties.csv")
ss = s3.save_df_to_s3(df=segment_size,
file_name="Retention_Segment_Size.csv")
rd = s3.save_df_to_s3(df=retention_data,
file_name="Retention_Customer_Data.csv")
# Sending email
subject = 'Retention Campaign'
mail_body = retention_mail_body
email = Email()
files = [sws, lcsp, ss, rd]
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=files)
# closing the connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/campaign-recommendation/campaign-recommendation.py | campaign-recommendation.py |
from zeno_etl_libs.db.db import DB, MongoDB, MSSql
import sys
import os
import argparse
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import pandas as pd
from datetime import datetime
from dateutil.tz import gettz
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]",
type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
# Part 1: Preferred Distributor
mg_db = MongoDB()
mg_client = mg_db.open_connection("generico-crm")
db = mg_client['generico-crm']
collection = db["wmsDrugDistributorMappingV2"].find()
dist_list = pd.DataFrame(list(collection))
pref_dist=dist_list[['wms_id','is_active','drug_id','drug_name','rank1','rank1_name','moq']]
pref_dist=pref_dist.rename(columns={'rank1':'distributor_id','rank1_name':'distributor_name'})
pref_dist[['drug_id','distributor_id']]=\
pref_dist[['drug_id','distributor_id']].apply(pd.to_numeric, errors='ignore').astype('Int64')
created_at = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
pref_dist['created-date']=datetime.strptime(created_at,"%Y-%m-%d %H:%M:%S")
pref_dist['etl-created-at']=datetime.strptime(created_at,"%Y-%m-%d %H:%M:%S")
updated_at = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
pref_dist['etl-updated-at']=datetime.strptime(updated_at,"%Y-%m-%d %H:%M:%S")
pref_dist['etl-created-by'] = 'etl-automation'
pref_dist['etl-updated-by'] = 'etl-automation'
pref_dist.columns = [c.replace('_', '-') for c in pref_dist.columns]
rs_db= DB()
rs_db.open_connection()
s3=S3()
schema = "prod2-generico"
table_name = "preferred-distributor"
table_info = helper.get_table_info(db=rs_db
, table_name=table_name, schema=schema)
#Truncate the Query
snapshot_date = datetime.now().date()
truncate_query = '''
delete from "prod2-generico"."preferred-distributor"
'''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=pref_dist[table_info['column_name']], table_name=table_name,
db=rs_db, schema='prod2-generico')
# Part 2 : Review Time
mg_db = MongoDB()
mg_client = mg_db.open_connection("generico-crm")
db = mg_client['generico-crm']
collection = db["distributorConfiguration"].find()
dist_list = pd.DataFrame(list(collection))
review_time=dist_list[['is_active','wms_id','distributor_id','distributor_name','weekly_po','proxy_wh']]
pd.options.mode.chained_assignment = None
review_time['days_in_week']=review_time['weekly_po'].copy().apply(lambda a:len(a))
review_time['days_in_week']=review_time['days_in_week'].replace(0,4)
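# map PO days per week to a review period in days:
# 1 day/week -> 7, 2 -> 4, 3 -> 3, daily (7) -> 1; anything else defaults to 4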
review_time['review_days']=\
review_time['days_in_week'].copy()\
.apply(lambda a: 4 if a==2 else (3 if a==3 else (7 if a==1 else (1 if a==7 else 4))))
review_time[['distributor_id']]=\
review_time[['distributor_id']].apply(pd.to_numeric, errors='ignore').astype('Int64')
review_time[['proxy_wh']]=\
review_time[['proxy_wh']].astype('str')
created_at = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
review_time['created-date']=datetime.strptime(created_at,"%Y-%m-%d %H:%M:%S")
review_time['etl-created-at']=datetime.strptime(created_at,"%Y-%m-%d %H:%M:%S")
updated_at = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
review_time['etl-updated-at']=datetime.strptime(updated_at,"%Y-%m-%d %H:%M:%S")
review_time['etl-created-by'] = 'etl-automation'
review_time['etl-updated-by'] = 'etl-automation'
review_time.columns = [c.replace('_', '-') for c in review_time.columns]
rs_db= DB()
rs_db.open_connection()
s3=S3()
schema = "prod2-generico"
table_name = "wms-distributors-review-time"
table_info = helper.get_table_info(db=rs_db
, table_name=table_name, schema=schema)
#Truncate the Query
snapshot_date = datetime.now().date()
truncate_query = '''
delete from "prod2-generico"."wms-distributors-review-time"
'''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=review_time[table_info['column_name']], table_name=table_name,
db=rs_db, schema='prod2-generico')
rs_db.close_connection()
mg_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/warehouse/wms_preferred_distributor.py | wms_preferred_distributor.py |
import os
import sys
import argparse
import pandas as pd
from datetime import datetime, timedelta
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="To update DOID with latest forecast.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
parser.add_argument('-nm', '--for_next_month', default="Y", type=str,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
for_next_month = args.for_next_month
logger = get_logger()
logger.info("Scripts begins")
status = False
schema = 'prod2-generico'
err_msg = ''
df_uri1 = ''
df_uri2 = ''
drugs_not_in_doi = 0
drugs_missed = 0
drugs_updated = 0
# getting run date for the script
run_date = str(datetime.now().date())
current_month_date = (
datetime.now().date() -
timedelta(days=datetime.now().day - 1))
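# wh-safety-stock forecasts are keyed by month-begin date; with for_next_month == 'Y' the script
# targets the first of the next month (rolling the year over after December), else the current month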
if for_next_month == 'Y':
forecast_date = str(
datetime(current_month_date.year +
int(current_month_date.month / 12),
((current_month_date.month % 12) + 1), 1).date())
else:
forecast_date = str(current_month_date)
try:
rs_db = DB()
rs_db.open_connection()
wh_safety_stock_df_query = """
select
*
from
"prod2-generico"."wh-safety-stock" wss
where
"forecast-type" = 'forecast'
and "forecast-date" = '{forecast_date}'
""".format(forecast_date=forecast_date)
wh_safety_stock_df = rs_db.get_df(wh_safety_stock_df_query)
wh_safety_stock_df.columns = [c.replace('-', '_') for c in wh_safety_stock_df.columns]
# CONSIDERING DRUG TYPES FOR DATA LOAD
type_list = rs_db.get_df(
'select distinct type from "prod2-generico".drugs')
type_list = tuple(type_list[
~type_list.type.isin(
['', 'banned', 'discontinued-products'])][
'type'])
# UPLOADING SAFETY STOCK NUMBERS IN DRUG-ORDER-INFO
ss_data_upload = wh_safety_stock_df.query('order_upto_point > 0')[
['wh_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = [
'store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']
logger.info('updating DOID')
new_drug_entries, missed_entries = doid_update(
ss_data_upload, type_list, rs_db, schema, logger)
logger.info('DOID updated')
drugs_not_in_doi = len(new_drug_entries)
drugs_missed = len(missed_entries)
drugs_updated = len(ss_data_upload) - len(missed_entries) - len(new_drug_entries)
s3 = S3()
df_uri1 = s3.save_df_to_s3(df=new_drug_entries,
file_name='DOID_new_drug_entries_{date}.csv'.format(date=str(run_date)))
df_uri2 = s3.save_df_to_s3(df=missed_entries,
file_name='DOID_missed_entries_{date}.csv'.format(date=str(run_date)))
status = True
except Exception as error:
err_msg = str(error)
logger.exception(str(error))
email = Email()
if status:
result = 'Success'
email.send_email_file(subject=f"wh_doid_update ({env}): {result}",
mail_body=f"""
drugs updated successfully --> {drugs_updated}
drugs not updated --> {drugs_missed}
drugs not in doid --> {drugs_not_in_doi}
""",
to_emails=email_to, file_uris=[df_uri1, df_uri2])
else:
result = 'Failed'
email.send_email_file(subject=f"wh_doid_update ({env}): {result}",
mail_body=f"Run time: {datetime.now()} {err_msg}",
to_emails=email_to, file_uris=[])
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/warehouse/wh_doid_update.py | wh_doid_update.py |
import argparse
import os
import sys
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from dateutil.tz import gettz
from scipy.stats import norm
from calendar import monthrange
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.helper.parameter.job_parameter import parameter
# job parameters
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
job_params = parameter.get_params(job_id=130)
email_to = job_params['email_to']
days = job_params['days']
lead_time_mean = job_params['lead_time_mean']
lead_time_std = job_params['lead_time_std']
max_review_period = job_params['max_review_period']
wh_id = 199
cap_ss_days = job_params['cap_ss_days']
service_level = job_params['service_level']
ordering_freq = job_params['ordering_freq']
logger = get_logger()
logger.info("Scripts begins. Env = " + env)
status = False
err_msg = ''
df_uri = ''
run_date = str(datetime.now().strftime("%Y-%m-%d"))
drugs_not_in_doi = 0
drugs_missed = 0
drugs_updated = 0
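# helper: average launch (opening) stock per new store, estimated from short-book orders raised
# before opening for stores opened in the trailing `days` window (used for the NSO uplift below)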
def get_launch_stock_per_store(rs_db, days, drugs):
new_stores_list_query = """
select
id as store_id,
date("opened-at") as opened_at
from
"prod2-generico".stores s
where
"opened-at" >= CURRENT_DATE - {days}
and id not in (281, 297)
""".format(days=days)
new_stores_list = rs_db.get_df(new_stores_list_query)
store_ids_list = tuple(new_stores_list['store_id'].astype(str))
# get shortbook launch orders
sb_orders_query = '''
select
distinct sb."store-id" as store_id,
sb."drug-id" as drug_id,
date(sb."created-at") as created_at,
sb.quantity as ordered_quantity,
date(s2."opened-at") as opened_at
from
"prod2-generico"."short-book-1" sb
left join "prod2-generico".stores s2 on
s2.id = sb."store-id"
where
"store-id" in {store_ids}
and date(sb."created-at") < date(s2."opened-at")
'''.format(store_ids=store_ids_list, days=days)
sb_orders = rs_db.get_df(sb_orders_query)
df = sb_orders.copy()
df = df[df['drug_id'].isin(drugs['drug_id'])]
df = df[['store_id', 'drug_id', 'ordered_quantity']]
df.drop_duplicates(inplace=True)
new_stores_count = sb_orders['store_id'].nunique()
df = df[['drug_id', 'ordered_quantity']]
launch_stock = df.groupby('drug_id').sum().reset_index()
launch_stock_per_store = launch_stock.copy()
launch_stock_per_store['ordered_quantity'] = \
launch_stock['ordered_quantity'] / new_stores_count
launch_stock_per_store.rename(
columns={'ordered_quantity': 'launch_stock_per_store'}, inplace=True)
return launch_stock_per_store
try:
rs_db = DB()
rs_db.open_connection()
# read inputs file to get parameters
logger.info('reading input file to get parameters')
params_table_query = """
select
"param-name" as param,
value
from
"prod2-generico"."wh-goodaid-forecast-input"
where
"param-name" not in ('drug_lvl_fcst_inputs' , 's_and_op_factors')
"""
logger.info('input parameters read')
params_table = rs_db.get_df(params_table_query)
params_table = params_table.apply(pd.to_numeric, errors='ignore')
revenue_min = int(params_table.where(
params_table['param'] == 'revenue_min', axis=0).dropna()['value'])
revenue_max = int(params_table.where(
params_table['param'] == 'revenue_max', axis=0).dropna()['value'])
#getting expected new stores openings
params_table_query = """
select
"month-begin-dt" as month_begin_dt,
value as expected_nso
from
"prod2-generico"."wh-forecast-repln-input"
where
"param-name" = 'expected_nso'
"""
params_table = rs_db.get_df(params_table_query)
logger.info('expected_nso parameter read')
params_table = params_table.apply(pd.to_numeric, errors='ignore')
params_table['month_begin_dt'] = params_table['month_begin_dt'].astype(str)
current_month_date = (
datetime.now(tz=gettz('Asia/Kolkata')).date() -
timedelta(days=datetime.now(tz=gettz('Asia/Kolkata')).day - 1))
try:
expected_new_stores = int(params_table[
params_table[
'month_begin_dt'] == str(current_month_date)][
'expected_nso'])
except Exception as error:
expected_new_stores = 0
logger.info("expected new stores --> " + str(expected_new_stores))
# get active gaid drugs list
drugs_query = '''
select
wssm."drug-id" as drug_id,
d.composition,
d."drug-name" as drug_name,
d.company,
d."type",
d.category
from
"prod2-generico"."wh-sku-subs-master" wssm
left join "prod2-generico".drugs d on
d.id = wssm."drug-id"
where
wssm."add-wh" = 'Yes'
and d."type" not in ('discontinued-products')
and d.company = 'GOODAID'
'''
drugs = rs_db.get_df(drugs_query)
logger.info('active drugs list pulled from wssm')
# get 28 days sales for active gaid drugs
drug_sales_query = '''
select
"drug-id" as drug_id,
date("created-at") as created_at,
sum(quantity) as drug_sales_quantity
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - {days}
and date("created-at") < current_date
group by
"drug-id",
date("created-at")
'''.format(days=days, drug_ids=tuple(drugs['drug_id']))
sales_data_for_std = rs_db.get_df(drug_sales_query)
drugs_std = sales_data_for_std.groupby('drug_id').std().reset_index()
drugs_std = drugs_std.rename(columns={'drug_sales_quantity': 'demand_daily_deviation'})
drug_sales = sales_data_for_std.groupby('drug_id').sum().reset_index()
logger.info('drug sales data pulled from rs')
drug_sales['drug_sales_quantity'] = drug_sales[
'drug_sales_quantity'] * 28 / days
# get non-ethical composition level sale
composition_sales_query = '''
select
composition as composition,
sum(quantity) as composition_sales_quantity
from
"prod2-generico".sales
where
composition in {compositions}
and date("created-at") >= current_date - {days}
and date("created-at") < current_date
and "type" <> 'ethical'
group by
composition
'''.format(days=days, compositions=tuple(drugs['composition']))
composition_sales = rs_db.get_df(composition_sales_query)
logger.info('composition data pulled from rs')
composition_sales['composition_sales_quantity'] = composition_sales[
'composition_sales_quantity'] * 28 / days
# merging data
main_df = drugs.merge(drug_sales, on='drug_id', how='left')
main_df['drug_sales_quantity'].fillna(0, inplace=True)
main_df = main_df.merge(composition_sales, on='composition',
how='left')
main_df['composition_sales_quantity'].fillna(0, inplace=True)
# getting 50% of composition level sales
main_df['composition_sales_quantity_50%'] = main_df[
'composition_sales_quantity'] * 0.5
main_df['composition_sales_quantity_50%'] = main_df[
'composition_sales_quantity_50%'].round(0)
# calculate month-on-month sales growth
# getting last-to-last 28 day sales for calcuating growth factor
last_to_last_sales_query = '''
select
"drug-id" as drug_id,
sum(quantity) as last_to_last_28_day_sales
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - 56
and date("created-at") < current_date - 28
group by
"drug-id"
'''.format(drug_ids=tuple(drugs['drug_id']))
last_to_last_sales = rs_db.get_df(last_to_last_sales_query)
logger.info('last-to-last 28 day sales data pulled from rs')
# getting last 28 day sales
last_sales_query = '''
select
"drug-id" as drug_id,
sum(quantity) as last_28_day_sales
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - 28
and date("created-at") < current_date
group by
"drug-id"
'''.format(drug_ids=tuple(drugs['drug_id']))
last_sales = rs_db.get_df(last_sales_query)
logger.info('last 28 day sales data pulled from rs')
# merging to main_df
main_df = main_df.merge(last_to_last_sales, on='drug_id', how='left')
main_df['last_to_last_28_day_sales'].fillna(0, inplace=True)
main_df = main_df.merge(last_sales, on='drug_id', how='left')
main_df['last_28_day_sales'].fillna(0, inplace=True)
main_df['growth_factor'] = main_df['last_28_day_sales'] / main_df[
'last_to_last_28_day_sales']
main_df['growth_factor'].fillna(1, inplace=True)
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] == np.inf, 1,
main_df['growth_factor'])
# growth factor capped at 150% - min at 100%
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] > 1.5, 1.5,
main_df['growth_factor'])
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] < 1, 1,
main_df['growth_factor'])
# growth factor foreced to 1 when 50% comp sales > drug sales
main_df['growth_factor'] = np.where(main_df[
'composition_sales_quantity_50%'] >
main_df[
'drug_sales_quantity'], 1,
main_df['growth_factor'])
main_df['s_op_factor'] = 1
# get avg gaid sales for stores in the configured revenue band (revenue_min to revenue_max)
# getting stores lists to compare with
stores_cmp_query = '''
select
"store-id" as store_id,
round(sum("revenue-value")) as revenue
from
"prod2-generico".sales
where
date("created-at") >= current_date - 28
and date("created-at") < current_date
group by
"store-id"
'''
stores_cmp = rs_db.get_df(stores_cmp_query)
stores_cmp = stores_cmp[stores_cmp['revenue'] > revenue_min]
stores_cmp = stores_cmp[stores_cmp['revenue'] < revenue_max]
stores_list_to_comp = tuple(stores_cmp['store_id'])
logger.info('list of stores with revenue between ' + str(revenue_min) +
' and ' + str(revenue_max) + ' --> ' + str(stores_list_to_comp))
# adding expected_new_stores column
main_df['expected_new_stores'] = expected_new_stores
# getting avg sales
avg_store_sales_query = '''
select
composition ,
sum(quantity)/ {count} as avg_drug_sales_quantity
from
"prod2-generico".sales
where
composition in {compositions}
and date("created-at") >= current_date - 28
and date("created-at") < current_date
and "type" <> 'ethical'
and "store-id" in {stores_list_to_comp}
group by
composition
'''.format(compositions=tuple(drugs['composition']), \
stores_list_to_comp=stores_list_to_comp, \
count=len(stores_list_to_comp))
avg_store_sales = rs_db.get_df(avg_store_sales_query)
logger.info('avg composition sales retrieved for sample stores')
avg_store_sales['avg_drug_sales_quantity'] = avg_store_sales[
'avg_drug_sales_quantity'].round()
# merge to main_df
main_df = main_df.merge(avg_store_sales, on='composition', how='left')
main_df['avg_drug_sales_quantity'].fillna(0, inplace=True)
# get final forecast figures
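# final forecast = max(drug's own sales, 50% of composition sales)
# * growth factor * s&op factor + expected_new_stores * avg per-store
# composition sales of the comparable-revenue stores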
main_df['forecast'] = main_df[[
'drug_sales_quantity',
'composition_sales_quantity_50%']].max(axis=1)
main_df['forecast'] = main_df['forecast'] * main_df['growth_factor'] * \
main_df['s_op_factor'] + main_df[
'expected_new_stores'] * \
main_df['avg_drug_sales_quantity']
main_df['forecast'] = main_df['forecast'].round()
main_df['demand_daily'] = main_df['forecast'] / 28
main_df = main_df.merge(drugs_std, on='drug_id', how='left')
main_df['demand_daily_deviation'].fillna(0, inplace=True)
main_df['lead_time_mean'] = lead_time_mean
main_df['lead_time_std'] = lead_time_std
main_df['review_period'] = max_review_period
main_df['ordering_freq'] = ordering_freq
main_df['service_level'] = service_level
# calculate ss min max
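# base safety stock: SS = Z * sqrt(lead_time_mean * demand_std^2
# + lead_time_std^2 * daily_demand^2), with Z = norm.ppf(service_level);
# all demand figures are daily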
main_df['ss_wo_cap'] = (norm.ppf(main_df['service_level']).round(2) * np.sqrt(
(
main_df['lead_time_mean'] *
main_df['demand_daily_deviation'] *
main_df['demand_daily_deviation']
) +
(
main_df['lead_time_std'] *
main_df['lead_time_std'] *
main_df['demand_daily'] *
main_df['demand_daily']
)
)
).round(0)
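# cap safety stock at cap_ss_days of cover: if ss_wo_cap expressed in days
# (ss_wo_cap / forecast * 28) exceeds cap_ss_days, use cap_ss_days worth of
# last 28-day drug sales instead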
main_df['cap_ss_days'] = np.where(main_df['ss_wo_cap'] / main_df['forecast'] * 28 > cap_ss_days,
cap_ss_days, '')
main_df['safety_stock'] = np.where(main_df['ss_wo_cap'] / main_df['forecast'] * 28 > cap_ss_days,
main_df['drug_sales_quantity'] / 28 * cap_ss_days,
main_df['ss_wo_cap']).round(0)
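# reorder point excluding NSO demand = safety stock + daily demand * (lead time + review period)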
main_df['rop_without_nso'] = (
main_df['safety_stock'] +
main_df['demand_daily'] *
(
main_df['lead_time_mean'] + main_df['review_period']
)
).round()
launch_stock_per_store = get_launch_stock_per_store(rs_db, 90, drugs)
main_df = main_df.merge(launch_stock_per_store, on='drug_id', how='left')
main_df['launch_stock_per_store'].fillna(0, inplace=True)
num_days = monthrange(current_month_date.year, current_month_date.month)[1]
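# NSO adjustment: expected store launches within (lead time + review period)
# days * launch stock per store, added on top of the base reorder point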
main_df['reorder_point'] = main_df['rop_without_nso'] + \
np.round((main_df['lead_time_mean'] + main_df['review_period']) *
main_df['expected_new_stores'] / num_days) * \
main_df['launch_stock_per_store']
main_df['order_upto_point'] = (
main_df['reorder_point'] +
main_df['ordering_freq'] * main_df['demand_daily']
).round()
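# express SS / ROP / OUP as days-of-holding: quantity / 28-day forecast * 28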
main_df['safety_stock_doh'] = main_df['safety_stock'] / main_df['forecast'] * 28
main_df['reorder_point_doh'] = main_df['reorder_point'] / main_df['forecast'] * 28
main_df['order_upto_point_doh'] = main_df['order_upto_point'] / main_df['forecast'] * 28
# get table structure to write to
to_upload_query = '''
select
*
from
"prod2-generico"."wh-safety-stock"
limit 1
'''
to_upload = rs_db.get_df(to_upload_query)
to_upload.columns = [c.replace('-', '_') for c in to_upload.columns]
to_upload.drop(0, axis=0, inplace=True)
to_upload['drug_id'] = main_df['drug_id']
to_upload['drug_name'] = main_df['drug_name']
to_upload['type'] = main_df['type']
to_upload['category'] = main_df['category']
to_upload['company'] = main_df['company']
# to_upload['bucket'] = main_df['bucket']
to_upload['fcst'] = main_df['forecast'].astype(int, errors='ignore')
to_upload['wh_id'] = wh_id
to_upload['forecast_type'] = 'goodaid_199'
to_upload['lead_time_mean'] = main_df['lead_time_mean']
to_upload['max_review_period'] = main_df['review_period'].astype(int, errors='ignore')
to_upload['demand_daily'] = main_df['demand_daily']
to_upload['std'] = main_df['demand_daily_deviation']
to_upload['safety_stock'] = main_df['safety_stock'].astype(int, errors='ignore')
to_upload['expected_nso'] = expected_new_stores
to_upload['rop_without_nso'] = main_df['rop_without_nso'].astype(int, errors='ignore')
to_upload['reorder_point'] = main_df['reorder_point'].astype(int, errors='ignore')
to_upload['order_upto_point'] = main_df['order_upto_point'].astype(int, errors='ignore')
to_upload['last_month_sales'] = main_df['drug_sales_quantity'].astype(int, errors='ignore')
to_upload['safety_stock_days'] = main_df['safety_stock_doh']
to_upload['reorder_point_days'] = main_df['reorder_point_doh']
to_upload['order_upto_days'] = main_df['order_upto_point_doh']
to_upload['reset_date'] = run_date
to_upload['month'] = str(datetime.now(tz=gettz('Asia/Kolkata')).strftime("%m"))
to_upload['year'] = str(datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y"))
to_upload['month_begin_dt'] = str(
datetime.now(tz=gettz('Asia/Kolkata')).date() - timedelta(days=datetime.now(tz=gettz('Asia/Kolkata')).day - 1))
to_upload['created_at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
to_upload['created_by'] = 'etl-automation'
to_upload['updated_at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
to_upload['updated_by'] = 'etl-automation'
to_upload['cap_ss_days'] = main_df['cap_ss_days']
to_upload['ss_wo_cap'] = main_df['ss_wo_cap'].astype(int, errors='ignore')
to_upload['lead_time_std'] = main_df['lead_time_std']
to_upload['ordering_freq'] = main_df['ordering_freq']
to_upload = to_upload.fillna('')
#write connection
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
s3.write_df_to_db(df=to_upload, table_name='wh-safety-stock', db=rs_db_write, schema='prod2-generico')
logger.info("wh-safety-stock table updated")
# WRITING ATTACHMENTS FOR SUCCESS
df_uri = s3.save_df_to_s3(df=main_df,
file_name='BHW_goodaid_forecast_{date}.csv'.format(date=str(run_date)))
# writing to doid
logger.info('writing to doid for ' +
str(int(to_upload[['drug_id']].nunique())) + ' drugs')
ss_data_upload = to_upload.query('order_upto_point > 0')[
['wh_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = [
'store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']
type_list = tuple(drugs['type'].unique())
ss_data_upload = ss_data_upload.astype(float)
new_drug_entries, missed_entries = doid_update(ss_data_upload, type_list, rs_db, 'prod2-generico', logger, gaid_omit=False)
rs_db.connection.close()
drugs_not_in_doi = len(new_drug_entries)
drugs_missed = len(missed_entries)
drugs_updated = len(ss_data_upload) - len(missed_entries) - len(new_drug_entries)
rs_db.close_connection()
rs_db_write.close_connection()
status = True
except Exception as e:
err_msg = str(e)
logger.info('wh_goodaid_forecast_199 job failed')
logger.exception(e)
# Sending email
email = Email()
if status:
result = 'Success'
email.send_email_file(subject=f"Bhiwandi Warehouse GOODAID forecast ({env}): {result}",
mail_body=f"""
drugs updated successfully --> {drugs_updated}
drugs not updated --> {drugs_missed}
drugs not in doid --> {drugs_not_in_doi}
""",
to_emails=email_to, file_uris=[df_uri])
else:
result = 'Failed'
email.send_email_file(subject=f"Bhiwandi Warehouse GOODAID forecast ({env}): {result}",
mail_body=f"Run time: {datetime.now(tz=gettz('Asia/Kolkata'))} {err_msg}",
to_emails=email_to, file_uris=[])
logger.info("Script ended")
"""
DDL
create table "prod2-generico"."wh-goodaid-forecast-input" (
"param-name" text ENCODE lzo,
value text ENCODE lzo,
"drug-id" text ENCODE lzo,
"lead_time_doh" int8 ENCODE az64,
"safety_stock_doh" int8 ENCODE az64,
"review_period" int8 ENCODE az64,
"start-date" date ENCODE az64,
"end-date" date ENCODE az64,
description text ENCODE lzo,
"created-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"created-by" VARCHAR default 'etl-automation' ENCODE lzo,
"updated-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"updated-by" VARCHAR default 'etl-automation' ENCODE lzo
);
ALTER TABLE "prod2-generico"."wh-goodaid-forecast-input" owner to "admin";
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/warehouse/wh_goodaid_forecast_199.py | wh_goodaid_forecast_199.py |
import os
import sys
import argparse
import datetime
import pandas as pd
sys.path.append('../../../..')
from dateutil.tz import gettz
from zeno_etl_libs.db.db import DB, MSSql
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper import helper
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Populates table wh-inventory-ss that takes a daily snapshot of warehouse inventory.")
parser.add_argument('-e', '--env',
default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected],[email protected]",
type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
err_msg = ''
logger = get_logger()
logger.info("Script begins")
cur_date = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
status = False
try:
# MSSql connection
mssql = MSSql(connect_via_tunnel=False)
mssql_connection = mssql.open_connection()
# RS Connection
rs_db = DB()
rs_db.open_connection()
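# FIFO stock snapshot from the Bhiwandi WMS (wh_id 199): rows with negative Vno
# are reported as locked quantity/value, positive Vno as balance quantity/value;
# godown vs store split uses Gdn2, and invoice details are joined from SalePurchase2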
q1 = """
Select * FROM (select
199 as wh_id,
b.code as wms_drug_code,
b.Barcode as drug_id,
a2.Altercode as distributor_id,
a2.Name as distributor_name,
a.Acno as wms_distributor_code,
a.Vdt as purchase_date,
b.name as drug_name,
a.Srate as srate,
coalesce(a.TQty, 0) as total_quantity,
case
when a.Vno < 0 then 0
else coalesce(a.bqty, 0)
end as balance_quantity,
case
when a.Vno > 0 then 0
else coalesce(a.tqty, 0)
end as locked_quantity,
coalesce(a.TQty * a.cost, 0) as total_value,
case
when a.Vno < 0 then 0
else coalesce(a.bqty * a.cost, 0)
end as balance_value,
case
when a.Vno > 0 then 0
else coalesce(a.tqty * a.cost, 0)
end as locked_value,
a.Evdt as expiry,
b.Compname as company_name,
b.Compcode as company_code,
b.Pack as pack,
a.Cost as purchase_rate,
a.Pbillno as purchase_bill_no,
a.Psrlno as purchase_serial_no,
a.Batch as batch_number,
a.mrp as mrp,
b.Prate as prate,
m.name as "drug_type",
s.name as "composition",
a.Gdn2 as godown_qty,
a.BQty - a.Gdn2 as store_qty,
sp.NetAmt invoice_net_amt,
sp.Taxamt invoice_tax_amt,
sp.Disamt invoice_dis_amt,
sp.qty as invoice_qty,
sp.cgst,
sp.sgst,
sp.igst,
a.vno,
b.MinQty as shelf_min,
b.MaxQty as shelf_max
from
fifo a with (nolock)
right join item b with (nolock) on
a.itemc = b.code
left join Acm a2 with (nolock) on
a2.code = a.Acno
and a2.Slcd in ('SL', 'GL')
left join Master m with (nolock) on
b.ItemCat = m.code
left join Salt s with (nolock) on
b.Saltcode = s.Code
left join SalePurchase2 sp with (nolock) on
a.Pbillno = sp.Pbillno
and a.Psrlno = sp.Psrlno
and a.Itemc = sp.Itemc
and sp.Vdt = a.Vdt
where
b.code > 0
and
(a.Psrlno in (
select
Psrlno
from
Esdata.dbo.salepurchase2 s2)
or a.Psrlno IN (SELECT sp2.Psrlno from Esdata2122.dbo.SalePurchase2 sp2
)) and b.Barcode not like '%[^0-9]%') a
Where a."vno"!=0 and (a."balance_quantity"+a."locked_quantity")>0
"""
logger.info("getting data from esdata1 tables")
bhw = pd.read_sql(q1, mssql_connection)
logger.info("Data pulled from esdata1 tables")
# MSSql2 connection
mssql2 = MSSql(connect_via_tunnel=False, db='Esdata_WS_2')
mssql2_connection = mssql2.open_connection()
q2 = """
Select * FROM (select
343 as wh_id,
b.code as wms_drug_code,
b.Barcode as drug_id,
a2.Altercode as distributor_id,
a2.Name as distributor_name,
a.Acno as wms_distributor_code,
a.Vdt as purchase_date,
b.name as drug_name,
a.Srate as srate,
coalesce(a.TQty, 0) as total_quantity,
case
when a.Vno < 0 then 0
else coalesce(a.bqty, 0)
end as balance_quantity,
case
when a.Vno > 0 then 0
else coalesce(a.tqty, 0)
end as locked_quantity,
coalesce(a.TQty * a.cost, 0) as total_value,
case
when a.Vno < 0 then 0
else coalesce(a.bqty * a.cost, 0)
end as balance_value,
case
when a.Vno > 0 then 0
else coalesce(a.tqty * a.cost, 0)
end as locked_value,
a.Evdt as expiry,
b.Compname as company_name,
b.Compcode as company_code,
b.Pack as pack,
a.Cost as purchase_rate,
a.Pbillno as purchase_bill_no,
a.Psrlno as purchase_serial_no,
a.Batch as batch_number,
a.mrp as mrp,
b.Prate as prate,
m.name as "drug_type",
s.name as "composition",
a.Gdn2 as godown_qty,
a.BQty - a.Gdn2 as store_qty,
sp.NetAmt invoice_net_amt,
sp.Taxamt invoice_tax_amt,
sp.Disamt invoice_dis_amt,
sp.qty as invoice_qty,
sp.cgst,
sp.sgst,
sp.igst,
a.vno,
b.MinQty as shelf_min,
b.MaxQty as shelf_max
from
fifo a with (nolock)
right join item b with (nolock) on
a.itemc = b.code
left join Acm a2 with (nolock) on
a2.code = a.Acno
and a2.Slcd in ('SL', 'GL')
left join Master m with (nolock) on
b.ItemCat = m.code
left join Salt s with (nolock) on
b.Saltcode = s.Code
left join SalePurchase2 sp with (nolock) on
a.Pbillno = sp.Pbillno
and a.Psrlno = sp.Psrlno
and a.Itemc = sp.Itemc
and sp.Vdt = a.Vdt
where
b.code > 0
and a.Psrlno in (
select
Psrlno
from
SalePurchase2)
and b.Barcode not like '%[^0-9]%') b
Where b."vno"!=0 and (b."balance_quantity"+b."locked_quantity")>0
"""
logger.info("getting data from esdata2 tables")
gaw = pd.read_sql(q2, mssql2_connection)
logger.info("Data pulled from esdata2 tables")
# MSSql3 connection
mssql3 = MSSql(connect_via_tunnel=False, db='Esdata_TEPL')
mssql3_connection = mssql3.open_connection()
q3 = """
Select * FROM ( select
342 as wh_id,
b.code as wms_drug_code,
b.Barcode as drug_id,
a2.Altercode as distributor_id,
a2.Name as distributor_name,
a.Acno as wms_distributor_code,
a.Vdt as purchase_date,
b.name as drug_name,
a.Srate as srate,
coalesce(a.TQty, 0) as total_quantity,
case
when a.Vno < 0 then 0
else coalesce(a.bqty, 0)
end as balance_quantity,
case
when a.Vno > 0 then 0
else coalesce(a.tqty, 0)
end as locked_quantity,
coalesce(a.TQty * a.cost, 0) as total_value,
case
when a.Vno < 0 then 0
else coalesce(a.bqty * a.cost, 0)
end as balance_value,
case
when a.Vno > 0 then 0
else coalesce(a.tqty * a.cost, 0)
end as locked_value,
a.Evdt as expiry,
b.Compname as company_name,
b.Compcode as company_code,
b.Pack as pack,
a.Cost as purchase_rate,
a.Pbillno as purchase_bill_no,
a.Psrlno as purchase_serial_no,
a.Batch as batch_number,
a.mrp as mrp,
b.Prate as prate,
m.name as "drug_type",
s.name as "composition",
a.Gdn2 as godown_qty,
a.BQty - a.Gdn2 as store_qty,
sp.NetAmt invoice_net_amt,
sp.Taxamt invoice_tax_amt,
sp.Disamt invoice_dis_amt,
sp.qty as invoice_qty,
sp.cgst,
sp.sgst,
sp.igst,
a.vno,
b.MinQty as shelf_min,
b.MaxQty as shelf_max
from
fifo a with (nolock)
right join item b with (nolock) on
a.itemc = b.code
left join Acm a2 with (nolock) on
a2.code = a.Acno
and a2.Slcd in ('SL', 'GL')
left join Master m with (nolock) on
b.ItemCat = m.code
left join Salt s with (nolock) on
b.Saltcode = s.Code
left join SalePurchase2 sp with (nolock) on
a.Pbillno = sp.Pbillno
and a.Psrlno = sp.Psrlno
and a.Itemc = sp.Itemc
and sp.Vdt = a.Vdt
where
b.code > 0
and a.Psrlno in (
select
Psrlno
from
SalePurchase2)
and b.Barcode not like '%[^0-9]%') c
Where c."vno"!=0 and (c."balance_quantity"+c."locked_quantity")>0
"""
logger.info("getting data from esdata3 tables")
tepl = pd.read_sql(q3, mssql3_connection)
logger.info("Data pulled from esdata3 tables")
df1 = pd.concat([bhw, gaw,tepl])
# getting safety stock data
doi_query = """
select
doi."store-id" as "wh_id",
doi."drug-id" as "drug_id",
doi."safe-stock" as "reorder_point",
doi.min as "safety_stock",
doi.max as "order_upto_point",
'NA' as bucket,
'NA' as history_bucket,
'NA' as category
from
"prod2-generico"."prod2-generico"."drug-order-info" doi
left join "prod2-generico"."prod2-generico".drugs d on
doi."drug-id" = d.id
where
"store-id" in (199, 343, 342)
and d."company-id" != 6984
union
select
doid."store-id" as "wh_id",
doid."drug-id" as "drug_id",
doid."safe-stock" as "reorder_point",
doid.min as "safety_stock",
doid.max as "order_upto_point",
'NA' as bucket,
'NA' as history_bucket,
'NA' as category
from
"prod2-generico"."prod2-generico"."drug-order-info-data" doid
left join "prod2-generico"."prod2-generico".drugs d on
doid."drug-id" = d.id
where
doid."store-id" in (199, 343, 342)
and d."company-id" = 6984
"""
logger.info("Getting data from RS")
doi_data = rs_db.get_df(doi_query)
logger.info("Data pulled from RS")
# doi_data.columns = doi_data.columns.str.decode("utf-8")
doi_data.columns = [c.replace('-', '_') for c in doi_data.columns]
wh_portfolio_query = """
select
"drug-id"
from
"prod2-generico"."wh-sku-subs-master" wssm
left join "prod2-generico".drugs d on
wssm."drug-id" = d.id
where
wssm."add-wh" = 'Yes'
and d."type" not in ('discontinued-products', 'banned')
"""
wh_portfolio = rs_db.get_df(wh_portfolio_query)
wh_portfolio.columns = [c.replace('-', '_') for c in wh_portfolio.columns]
wh_portfolio["in_wh_portfolio"] = 1
wh_portfolio['drug_id'] = wh_portfolio['drug_id'].astype(str)
wh_inventory = df1.merge(wh_portfolio, on="drug_id", how='outer')
wh_inventory['in_wh_portfolio'] = wh_inventory['in_wh_portfolio'].fillna(0).astype(int)
wh_inventory['wh_id']=wh_inventory['wh_id'].fillna(199)
wh_inventory[['wms_drug_code','distributor_id','wms_distributor_code','company_code','vno']]=\
wh_inventory[['wms_drug_code','distributor_id','wms_distributor_code','company_code','vno']].fillna(-1)
# merging two data sets
doi_data['drug_id'] = doi_data['drug_id'].astype(str)
wh_inventory = wh_inventory.merge(doi_data, on=['drug_id', 'wh_id'], how='left')
wh_inventory[['bucket', 'history_bucket', 'category']] = wh_inventory[
['bucket', 'history_bucket', 'category']].fillna('NA')
wh_inventory.dtypes
wh_inventory[['safety_stock', 'reorder_point', 'order_upto_point', 'shelf_min',
'shelf_max', 'invoice_qty']] = wh_inventory[['safety_stock', 'reorder_point',
'order_upto_point', 'shelf_min', 'shelf_max', 'invoice_qty']].fillna(0)
wh_inventory['snapshot_date'] = cur_date.strftime("%Y-%m-%d %H:%M:%S")
wh_inventory['created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
"%Y-%m-%d %H:%M:%S")
wh_inventory['created-by'] = 'etl-automation'
wh_inventory['updated-by'] = 'etl-automation'
wh_inventory['updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
"%Y-%m-%d %H:%M:%S")
wh_inventory[['total_quantity', 'balance_quantity', 'locked_quantity', 'total_value',
'balance_value', 'locked_value','safety_stock', 'reorder_point', 'order_upto_point', 'shelf_min',
'shelf_max', 'invoice_qty','godown_qty','store_qty','srate','purchase_serial_no'
]]=wh_inventory[['total_quantity', 'balance_quantity', 'locked_quantity', 'total_value',
'balance_value', 'locked_value','safety_stock', 'reorder_point', 'order_upto_point', 'shelf_min',
'shelf_max', 'invoice_qty','godown_qty','store_qty','srate','purchase_serial_no'
]].fillna(0)
wh_inventory.dtypes
# Writing data
wh_inventory[['wh_id', 'wms_drug_code','drug_id','wms_distributor_code',
'company_code','purchase_serial_no',
'safety_stock','reorder_point',
'order_upto_point',
'total_quantity','balance_quantity',
'locked_quantity',
'godown_qty',
'store_qty','invoice_qty','shelf_min','shelf_max','vno' ]] = \
wh_inventory[['wh_id', 'wms_drug_code','drug_id','wms_distributor_code',
'company_code','purchase_serial_no','safety_stock','reorder_point',
'order_upto_point',
'total_quantity','balance_quantity',
'locked_quantity',
'godown_qty',
'store_qty','invoice_qty','shelf_min','shelf_max','vno' ]].astype(int)
wh_inventory.columns = [c.replace('_', '-') for c in wh_inventory.columns]
wh_inventory = wh_inventory[
['wms-drug-code', 'drug-id', 'distributor-id', 'distributor-name',
'wms-distributor-code', 'purchase-date', 'drug-name', 'srate',
'total-quantity', 'balance-quantity', 'locked-quantity', 'total-value',
'balance-value', 'locked-value', 'expiry', 'company-name',
'company-code', 'pack', 'purchase-rate', 'purchase-bill-no',
'purchase-serial-no', 'batch-number', 'mrp', 'prate', 'drug-type',
'composition', 'bucket', 'history-bucket', 'category', 'safety-stock',
'reorder-point', 'order-upto-point', 'shelf-min', 'shelf-max',
'snapshot-date', 'godown-qty', 'store-qty', 'invoice-net-amt',
'invoice-tax-amt', 'invoice-dis-amt', 'invoice-qty', 'cgst', 'sgst',
'igst', 'vno', 'created-at',
'created-by', 'updated-at', 'updated-by', 'in-wh-portfolio', 'wh-id']]
s3 = S3()
logger.info("Writing data to wh-inventory-ss")
schema = "prod2-generico"
table_name = "wh-inventory-ss"
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
s3.write_df_to_db(df=wh_inventory[table_info['column_name']], table_name=table_name,
db=rs_db, schema='prod2-generico')
logger.info("wh-inventory-ss table updated")
status = True
except Exception as e:
err_msg = str(e)
logger.info('wms_inventory job failed')
logger.exception(e)
finally:
rs_db.close_connection()
mssql.close_connection()
mssql2.close_connection()
mssql3.close_connection()
# Sending email
email = Email()
if status:
result = 'Success'
email.send_email_file(subject=f"wms_inventory ({env}): {result}",
mail_body=f"Run time: {cur_date}",
to_emails=email_to, file_uris=[])
else:
result = 'Failed'
email.send_email_file(subject=f"wms_inventory ({env}): {result}",
mail_body=f"Run time: {cur_date} {err_msg}",
to_emails=email_to, file_uris=[])
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/warehouse/wms_inventory.py | wms_inventory.py |
import os
import sys
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.helper.parameter.job_parameter import parameter
#tag = parameters
env = "dev"
os.environ['env'] = env
job_params = parameter.get_params(job_id=125)
email_to = job_params['email_to']
logger = get_logger()
logger.info("Scripts begins. Env = " + env)
status = False
err_msg = ''
df_uri = ''
run_date = str(datetime.now().strftime("%Y-%m-%d"))
drugs_not_in_doi = 0
drugs_missed = 0
drugs_updated = 0
try:
rs_db = DB()
rs_db.open_connection()
# read inputs file to get parameters
logger.info('reading input file to get parameters')
params_table_query = """
select
"param-name" as param,
value
from
"prod2-generico"."wh-goodaid-forecast-input"
where
"param-name" not in ('drug_lvl_fcst_inputs' , 's_and_op_factors')
"""
logger.info('input parameters read')
params_table = rs_db.get_df(params_table_query)
params_table = params_table.apply(pd.to_numeric, errors='ignore')
days = int(params_table.where(params_table['param'] == 'days',
axis=0).dropna()['value'])
expected_new_stores = int(params_table.where(
params_table['param'] == 'expected_new_stores',
axis=0).dropna()['value'])
wh_id = int(params_table.where(params_table['param'] == 'gaw_id',
axis=0).dropna()['value'])
revenue_min = int(params_table.where(
params_table['param'] == 'revenue_min', axis=0).dropna()['value'])
revenue_max = int(params_table.where(
params_table['param'] == 'revenue_max', axis=0).dropna()['value'])
# get active gaid drugs list
drugs_query = '''
select
wssm."drug-id" as drug_id,
d.composition,
d."drug-name" as drug_name,
d.company,
d."type",
d.category
from
"prod2-generico"."wh-sku-subs-master" wssm
left join "prod2-generico".drugs d on
d.id = wssm."drug-id"
where
wssm."add-wh" = 'Yes'
and d."type" not in ('discontinued-products')
and d.company = 'GOODAID'
'''
drugs = rs_db.get_df(drugs_query)
logger.info('active drugs list pulled from wssm')
# get 28 days sales for active gaid drugs
drug_sales_query = '''
select
"drug-id" as drug_id,
sum(quantity) as drug_sales_quantity
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - {days}
and date("created-at") < current_date
group by
"drug-id"
'''.format(days=days, drug_ids=tuple(drugs['drug_id']))
drug_sales = rs_db.get_df(drug_sales_query)
logger.info('drug sales data pulled from rs')
drug_sales['drug_sales_quantity'] = drug_sales[
'drug_sales_quantity'] * 28 / days
# get non-ethical composition level sale
composition_sales_query = '''
select
composition as composition,
sum(quantity) as composition_sales_quantity
from
"prod2-generico".sales
where
composition in {compositions}
and date("created-at") >= current_date - {days}
and date("created-at") < current_date
and "type" <> 'ethical'
group by
composition
'''.format(days=days, compositions=tuple(drugs['composition']))
composition_sales = rs_db.get_df(composition_sales_query)
logger.info('composition data pulled from rs')
composition_sales['composition_sales_quantity'] = composition_sales[
'composition_sales_quantity'] * 28 / days
# merging data
main_df = drugs.merge(drug_sales, on='drug_id', how='left')
main_df['drug_sales_quantity'].fillna(0, inplace=True)
main_df = main_df.merge(composition_sales, on='composition',
how='left')
main_df['composition_sales_quantity'].fillna(0, inplace=True)
# getting 50% of composition level sales
main_df['composition_sales_quantity_50%'] = main_df[
'composition_sales_quantity'] * 0.5
main_df['composition_sales_quantity_50%'] = main_df[
'composition_sales_quantity_50%'].round(0)
# calculate month-on-month sales growth
# getting last-to-last 28 day sales for calculating growth factor
last_to_last_sales_query = '''
select
"drug-id" as drug_id,
sum(quantity) as last_to_last_28_day_sales
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - 56
and date("created-at") < current_date - 28
group by
"drug-id"
'''.format(drug_ids=tuple(drugs['drug_id']))
last_to_last_sales = rs_db.get_df(last_to_last_sales_query)
logger.info('last-to-last 28 day sales data pulled from rs')
# getting last 28 day sales
last_sales_query = '''
select
"drug-id" as drug_id,
sum(quantity) as last_28_day_sales
from
"prod2-generico".sales
where
"drug-id" in {drug_ids}
and date("created-at") >= current_date - 28
and date("created-at") < current_date
group by
"drug-id"
'''.format(drug_ids=tuple(drugs['drug_id']))
last_sales = rs_db.get_df(last_sales_query)
logger.info('last 28 day sales data pulled from rs')
# merging to main_df
main_df = main_df.merge(last_to_last_sales, on='drug_id', how='left')
main_df['last_to_last_28_day_sales'].fillna(0, inplace=True)
main_df = main_df.merge(last_sales, on='drug_id', how='left')
main_df['last_28_day_sales'].fillna(0, inplace=True)
main_df['growth_factor'] = main_df['last_28_day_sales'] / main_df[
'last_to_last_28_day_sales']
main_df['growth_factor'].fillna(1, inplace=True)
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] == np.inf, 1,
main_df['growth_factor'])
# growth factor capped at 150% - min at 100%
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] > 1.5, 1.5,
main_df['growth_factor'])
main_df['growth_factor'] = np.where(main_df[
'growth_factor'] < 1, 1,
main_df['growth_factor'])
# growth factor forced to 1 when 50% comp sales > drug sales
main_df['growth_factor'] = np.where(main_df[
'composition_sales_quantity_50%'] >
main_df[
'drug_sales_quantity'], 1,
main_df['growth_factor'])
# get s&op factor
logger.info('reading s&op factors table')
input_table_query = """
select
"drug-id" as drug_id,
value as s_op_factor,
"start-date" as start_date,
"end-date" as end_date
from
"prod2-generico"."wh-goodaid-forecast-input"
where
"param-name" = 's_and_op_factors'
"""
s_op_table = rs_db.get_df(input_table_query)
logger.info('s&op factors table read')
s_op_table = s_op_table.apply(pd.to_numeric, errors='ignore')
s_op_table = s_op_table[
s_op_table['start_date'] <= datetime.now().date()]
s_op_table = s_op_table[
s_op_table['end_date'] >= datetime.now().date()]
s_op_table.drop('start_date', axis=1, inplace=True)
s_op_table.drop('end_date', axis=1, inplace=True)
main_df = main_df.merge(s_op_table, on='drug_id', how='left')
main_df['s_op_factor'].fillna(1, inplace=True)
# get avg gaid sales for 13-16 lakh revenue stores
# getting stores lists to compare with
stores_cmp_query = '''
select
"store-id" as store_id,
round(sum("revenue-value")) as revenue
from
"prod2-generico".sales
where
date("created-at") >= current_date - 28
and date("created-at") < current_date
group by
"store-id"
'''
stores_cmp = rs_db.get_df(stores_cmp_query)
stores_cmp = stores_cmp[stores_cmp['revenue'] > revenue_min]
stores_cmp = stores_cmp[stores_cmp['revenue'] < revenue_max]
stores_list_to_comp = tuple(stores_cmp['store_id'])
logger.info('list of stores with revenue between ' + str(revenue_min) +
' and ' + str(revenue_max) + ' --> ' + str(stores_list_to_comp))
# adding expected_new_stores column
main_df['expected_new_stores'] = expected_new_stores
# getting avg sales
avg_store_sales_query = '''
select
composition ,
sum(quantity)/ {count} as avg_drug_sales_quantity
from
"prod2-generico".sales
where
composition in {compositions}
and date("created-at") >= current_date - 28
and date("created-at") < current_date
and "type" <> 'ethical'
and "store-id" in {stores_list_to_comp}
group by
composition
'''.format(compositions=tuple(drugs['composition']), \
stores_list_to_comp=stores_list_to_comp, \
count=len(stores_list_to_comp))
avg_store_sales = rs_db.get_df(avg_store_sales_query)
logger.info('avg composition sales retrieved for sample stores')
avg_store_sales['avg_drug_sales_quantity'] = avg_store_sales[
'avg_drug_sales_quantity'].round()
# merge to main_df
main_df = main_df.merge(avg_store_sales, on='composition', how='left')
main_df['avg_drug_sales_quantity'].fillna(0, inplace=True)
# get final forecast figures
main_df['forecast'] = main_df[[
'drug_sales_quantity',
'composition_sales_quantity_50%']].max(axis=1)
main_df['forecast'] = main_df['forecast'] * main_df['growth_factor'] * \
main_df['s_op_factor'] + main_df[
'expected_new_stores'] * \
main_df['avg_drug_sales_quantity']
main_df['forecast'] = main_df['forecast'].round()
# get input table and merge with main_df
logger.info('reading input table')
input_table_query = """
select
"drug-id" as drug_id,
lead_time_doh,
safety_stock_doh,
review_period
from
"prod2-generico"."wh-goodaid-forecast-input"
where
"param-name" = 'drug_lvl_fcst_inputs'
"""
input_table = rs_db.get_df(input_table_query)
logger.info('input table read')
input_table = input_table.apply(pd.to_numeric, errors='ignore')
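# DOH inputs per drug: reorder_point_doh = lead_time_doh + safety_stock_doh,
# order_upto_point_doh additionally adds the review period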
input_table['reorder_point_doh'] = input_table['lead_time_doh'] + \
input_table['safety_stock_doh']
input_table['min_doh'] = input_table['safety_stock_doh']
input_table['order_upto_point_doh'] = input_table['lead_time_doh'] + \
input_table['safety_stock_doh'] + \
input_table['review_period']
main_df = main_df.merge(input_table, on='drug_id', how='left')
# populating missing rows with defaults
main_df['lead_time_doh'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'lead_time_doh'].item(), inplace=True)
main_df['safety_stock_doh'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'safety_stock_doh'].item(), inplace=True)
main_df['review_period'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'review_period'].item(), inplace=True)
main_df['reorder_point_doh'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'reorder_point_doh'].item(), inplace=True)
main_df['min_doh'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'min_doh'].item(), inplace=True)
main_df['order_upto_point_doh'].fillna(
input_table.loc[input_table['drug_id'] == 0,
'order_upto_point_doh'].item(), inplace=True)
# calculate ss min max
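# convert DOH targets to quantities using the 28-day forecast (forecast / 28 = daily rate)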
main_df['safety_stock'] = (main_df['forecast'] / 28 *
main_df['safety_stock_doh']).round()
main_df['reorder_point'] = (main_df['forecast'] / 28 *
main_df['reorder_point_doh']).round()
main_df['order_upto_point'] = (main_df['forecast'] / 28 *
main_df['order_upto_point_doh']).round()
# get table structure to write to
to_upload_query = '''
select
*
from
"prod2-generico"."wh-safety-stock"
limit 1
'''
to_upload = rs_db.get_df(to_upload_query)
to_upload.columns = [c.replace('-', '_') for c in to_upload.columns]
to_upload.drop(0, axis=0, inplace=True)
to_upload['drug_id'] = main_df['drug_id']
to_upload['drug_name'] = main_df['drug_name']
to_upload['type'] = main_df['type']
to_upload['category'] = main_df['category']
to_upload['company'] = main_df['company']
# to_upload['bucket'] = main_df['bucket']
to_upload['fcst'] = main_df['forecast'].astype(int, errors='ignore')
to_upload['wh_id'] = wh_id
to_upload['forecast_type'] = 'goodaid'
to_upload['lead_time_mean'] = main_df['lead_time_doh']
to_upload['max_review_period'] = main_df['review_period'].astype(int, errors='ignore')
to_upload['demand_daily'] = main_df['forecast'] / 28
to_upload['safety_stock'] = main_df['safety_stock'].astype(int, errors='ignore')
to_upload['expected_nso'] = expected_new_stores
to_upload['reorder_point'] = main_df['reorder_point'].astype(int, errors='ignore')
to_upload['order_upto_point'] = main_df['order_upto_point'].astype(int, errors='ignore')
to_upload['last_month_sales'] = main_df['drug_sales_quantity'].astype(int, errors='ignore')
to_upload['safety_stock_days'] = main_df['safety_stock_doh']
to_upload['reorder_point_days'] = main_df['reorder_point_doh']
to_upload['order_upto_days'] = main_df['order_upto_point_doh']
to_upload['reset_date'] = run_date
to_upload['month'] = str(datetime.now(tz=gettz('Asia/Kolkata')).strftime("%m"))
to_upload['year'] = str(datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y"))
to_upload['month_begin_dt'] = str(
datetime.now(tz=gettz('Asia/Kolkata')).date() - timedelta(days=datetime.now(tz=gettz('Asia/Kolkata')).day - 1))
to_upload['created_at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
to_upload['created_by'] = 'etl-automation'
to_upload['updated_at'] = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
to_upload['updated_by'] = 'etl-automation'
to_upload = to_upload.fillna('')
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
s3.write_df_to_db(df=to_upload, table_name='wh-safety-stock',
db=rs_db_write, schema='prod2-generico')
logger.info("wh-safety-stock table updated")
# WRITING ATTACHMENTS FOR SUCCESS
df_uri = s3.save_df_to_s3(df=main_df,
file_name='GAW_goodaid_forecast_{date}.csv'.format(date=str(run_date)))
# writing to doid
logger.info('writing to doid for ' +
str(int(to_upload[['drug_id']].nunique())) + ' drugs')
ss_data_upload = to_upload.query('order_upto_point > 0')[
['wh_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = [
'store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']
type_list = tuple(drugs['type'].unique())
ss_data_upload = ss_data_upload.astype(float)
new_drug_entries, missed_entries = doid_update(
ss_data_upload, type_list, rs_db, 'prod2-generico', logger, gaid_omit=False)
rs_db.connection.close()
drugs_not_in_doi = len(new_drug_entries)
drugs_missed = len(missed_entries)
drugs_updated = len(ss_data_upload) - len(missed_entries) - len(new_drug_entries)
rs_db.close_connection()
rs_db_write.close_connection()
status = True
except Exception as e:
err_msg = str(e)
logger.info('wh_goodaid_forecast_343 job failed')
logger.exception(e)
# Sending email
email = Email()
if status:
result = 'Success'
email.send_email_file(subject=f"GOODAID Warehouse forecast ({env}): {result}",
mail_body=f"""
drugs updated successfully --> {drugs_updated}
drugs not updated --> {drugs_missed}
drugs not in doid --> {drugs_not_in_doi}
""",
to_emails=email_to, file_uris=[df_uri])
else:
result = 'Failed'
email.send_email_file(subject=f"GOODAID Warehouse forecast ({env}): {result}",
mail_body=f"Run time: {datetime.now(tz=gettz('Asia/Kolkata'))} {err_msg}",
to_emails=email_to, file_uris=[])
logger.info("Script ended")
"""
DDL
create table "prod2-generico"."wh-goodaid-forecast-input" (
"param-name" text ENCODE lzo,
value text ENCODE lzo,
"drug-id" text ENCODE lzo,
"lead_time_doh" int8 ENCODE az64,
"safety_stock_doh" int8 ENCODE az64,
"review_period" int8 ENCODE az64,
"start-date" date ENCODE az64,
"end-date" date ENCODE az64,
description text ENCODE lzo,
"created-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"created-by" VARCHAR default 'etl-automation' ENCODE lzo,
"updated-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"updated-by" VARCHAR default 'etl-automation' ENCODE lzo
);
ALTER TABLE "prod2-generico"."wh-goodaid-forecast-input" owner to "admin";
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/warehouse/wh_goodaid_forecast_343.py | wh_goodaid_forecast_343.py |
import os
import sys
import argparse
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from calendar import monthrange
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB, MongoDB, MSSql
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger, send_logs_via_email
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.warehouse.data_prep.wh_data_prep \
import get_launch_stock_per_store
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="To generate distributor level forecast at warehouse for the next month.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]",
type=str, required=False)
parser.add_argument('-nso', '--nso_history_days', default=90, type=int,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
nso_history_days = args.nso_history_days
logger = get_logger()
logger.info("Script begins")
status = False
err_msg = ''
df_uri = ''
run_date = str(datetime.now().date())
current_month_date = (
datetime.now().date() -
timedelta(days=datetime.now().day - 1))
next_month_date = datetime(current_month_date.year + \
int(current_month_date.month / 12), \
((current_month_date.month % 12) + 1), 1).date()
num_days = monthrange(next_month_date.year, next_month_date.month)[1]
try:
df = pd.DataFrame()
rs_db = DB()
rs_db.open_connection()
# MSSql connection
mssql = MSSql(connect_via_tunnel=False)
mssql_connection = mssql.open_connection()
q1 = """
select
b.Barcode as drug_id,
sum(case when Vno < 0 then 0 else coalesce(a.bqty, 0) end) as balance_quantity,
sum(case when Vno > 0 then 0 else coalesce(a.Tqty, 0) end) as locked_quantity
from
fifo a
right join item b on
a.itemc = b.code
where
b.code > 0
and b.Barcode not like '%[^0-9]%'
and a.Psrlno in (
select
Psrlno
from
SalePurchase2 sp
where
Vtype = 'PB')
and a.TQty > 0
group by
b.Barcode,
b.name
"""
wh_inventory = pd.read_sql(q1, mssql_connection)
logger.info("data pulled from RS")
wh_inventory['drug_id'] = pd.to_numeric(wh_inventory['drug_id'])
wh_inventory = wh_inventory.astype(int, errors='ignore')
wh_inventory['total_quantity'] = wh_inventory['balance_quantity'] + wh_inventory['locked_quantity']
# get wh portfolio
drugs_list = rs_db.get_df(
'''
select
wssm."drug-id" as drug_id,
d."drug-name" as drug_name,
f.fcst,
f.ss,
f.rop,
f.oup
from
"prod2-generico"."wh-sku-subs-master" wssm
left join (
select
"drug-id",
fcst,
"safety-stock" as ss,
"reorder-point" as rop,
"order-upto-point" as oup
from
"prod2-generico"."wh-safety-stock" wss
where
"forecast-date" = (
select
max("forecast-date")
from
"prod2-generico"."wh-safety-stock")) f on
f."drug-id" = wssm."drug-id"
left join "prod2-generico".drugs d on
d.id = wssm."drug-id"
where
wssm."add-wh" = 'Yes'
and d."type" <> 'discontinued-products'
and d.company <> 'GOODAID'
''')
drugs_list.fillna(0, inplace=True)
drugs_list = drugs_list.astype(int, errors='ignore')
# getting params
logger.info('reading input file to get expected_nso')
params_table_raw = """
select
"month-begin-dt" as month_begin_dt,
value as expected_nso
from
"prod2-generico"."wh-forecast-repln-input"
where
"param-name" = 'expected_nso'
"""
params_table = rs_db.get_df(params_table_raw)
params_table = params_table.apply(pd.to_numeric, errors='ignore')
try:
expected_nso = int(params_table[
params_table['month_begin_dt'] == next_month_date]['expected_nso'])
except Exception as error:
expected_nso = 0
logger.info('expected_nso parameter read --> ' + str(expected_nso))
logger.info('nso_history_days --> ' + str(nso_history_days))
# getting launch stock per store
launch_stock_per_store = get_launch_stock_per_store(rs_db, nso_history_days)
logger.info('launch stock per store pulled')
drugs_list = drugs_list.merge(launch_stock_per_store, on='drug_id', how='left')
drugs_list['launch_stock_per_store'].fillna(0, inplace=True)
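# uplift forecast for expected new store openings: launch stock per store
# (derived from the last nso_history_days of launches) * expected_nso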
drugs_list['fcst'] += drugs_list['launch_stock_per_store'] * expected_nso
drugs_list['fcst'] = drugs_list['fcst'].round().astype(int)
del drugs_list['launch_stock_per_store']
df = drugs_list.copy()
df = df.merge(wh_inventory, on='drug_id', how='left')
df['below_rop'] = np.where(df['total_quantity'] <= df['rop'], True, False)
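# purchase quantity is sized in multiples of (OUP - ROP):
# - stock above ROP: ceil((fcst - stock in excess of ROP) / (OUP - ROP) + 1) multiples
# - stock at/below ROP: top-up to OUP (net of ~4 days of forecasted demand on hand)
#   plus enough further (OUP - ROP) multiples to cover the remaining forecast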
df.loc[df['below_rop'] == False, 'purchase_quantity'] = np.ceil(
(df['fcst'] - (df['total_quantity'] - df['rop'])) / (
df['oup'] - df['rop']) + 1) * (df['oup'] - df['rop'])
df.loc[df['below_rop'] == True, 'purchase_quantity'] = np.ceil(
df['oup'] - (df['total_quantity'] - 4 * df['fcst'] / num_days)) + (
df['oup'] - df['rop']) * np.ceil(
(df['fcst'] - np.ceil(df['oup'] - (
df['total_quantity'] - 4 * df['fcst'] / num_days))) / (
df['oup'] - df['rop']) + 1)
df['purchase_quantity'].fillna(0, inplace=True)
df.loc[df['purchase_quantity'] <= 0, 'purchase_quantity'] = 0
del df['below_rop']
df['purchase_quantity'] = df['purchase_quantity'].astype(int)
mg_db = MongoDB()
mg_client = mg_db.open_connection("generico-crm")
db = mg_client['generico-crm']
collection = db["wmsDrugDistributorMappingV2"].find(
{
"is_active" : "true"
},
{
"drug_id": "$drug_id",
"rank1": "$rank1",
"rank1_name": "$rank1_name"
}
)
dist_list = pd.DataFrame(list(collection))
s3 = S3()
df_uri = s3.save_df_to_s3(df=df, file_name='wh_dist_pur_fcst_{date}.csv'.format(date=str(next_month_date)))
status = True
except Exception as error:
err_msg = str(error)
logger.exception(str(error))
# Sending email
email = Email()
if status:
result = 'Success'
email.send_email_file(subject='''Warehouse distributor M+1 purchase forecast for {date} ({env}): {result}
'''.format(date=str(next_month_date), env=env, result=result),
mail_body=f"Run time: {datetime.now()} {err_msg}",
to_emails=email_to, file_uris=[df_uri])
else:
result = 'Failed'
email.send_email_file(subject='''Warehouse distributor M+1 purchase forecast for {date} ({env}): {result}
'''.format(date=str(next_month_date), env=env, result=result),
mail_body=f"Run time: {datetime.now()} {err_msg}",
to_emails=email_to, file_uris=[])
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/warehouse/wh_dist_pur_fcst.py | wh_dist_pur_fcst.py |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 21 21:45:59 2022
@author: [email protected]
@Purpose: To generate forecast and replenishment figures for Warehouse
"""
import os
import sys
import pandas as pd
from datetime import datetime, timedelta
from scipy.stats import norm
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.utils.warehouse.data_prep.wh_data_prep import wh_data_prep
from zeno_etl_libs.utils.warehouse.forecast.forecast_main import wh_forecast
from zeno_etl_libs.utils.warehouse.safety_stock.wh_safety_stock import \
wh_safety_stock_calc
from zeno_etl_libs.utils.ipc.doid_update_ss import doid_update
from zeno_etl_libs.helper.parameter.job_parameter import parameter
#tag = parameters
env = "dev"
os.environ['env'] = env
# runtime variables
job_params = parameter.get_params(job_id=117)
ss_runtime_var = {'lead_time_mean': job_params['lead_time_mean'],
'lead_time_std': job_params['lead_time_std'],
'service_level': job_params['service_level'],
'ordering_freq': job_params['ordering_freq'],
'max_review_period': job_params['max_review_period'],
'z': round(norm.ppf(job_params['service_level']), 2),
'for_next_month': job_params['for_next_month'],
'cap_ss_days': job_params['cap_ss_days'],
'use_fcst_error': job_params['use_fcst_error'],
'fcst_hist_to_use': job_params['fcst_hist_to_use'],
'debug_mode': job_params['debug_mode'],
'simulate_for': job_params['simulate_for']}
email_to = job_params['email_to']
debug_mode = job_params['debug_mode']
simulate_for = job_params['simulate_for']
err_msg = ''
df_uri = ''
schema = job_params['schema']
reset = job_params['reset']
wh_id = job_params['wh_id']
nso_history_days = job_params['nso_history_days']
status = False
logger = get_logger()
logger.info("Scripts begins")
logger.info("Run time variables --> " + str(ss_runtime_var))
# getting run date for the script
if debug_mode == 'Y' and simulate_for != '':
reset_date = simulate_for
current_month_date = (pd.to_datetime(simulate_for).date() - timedelta(days=pd.to_datetime(simulate_for).day - 1))
else:
reset_date = str(datetime.now(tz=gettz('Asia/Kolkata')).date())
current_month_date = (datetime.now(tz=gettz('Asia/Kolkata')).date() -
timedelta(days=datetime.now(tz=gettz('Asia/Kolkata')).day - 1))
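# for_next_month = 'Y' -> forecast targets the first day of the next month,
# otherwise the first day of the current month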
if ss_runtime_var['for_next_month'] == 'Y':
forecast_date = str(
datetime(current_month_date.year +
int(current_month_date.month / 12),
((current_month_date.month % 12) + 1), 1).date())
else:
forecast_date = str(current_month_date)
logger.info(f"""
debug_mode --> {debug_mode}
reset_date --> {reset_date},
current_month_date --> {current_month_date},
forecast_date --> {forecast_date}
""")
try:
rs_db = DB()
rs_db.open_connection()
logger.info('reading input file to get expected_nso')
params_table_query = """
select
"month-begin-dt" as month_begin_dt,
value as expected_nso
from
"prod2-generico"."wh-forecast-repln-input"
where
"param-name" = 'expected_nso'
"""
params_table = rs_db.get_df(params_table_query)
logger.info('expected_nso parameter read')
params_table = params_table.apply(pd.to_numeric, errors='ignore')
params_table['month_begin_dt'] = params_table['month_begin_dt'].astype(str)
try:
expected_nso = int(params_table[
params_table[
'month_begin_dt'] == forecast_date][
'expected_nso'])
except Exception as error:
expected_nso = 0
logger.info(f"expected_nso --> {expected_nso}")
store_query = '''
select
"id",
name,
"opened-at" as opened_at
from
"prod2-generico".stores
where
"name" <> 'Zippin Central'
and "is-active" = 1
and "opened-at" != '0101-01-01 00:00:00'
and id not in (92, 52)
'''
stores = rs_db.get_df(store_query)
store_id_list = list(stores['id'])
new_drug_entries = pd.DataFrame()
missed_entries = pd.DataFrame()
# CONSIDERING DRUG TYPES FOR DATA LOAD
type_list = rs_db.get_df(
'select distinct type from "prod2-generico".drugs')
type_list = tuple(type_list[
~type_list.type.isin(
['', 'banned', 'discontinued-products'])][
'type'])
# RUNNING DATA PREPARATION
drug_sales_monthly, wh_drug_list, drug_history, demand_daily_deviation = wh_data_prep(
store_id_list, current_month_date, reset_date, type_list, rs_db, logger,
ss_runtime_var, schema)
drug_sales_monthly['drug_id'] = drug_sales_monthly['drug_id'].astype(int, errors='ignore')
drug_sales_monthly['year'] = drug_sales_monthly['year'].astype(int, errors='ignore')
drug_sales_monthly['month'] = drug_sales_monthly['month'].astype(int, errors='ignore')
drug_sales_monthly['net_sales_quantity'] = drug_sales_monthly['net_sales_quantity'].astype(int, errors='ignore')
drug_history = drug_history.astype(int, errors='ignore')
drug_sales_monthly['reset_date'] = reset_date
# FORECASTING
train, train_error, predict, wh_train, wh_train_error, wh_predict = wh_forecast(
drug_sales_monthly, wh_drug_list, drug_history, logger)
train['wh_id'] = wh_id
train_error['wh_id'] = wh_id
predict['wh_id'] = wh_id
wh_train['wh_id'] = wh_id
wh_train_error['wh_id'] = wh_id
wh_predict['wh_id'] = wh_id
train['forecast_date'] = forecast_date
train_error['forecast_date'] = forecast_date
predict['forecast_date'] = forecast_date
wh_train['forecast_date'] = forecast_date
wh_train_error['forecast_date'] = forecast_date
wh_predict['forecast_date'] = forecast_date
# SAFETY STOCK CALCULATIONS
last_actual_month = drug_sales_monthly['month_begin_dt'].max()
last_month_sales = drug_sales_monthly[
drug_sales_monthly['month_begin_dt'] == str(last_actual_month)]
last_month_sales = last_month_sales[['drug_id', 'net_sales_quantity']]
last_month_sales.rename(
columns={'net_sales_quantity': 'last_month_sales'}, inplace=True)
wh_safety_stock_df = wh_safety_stock_calc(
ss_runtime_var, wh_drug_list, wh_predict, last_month_sales, demand_daily_deviation, current_month_date,
forecast_date, reset_date, logger, expected_nso, nso_history_days, rs_db)
wh_safety_stock_df['wh_id'] = wh_id
wh_safety_stock_df['reset_date'] = str(reset_date)
rs_db.close_connection()
# WRITING TO POSTGRES
s3 = S3()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
created_at = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
wh_safety_stock_df['ptr'] = ''
wh_safety_stock_df['fcst'] = wh_safety_stock_df['fcst'].fillna(0).astype(int)
wh_safety_stock_df['safety_stock'] = wh_safety_stock_df['safety_stock'].fillna(0).astype(int)
wh_safety_stock_df['month'] = wh_safety_stock_df['month'].astype(int)
wh_safety_stock_df['year'] = wh_safety_stock_df['year'].astype(int)
wh_safety_stock_df['ss_wo_cap'] = wh_safety_stock_df['ss_wo_cap'].fillna(0).astype(int)
wh_safety_stock_df['reorder_point'] = wh_safety_stock_df['reorder_point'].fillna(0).astype(int)
wh_safety_stock_df['order_upto_point'] = wh_safety_stock_df['order_upto_point'].fillna(0).astype(int)
wh_safety_stock_df['shelf_min'] = wh_safety_stock_df['shelf_min'].fillna(0).astype(int)
wh_safety_stock_df['shelf_max'] = wh_safety_stock_df['shelf_max'].fillna(0).astype(int)
wh_safety_stock_df['rop_without_nso'] = wh_safety_stock_df['rop_without_nso'].fillna(0).astype(int)
wh_safety_stock_df['oup_without_nso'] = wh_safety_stock_df['oup_without_nso'].fillna(0).astype(int)
wh_safety_stock_df['created_at'] = created_at
wh_safety_stock_df['created_by'] = 'etl-automation'
wh_safety_stock_df['updated_at'] = created_at
wh_safety_stock_df['updated_by'] = 'etl-automation'
columns = [c.replace('-', '_') for c in ['drug-id', 'drug-name', 'type', 'category', 'company', 'ptr', 'bucket',
'history-bucket', 'fcst', 'final-fcst', 'forecast-type', 'model',
'month', 'month-begin-dt', 'std', 'year', 'wh-id', 'forecast-date',
'lead-time-mean', 'lead-time-std', 'max-review-period',
'ordering-freq',
'service-level', 'z-value', 'demand-daily', 'demand-daily-deviation',
'safety-stock', 'launch-stock-per-store', 'expected-nso',
'rop-without-nso', 'reorder-point', 'oup-without-nso',
'order-upto-point', 'shelf-min', 'shelf-max', 'last-month-sales',
'safety-stock-days',
'reorder-point-days', 'order-upto-days', 'reset-date', 'created-at',
'created-by', 'updated-at', 'updated-by', 'cap_ss_days', 'ss_wo_cap']]
wh_safety_stock_df = wh_safety_stock_df[columns]
if debug_mode == 'N':
# drug_sales_monthly
drug_sales_monthly['created-at'] = created_at
drug_sales_monthly['created-by'] = 'etl-automation'
drug_sales_monthly['updated-at'] = created_at
drug_sales_monthly['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=drug_sales_monthly, table_name='wh-drug-sales-monthly', db=rs_db_write,
schema='prod2-generico')
# train
train['type'] = 'separate'
train['created-at'] = created_at
train['created-by'] = 'etl-automation'
train['updated-at'] = created_at
train['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=train, table_name='wh-train', db=rs_db_write, schema='prod2-generico')
# wh_train
wh_train['type'] = 'ensemble'
wh_train['created-at'] = created_at
wh_train['created-by'] = 'etl-automation'
wh_train['updated-at'] = created_at
wh_train['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=wh_train, table_name='wh-train', db=rs_db_write, schema='prod2-generico')
# train_error
train_error['type'] = 'separate'
train_error['created-at'] = created_at
train_error['created-by'] = 'etl-automation'
train_error['updated-at'] = created_at
train_error['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=train_error, table_name='wh-train-error', db=rs_db_write, schema='prod2-generico')
# wh_train_error
wh_train_error['type'] = 'ensemble'
wh_train_error['created-at'] = created_at
wh_train_error['created-by'] = 'etl-automation'
wh_train_error['updated-at'] = created_at
wh_train_error['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=wh_train_error[train_error.columns], table_name='wh-train-error', db=rs_db_write,
schema='prod2-generico')
# predict
predict['type'] = 'separate'
predict['created-at'] = created_at
predict['created-by'] = 'etl-automation'
predict['updated-at'] = created_at
predict['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=predict, table_name='wh-predict', db=rs_db_write, schema='prod2-generico')
# wh_predict
wh_predict['type'] = 'ensemble'
wh_predict['created-at'] = created_at
wh_predict['created-by'] = 'etl-automation'
wh_predict['updated-at'] = created_at
wh_predict['updated-by'] = 'etl-automation'
s3.write_df_to_db(df=wh_predict, table_name='wh-predict', db=rs_db_write, schema='prod2-generico')
# wh_safety_stock_df
s3.write_df_to_db(df=wh_safety_stock_df, table_name='wh-safety-stock', db=rs_db_write,
schema='prod2-generico')
if reset == 'Y':
# UPLOADING SAFETY STOCK NUMBERS IN DRUG-ORDER-INFO
ss_data_upload = wh_safety_stock_df.query('order_upto_point > 0')[
['wh_id', 'drug_id', 'safety_stock', 'reorder_point',
'order_upto_point']]
ss_data_upload.columns = [
'store_id', 'drug_id', 'corr_min', 'corr_ss', 'corr_max']
new_drug_entries, missed_entries = doid_update(
ss_data_upload, type_list, rs_db_write, schema, logger)
logger.info('DOI updated as per request')
logger.info('missed entries --> ' + str(missed_entries))
logger.info('new_drug_entries entries --> ' + str(new_drug_entries))
else:
logger.info('DOID did not update as per request')
rs_db_write.close_connection()
df_uri = s3.save_df_to_s3(df=wh_safety_stock_df,
file_name='wh_safety_stock_{date}.csv'.format(date=str(forecast_date)))
status = True
except Exception as error:
err_msg = str(error)
logger.info(str(error))
raise error
email = Email()
if debug_mode == 'Y':
email_to = '[email protected],[email protected]'
if status:
result = 'Success'
email.send_email_file(subject=f"Warehouse forecast & replenishment ({env}): {result}",
mail_body=f"Run time: {datetime.now()} {err_msg}",
to_emails=email_to, file_uris=[df_uri])
else:
result = 'Failed'
email.send_email_file(subject=f"Warehouse forecast & replenishment ({env}): {result}",
mail_body=f"Run time: {datetime.now()} {err_msg}",
to_emails=email_to, file_uris=[])
# DDLs for tables
"""
create table "prod2-generico"."wh-forecast-repln-input" (
"param-name" text ENCODE lzo,
"month-begin-dt" date ENCODE az64,
value text ENCODE lzo,
"created-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"created-by" VARCHAR default 'etl-automation' ENCODE lzo,
"updated-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"updated-by" VARCHAR default 'etl-automation' ENCODE lzo
);
ALTER TABLE "prod2-generico"."wh-forecast-repln-input" owner to "admin";
CREATE TABLE "prod2-generico"."wh-safety-stock" (
"drug-id" int8 ENCODE az64,
"drug-name" text ENCODE lzo,
"type" text ENCODE lzo,
category text ENCODE lzo,
company text ENCODE lzo,
ptr float8 ENCODE zstd,
bucket text ENCODE lzo,
"history-bucket" text ENCODE lzo,
fcst float8 ENCODE zstd,
"final-fcst" text ENCODE lzo,
"forecast-type" text ENCODE lzo,
model text ENCODE lzo,
"month" int8 ENCODE az64,
"month-begin-dt" date ENCODE az64,
std float8 ENCODE zstd,
"year" int8 ENCODE az64,
"wh-id" int8 ENCODE az64,
"forecast-date" text ENCODE lzo,
"lead-time-mean" int8 ENCODE az64,
"lead-time-std" int8 ENCODE az64,
"max-review-period" int8 ENCODE az64,
"ordering-freq" int8 ENCODE az64,
"service-level" float8 ENCODE zstd,
"z-value" float8 ENCODE zstd,
"demand-daily" float8 ENCODE zstd,
"demand-daily-deviation" float8 ENCODE zstd,
"safety-stock" float8 ENCODE zstd,
"reorder-point" float8 ENCODE zstd,
"order-upto-point" float8 ENCODE zstd,
"shelf-min" float8 ENCODE zstd,
"shelf-max" float8 ENCODE zstd,
"last-month-sales" float8 ENCODE zstd,
"safety-stock-days" float8 ENCODE zstd,
"reorder-point-days" float8 ENCODE zstd,
"order-upto-days" float8 ENCODE zstd,
"reset-date" text ENCODE lzo,
"uploaded-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"rop-without-nso" int8 ENCODE az64,
"launch-stock-per-store" float8 ENCODE zstd,
"expected-nso" int8 ENCODE az64,
"oup-without-nso" int8 ENCODE az64,
"created-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"created-by" VARCHAR default 'etl-automation' ENCODE lzo,
"updated-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"updated-by" VARCHAR default 'etl-automation' ENCODE lzo
);
ALTER TABLE "prod2-generico"."wh-safety-stock" owner to "admin";
CREATE TABLE "prod2-generico"."wh-drug-sales-monthly" (
"drug-id" int8 encode az64,
"month-begin-dt" timestamp without time zone ENCODE az64,
"year" int8 encode az64,
"month" int8 encode az64,
"net-sales-quantity" float8 encode zstd,
"first-bill-date" timestamp without time zone ENCODE az64,
"bill-month" date ENCODE az64,
"reset-date" text encode lzo,
"created-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"created-by" VARCHAR default 'etl-automation' ENCODE lzo,
"updated-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"updated-by" VARCHAR default 'etl-automation' ENCODE lzo
);
ALTER TABLE "prod2-generico"."wh-drug-sales-monthly" owner to "admin";
create table "prod2-generico"."wh-train" (
"drug-id" int8 encode az64,
"month-begin-dt" text encode lzo,
"year" int8 encode az64,
"month" int8 encode az64,
fcst float8 encode zstd,
std float8 encode zstd,
actual float8 encode zstd,
ape float8 encode zstd,
ae float8 encode zstd,
model text encode lzo,
"history-bucket" text encode lzo,
"hyper-params" text encode lzo,
"forecast-type" text encode lzo,
"final-fcst" text encode lzo,
"wh-id" int8 encode az64,
"forecast-date" text encode lzo,
"type" text encode lzo,
"created-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"created-by" VARCHAR default 'etl-automation' ENCODE lzo,
"updated-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"updated-by" VARCHAR default 'etl-automation' ENCODE lzo
);
ALTER TABLE "prod2-generico"."wh-train" owner to "admin";
CREATE TABLE "prod2-generico"."wh-train-error" (
"drug-id" int8 encode az64,
mae float8 encode zstd,
mape float8 encode zstd,
model text encode lzo,
"history-bucket" text encode lzo,
"forecast-type" text encode lzo,
"final-fcst" text encode lzo,
"wh-id" int8 encode az64,
"forecast-date" text encode lzo,
"type" text encode lzo,
"created-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"created-by" VARCHAR default 'etl-automation' ENCODE lzo,
"updated-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"updated-by" VARCHAR default 'etl-automation' ENCODE lzo
);
ALTER TABLE "prod2-generico"."wh-train-error" owner to "admin";
CREATE TABLE "prod2-generico"."wh-predict" (
"drug-id" int8 encode az64,
"month-begin-dt" text encode lzo,
"year" int8 encode az64,
"month" int8 encode az64,
fcst float8 encode zstd,
std float8 encode zstd,
model text encode lzo,
"history-bucket" text encode lzo,
"forecast-type" text encode lzo,
"final-fcst" text encode lzo,
"wh-id" int8 encode az64,
"forecast-date" text encode lzo,
"type" text encode lzo,
"created-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"created-by" VARCHAR default 'etl-automation' ENCODE lzo,
"updated-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"updated-by" VARCHAR default 'etl-automation' ENCODE lzo
);
ALTER TABLE "prod2-generico"."wh-predict" owner to "admin";
CREATE TABLE "prod2-generico"."wh-safety-stock" (
"drug-id" int8 encode az64,
"drug-name" text encode lzo,
"type" text encode lzo,
category text encode lzo,
company text encode lzo,
ptr float8 encode zstd,
bucket text encode lzo,
"history-bucket" text encode lzo,
fcst int8 encode az64,
"final-fcst" text encode lzo,
"forecast-type" text encode lzo,
model text encode lzo,
"month" int8 encode az64,
"month-begin-dt" date encode az64,
std float8 encode zstd,
"year" int8 encode az64,
"wh-id" int8 encode az64,
"forecast-date" date encode az64,
"lead-time-mean" float8 encode zstd,
"lead-time-std" float8 encode zstd,
"max-review-period" float8 encode zstd,
"ordering-freq" float8 encode zstd,
"service-level" float8 encode zstd,
"z-value" float8 encode zstd,
"demand-daily" float8 encode zstd,
"demand-daily-deviation" float8 encode zstd,
"safety-stock" int8 encode az64,
"launch-stock-per-store" float8 encode zstd,
"expected-nso" float8 encode zstd,
"rop-without-nso" int8 encode az64,
"reorder-point" int8 encode az64,
"oup-without-nso" int8 encode az64,
"order-upto-point" int8 encode az64,
"shelf-min" int8 encode az64,
"shelf-max" int8 encode az64,
"last-month-sales" int8 encode az64,
"safety-stock-days" float8 encode zstd,
"reorder-point-days" float8 encode zstd,
"order-upto-days" float8 encode zstd,
"reset-date" date encode az64,
"created-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"created-by" VARCHAR default 'etl-automation' ENCODE lzo,
"updated-at" TIMESTAMP without TIME zone default getdate() ENCODE az64,
"updated-by" VARCHAR default 'etl-automation' ENCODE lzo
);
ALTER TABLE "prod2-generico"."wh-safety-stock" owner to "admin";
""" | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/warehouse/wh_forecast_reset.py | wh_forecast_reset.py |
import os
import sys
import argparse
import pandas as pd
from datetime import datetime, timedelta
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB, MSSql
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="To update wh_to_store_ff table.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
logger = get_logger()
logger.info("Scripts begins")
status = False
schema = 'prod2-generico'
err_msg = ''
# getting run date for the script
run_date = datetime.now(tz=gettz('Asia/Kolkata')).strftime("%Y-%m-%d %H:%M:%S")
try:
mssql = MSSql(connect_via_tunnel=False)
mssql_connection = mssql.open_connection()
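        # WMS (MSSQL) query: order, pick, invoice and dispatch details per short-book line,
        # for orders placed since the start of the previous month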
q = """
select
a.adnlordno,
a.qty as [ordered_quantity],
case
when OrderDt is null then null else CONVERT(DATETIME, concat(CONVERT(date, OrderDt), ' ', a.mTime), 0)
end as [wms_ordered_at],
case
when S2.VDT is null then null else CONVERT(DATETIME, concat(CONVERT(date, S2.VDT), ' ', s1.mTime), 0)
end as [wms_invoiced_at],
case
when ds.FinalDate is null then null else CONVERT(DATETIME, concat(CONVERT(date, ds.FinalDate), ' ', ds.FinalTime), 0)
end as [wms_dispatched_at],
a.itemc,
a.ordno,
S1.gstVno AS [invoice_number],
BTM.Name AS [wms_invoice_status],
CONVERT(INT, A1.ALTERCODE) AS [store_id],
ap.picktime,
A1.NAME AS [wms_store_name],
CONVERT(INT, I.BARCODE) AS [drug_id],
I.NAME AS [wms_drug_name],
'' as [batch_no],
-- S2.BATCH AS [batch_no],
CONVERT(DECIMAL(18, 0), S2.QTY) AS [wms_qty],
CONVERT(DECIMAL(18, 0), S2.FQTY) AS [wms_free_qty],
CASE
WHEN CONVERT(DECIMAL(18, 0), (S2.QTY + S2.FQTY))>a.qty THEN a.qty
ELSE CONVERT(DECIMAL(18, 0), (S2.QTY + S2.FQTY))
END AS [wms_actual_qty],
(S2.NETAMT + (S2.IGSTAmt + S2.CGSTAmt + S2.SGSTAmt)) AS [wms_net_value],
(S2.IGSTAmt + S2.CGSTAmt + S2.SGSTAmt) AS [wms_net_value_tax],
s1.uid as [checker_name],
mst.name as [picker_name],
mr.name as [route],
case
when S1.Acno = 59353 then 'npi'
else 'non_npi'
end as npi_drug,
I.Location as rack_location,
CASE
when I.Location >= 11001011 and I.Location <= 11306041 then 'npi_rack'
else 'non_npi_rack'
end as npi_rack,
s1.Vno,
b.qtyrecd as picked_quantity
from
(
select
*
from
PorderUPD
union
select
*
from
Porder) a
left join ProofSp2 b on
b.Itemc = a.itemc
and b.Ordno = a.Ordno
and b.Vdt = a.PurVdt
and b.Vtype = a.PurVtype
left join ProofSp1 c on
b.Vno = c.vno
and b.Vtype = c.Vtype
and b.Vdt = c.Vdt
left join (
select
Vno,
Vdt,
Vtype,
Itemc,
acno,
slcd,
area,
max(Psrlno) as [Psrlno],
sum(Qty) as [Qty],
sum(fqty) as [fqty],
sum(NetAmt) as [NetAmt],
sum(IGSTAmt) as [IGSTAmt],
sum(CGSTAmt) as [CGSTAmt],
sum(SGSTAmt) as [SGSTAmt]
from
SalePurchase2
group by
Vno,
Vdt,
Vtype,
Itemc,
acno,
slcd,
area ) as s2 on
s2.vno = c.RefVno
and s2.Itemc = a.itemc
and s2.Vtype = c.RefVtype
and s2.Vdt = c.RefVdt
left join SalePurchase1 S1 on
c.RefVno = S1.Vno and
c.acno = S1.Acno and
c.RefVdt = S1.Vdt
left JOIN ACM A1 ON
a.ACNO = A1.CODE
-- AND S2.SLCD = A.SLCD
left join (
select
vno,
vdt,
vtype,
itemc,
max(PickTime) as PickTime,
max(PickerID) as PickerID
from
App_SP2
group by
vno,
vdt,
vtype,
itemc
union
select
vno,
vdt,
vtype,
itemc,
max(PickTime) as PickTime,
max(PickerID) as PickerID
from
App_SP2Upd
group by
vno,
vdt,
vtype,
itemc
) ap on
ap.vno = c.Vno
and ap.vdt = c.Vdt
and ap.Vtype = c.Vtype
and AP.itemc = a.Itemc
left join DispatchStmt ds on
ds.Vdt = s2.Vdt
and ds.Vno = s2.Vno
and ds.Vtype = s2.Vtype
LEFT JOIN MASTER M ON
S2.AREA = M.CODE
AND M.SLCD = 'AR'
LEFT JOIN ACMEXTRA AE ON
A1.CODE = AE.CODE
AND A1.SLCD = AE.SLCD
left JOIN Item I ON
a.ITEMC = I.CODE
left JOIN COMPANY C1 ON
I.COMPCODE = C1.CODE
-- left JOIN
-- FIFO F ON S2.PSRLNO = F.PSRLNO
LEFT JOIN MASTER CC ON
CC.CODE = AE.CUSTCAT
AND CC.SLCD = 'CC'
LEFT JOIN BillTrackMst BTM ON
S1.SyncTag = BTM.Srl
left join (
select
code,
name
from
MASTER
where
slcd = 'SM') as mst on
mst.code = ap.PickerID
left join MASTER mr on mr.code = a1.route
WHERE
OrderDt >= cast(DATEADD(month, -1, GETDATE()) - day(GETDATE()) + 1 as date)
and AdnlOrdNo is not null
and isnumeric(I.BARCODE) = 1
"""
df = pd.read_sql(q, mssql_connection)
df['sbid'] = df['adnlordno'].str[-8:].astype(int)
sbid = tuple(df['sbid'])
rs_db_read = DB()
rs_db_read.open_connection()
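        # classify each short-book as auto-short (as/ms) vs patient-request (pr) using short-book-1 in Redshift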
q2 = """
select
id as sbid,
case
when "auto-short" = 1 then 'as/ms'
else 'pr'
end as order_type
from
"prod2-generico"."short-book-1"
where
id in {}
""".format(sbid)
as_pr = rs_db_read.get_df(q2)
df = df.merge(as_pr, on='sbid', how='left')
q3 = """
select
id as drug_id,
type,
case
when company = 'GOODAID' then 'goodaid'
when "type" = 'ethical' then 'ethical'
when "type" = 'generic' then 'generic'
else 'other'
end as sub_type
from
"prod2-generico".drugs
"""
drugs = rs_db_read.get_df(q3)
q4 = """
select
id as store_id,
case
when "franchisee-id" = 1 then 'coco'
else 'fofo'
end as store_type
from
"prod2-generico"."stores-master" sm
"""
stores = rs_db_read.get_df(q4)
df = df.merge(stores, on='store_id', how='left')
df['run_time'] = run_date
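        # short-book lifecycle timestamps: FOFO pre-save approval, created/ordered/re-ordered and delivered-at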
q5 = """
select
a.id as sbid,
coalesce(fofo_approved_at."presaved_approved_at", '0101-01-01 00:00:00.000') as presaved_approved_at,
a."created-at" as sb_created_at,
a."ordered-at" as sb_ordered_at,
a."re-ordered-at" as sb_reordered_at,
coalesce(s.delivered_at, '0101-01-01 00:00:00.000') as sb_delivered_at
from
"prod2-generico"."prod2-generico"."short-book-1" a
left join (
select
sbol."short-book-id" ,
min(sbol."created-at") as "presaved_approved_at"
from
"prod2-generico"."prod2-generico"."short-book-order-logs" sbol
left join "prod2-generico"."prod2-generico"."short-book-1" sb2 on
sb2.id = sbol."short-book-id"
left join "prod2-generico"."prod2-generico".stores s2 on
s2.id = sb2."store-id"
where
s2."franchisee-id" != 1
and sbol.status not in ('presaved', 'lost', 'failed', 'declined', 'deleted')
group by
sbol."short-book-id"
) fofo_approved_at
on
fofo_approved_at."short-book-id" = a.id
left join
(
select
s."short-book-id",
MAX(b."delivered-at") as "delivered_at"
from
"prod2-generico"."short-book-invoice-items" s
join "prod2-generico"."invoice-items" c on
s."invoice-item-id" = c."id"
join "prod2-generico"."invoices" b on
c."invoice-id" = b.id
where
DATE("approved-at") >= date(date_trunc('month', current_date) - interval '1 month')
group by
s."short-book-id") s on
a."id" = s."short-book-id"
where
id in {}
""".format(sbid)
timestamps = rs_db_read.get_df(q5)
timestamps.fillna('', inplace=True)
df = df.merge(timestamps, on='sbid', how='left')
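        # dispatch (tag) number per WMS voucher, taken from DispatchStmt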
q6 = """
select
Vno,
max(TagNo) as dispatch_no
from
DispatchStmt ds
where
Vtype = 'SB'
group by
Vno
"""
dispatch_no = pd.read_sql(q6, mssql_connection)
df = df.merge(dispatch_no, on='Vno', how='left')
del df['Vno']
df['picked_qty'] = df['picked_quantity']
del df['picked_quantity']
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
logger.info('writing data to table wh-to-store-ff')
s3.write_df_to_db(df, 'wh-to-store-ff', rs_db_write, schema=schema)
logger.info('deleting previous data')
rs_db_write.execute("""
delete
from
"prod2-generico"."wh-to-store-ff"
where
run_time <> (
select
max(run_time)
from
"prod2-generico"."wh-to-store-ff" )
""")
logger.info('wh-to-store-ff table updated')
status = True
except Exception as error:
err_msg = str(error)
logger.exception(str(error))
email = Email()
if not status:
result = 'Failed'
email.send_email_file(subject=f"wh_to_store_ff ({env}): {result}",
mail_body=f"Run time: {datetime.now()} {err_msg}",
to_emails=email_to, file_uris=[])
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/warehouse/wh_to_store_ff.py | wh_to_store_ff.py |
import os
import sys
import pandas as pd
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.utils.doid_write import doid_custom_write
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
import argparse
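# Reads a CSV of SS/ROP/OUP values from S3, pushes them into drug-order-info (DOID),
# then deletes the uploaded file from S3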
def main(s3_file_end_path, ss_col_name, rop_col_name, oup_col_name, s3,
debug_mode, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
missed_entries = pd.DataFrame()
try:
# Read csv file from S3 into pandas datafame
logger.info("Reading file from S3 into pd.DataFrame")
file_path = s3.download_file_from_s3(s3_file_end_path)
df_upload = pd.read_csv(file_path)
logger.info(f"Input DF shape: {df_upload.shape}")
if debug_mode == 'N':
# Upload values into DOID
logger.info("Updating new values into DOID")
missed_entries = doid_custom_write(df_upload, logger,
ss_col=ss_col_name,
rop_col=rop_col_name,
oup_col=oup_col_name)
# Delete file from S3
logger.info("Deleting uploaded file from S3")
s3_file_uri = "s3://aws-glue-temporary-921939243643-ap-south-1/" + s3_file_end_path
s3.delete_s3_obj(uri=s3_file_uri)
status = 'Success'
logger.info(f"DOID manual update code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"DOID manual update code execution status: {status}")
return status, missed_entries
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str,
required=False)
parser.add_argument('-d', '--debug_mode', default="Y", type=str,
required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
parser.add_argument('-s3fn', '--s3_file_end_path',
default="doid_manual_update_csv/file_name.csv", type=str,
required=False)
parser.add_argument('-ss', '--ss_col_name',
default="ss", type=str, required=False)
parser.add_argument('-rop', '--rop_col_name',
default="rop", type=str, required=False)
parser.add_argument('-oup', '--oup_col_name',
default="oup", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
debug_mode = args.debug_mode
email_to = args.email_to
s3_file_end_path = args.s3_file_end_path
ss_col_name = args.ss_col_name
rop_col_name = args.rop_col_name
oup_col_name = args.oup_col_name
logger = get_logger()
s3 = S3()
status, missed_entries = main(s3_file_end_path, ss_col_name, rop_col_name,
oup_col_name, s3, debug_mode, logger)
missed_entries_uri = s3.save_df_to_s3(
missed_entries, file_name=f"missed_entries_manual_update.csv")
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"DOID Manual Update (GLUE-{env}): {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Job Params: {args}
""",
to_emails=email_to, file_uris=[missed_entries_uri])
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/doid_manual_update/doid_manual_update.py | doid_manual_update.py |
import argparse
import os
import sys
import pandas as pd
from zeno_etl_libs.db.db import PostGreWrite, DB
from zeno_etl_libs.helper import helper
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stage, prod)")
parser.add_argument('-fr', '--full_run', default="yes", type=str, required=False)
parser.add_argument('-dfs', '--db_fetch_size', default=1000, type=int, required=False)
parser.add_argument('-ibs', '--insert_batch_size', default=100, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
full_run = args.full_run
db_fetch_size = args.db_fetch_size
insert_batch_size = args.insert_batch_size
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
logger.info(f"full_run: {full_run}")
logger.info(f"db_fetch_size: {db_fetch_size}")
logger.info(f"insert_batch_size: {insert_batch_size}")
""" opening the Redshift connection """
rs_db = DB()
rs_db.open_connection()
""" opening the postgres connection """
pg_db_w = PostGreWrite()
pg_db_w.open_connection()
pg_schema = "public"
rs_schema = "prod2-generico"
rs_table = "patients-metadata-2"
pg_table = rs_table.replace("-", "_")
pg_temp_table = pg_table + "_temp"
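# incremental sync: only Redshift rows updated after the latest updated_at already present in Postgres are pulled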
query = f""" SELECT max(updated_at) as "last_updated_at" FROM "{pg_schema}"."patients_metadata_2" """
df = pd.read_sql_query(query, pg_db_w.connection)
date_filter = ""
if df.last_updated_at[0] is not None:
date_filter = f""" where "updated-at" >= '{df.last_updated_at[0]}' """
table_info = helper.get_table_info(db=rs_db, table_name=rs_table, schema=rs_schema)
columns = ["id",
"value-segment",
"value-segment-calculation-date",
"behaviour-segment",
"behaviour-segment-calculation-date",
"last-bill-date",
"number-of-bills",
"total-spend",
"average-bill-value",
"is-chronic",
"total-quantity",
"quantity-generic",
"quantity-generic-pc",
"hd-bills",
"referred-count",
"latest-nps-rating",
"latest-nps-rating-comment",
"latest-nps-rating-date",
"latest-nps-rating-store-id",
"latest-nps-rating-store-name",
"first-bill-date",
"is-goodaid"]
def get_patients_metadata_from_rs(batch=1):
limit = db_fetch_size
""" Query to get patients-metadata from RS """
query = f""" SELECT "{'","'.join(columns)}" FROM "{rs_schema}"."{rs_table}" {date_filter}
order by "updated-at" limit {limit} offset {(batch - 1) * limit} """
logger.info(f"Batch: {batch}, limit:{limit} ")
df: pd.DataFrame = rs_db.get_df(query=query)
df.columns = [c.replace('-', '_') for c in df.columns]
# fix data types
for col in ['total_spend', 'average_bill_value', 'quantity_generic_pc']:
df[col] = df[col].fillna(0.0).astype(float)
for col in ['behaviour_segment_calculation_date', 'value_segment_calculation_date',
'last_bill_date', 'latest_nps_rating_date', 'first_bill_date']:
df[col] = pd.to_datetime(df[col], errors='ignore')
for col in ['number_of_bills', 'total_quantity', 'quantity_generic', 'hd_bills',
'referred_count', 'latest_nps_rating', 'latest_nps_rating_store_id']:
df[col] = df[col].fillna(0).astype(int)
for col in ['is_chronic', 'is_goodaid']:
df[col] = df[col].fillna(False).astype(bool)
logger.info("fetched data from RS DB successfully.")
return df
try:
# clean the temp table
query = f""" delete from {pg_schema}.{pg_temp_table}; """
pg_db_w.engine.execute(query)
# insert_batch_size = 10000
batch = 1
while True:
df = get_patients_metadata_from_rs(batch=batch)
if df.empty:
logger.info("Nothing to sync since last update.")
break
"Insert into the PostGre temp table"
small_batch_counter = 1
for ga_df in helper.batch(df, insert_batch_size):
ga_df.to_sql(name=pg_temp_table, con=pg_db_w.engine, if_exists='append', chunksize=500,
method='multi', index=False)
logger.info(f"small_batch_counter: {small_batch_counter}")
small_batch_counter += 1
logger.info("Inserted data in Postgres DB temp table.")
if full_run.lower() != 'yes':
break
batch += 1
"""Sync temp and main table"""
# 1. Delete the common records
query = f"""
delete from {pg_schema}.{pg_table} as tgt using {pg_schema}.{pg_temp_table} as src
where tgt.id = src.id ;
"""
pg_db_w.engine.execute(query)
# 2. Insert the temp table records to main table
query = f""" insert into {pg_schema}.{pg_table} select * from {pg_schema}.{pg_temp_table}; """
pg_db_w.engine.execute(query)
logger.info("Synced data in Postgres DB successfully")
except Exception as e:
# logger.exception(e)
raise e
finally:
pg_db_w.close_connection()
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/patients-metadata-postgre-sync/patients-metadata-postgre-sync.py | patients-metadata-postgre-sync.py |
import argparse
import os
import sys
import pandas as pd
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.google.sheet.sheet import GoogleSheet
from datetime import datetime as dt
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-sd', '--start_date', default="0", type=str, required=False)
parser.add_argument('-ed', '--end_date', default="0", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger()
# Start date and end date parameters (can be changed later)
try:
start_date = args.start_date
start_date = str(dt.strptime(start_date, "%Y-%m-%d").date())
end_date = args.end_date
end_date = str(dt.strptime(end_date, "%Y-%m-%d").date())
except ValueError:
start_date = dt.today().replace(day=1).strftime("%Y-%m-%d")
end_date = dt.today().strftime("%Y-%m-%d")
schema = 'prod2-generico'
table_name = 'mktg-store-targets'
rs_db = DB(read_only=False)
rs_db.open_connection()
s3 = S3()
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
read_schema = 'prod2-generico'
# Delete records of current month from table data
if isinstance(table_info, type(None)):
logger.info(f"table: {table_name} do not exist")
else:
truncate_query = f"""
DELETE
FROM
"{read_schema}"."mktg-store-targets"
WHERE
DATE("month-ref-date") >= '{start_date}'
AND DATE("month-ref-date") <= '{end_date}'
"""
logger.info(truncate_query)
rs_db.execute(truncate_query)
logger.info('Delete for recent month done')
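# pull store-level marketing targets for the period from the Google Sheet (tab "MT")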
gs = GoogleSheet()
mktg_store_targets = gs.download({
"spreadsheet_id": "1AZQF5DF6bQjX3rEtgdZ2BUI7htoNUnf2DaKFFHLaiMg",
"sheet_name": "MT",
"listedFields": []})
mktg_store_targets = pd.DataFrame(mktg_store_targets)
mktg_store_targets['month-ref-date'] = mktg_store_targets['month-ref-date'].apply(pd.to_datetime, errors='coerce')
mktg_store_targets_today = mktg_store_targets[(mktg_store_targets['month-ref-date'] >= start_date) &
(mktg_store_targets['month-ref-date'] <= end_date)]
# data type correction
mktg_store_targets_today['store-id'] = mktg_store_targets_today['store-id'].astype(int)
mktg_store_targets_today['target-sales'] = pd.to_numeric(mktg_store_targets_today['target-sales'], errors='coerce')
mktg_store_targets_today['target-acq'] = pd.to_numeric(mktg_store_targets_today['target-acq'], errors='coerce')
# etl
mktg_store_targets_today['created-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
mktg_store_targets_today['created-by'] = 'etl-automation'
mktg_store_targets_today['updated-at'] = dt.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
mktg_store_targets_today['updated-by'] = 'etl-automation'
# Write to csv
s3.save_df_to_s3(df=mktg_store_targets_today[table_info['column_name']], file_name='marketing_store_targets.csv')
# upload to db
s3.write_df_to_db(df=mktg_store_targets_today[table_info['column_name']], table_name=table_name, db=rs_db,
schema=schema) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/mktg-store-targets/mktg-store-targets.py | mktg-store-targets.py |
import os
import sys
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper import helper
from dateutil.tz import gettz
import json
import datetime
import argparse
import pandas as pd
import numpy as np
import traceback
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected]", type=str, required=False)
parser.add_argument('-yc', '--year_cutoff', default='2019', type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
year_cutoff = args.year_cutoff
os.environ['env'] = env
logger = get_logger(level='INFO')
rs_db = DB()
rs_db.open_connection()
rs_db_write = DB(read_only=False)
rs_db_write.open_connection()
s3 = S3()
start_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
logger.info('Script Manager Initialized')
logger.info(f"env: {env}")
logger.info("email_to - " + email_to)
logger.info("year_cutoff - " + year_cutoff)
logger.info("")
# date parameter
logger.info("code started at {}".format(datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime(
'%Y-%m-%d %H:%M:%S')))
logger.info("")
stores_query = """
select
distinct id as "store-id"
from
"prod2-generico".stores s """
stores = rs_db.get_df(stores_query)
logger.info("Fetched distinct stores")
q_aa = """
select
ri."id" as "return-id",
ri."inventory-id",
ri."returned-quantity",
rtd."store-id",
str."name" as "store-name",
drug."drug-name",
h."name" as "dist-name",
dn."serial",
ri."net",
case
when date(iv."dispatched-at") = '0101-01-01' then 1
else 0
end as flag,
iv."approved-at" as "invoice-approved-at",
iv."dispatched-at" as "invoice-dispatched-at",
rtd."created-at",
ri."approved-at",
dn."dispatched-at",
ri."settled-at" as "ri-settled-at",
dn."settled-at" as "dn-settled-at",
dn."accounted-at",
ri."status" as "return-status",
dn."status" as "dispatch-status",
concat(DATE_PART(mon, rtd."created-at"), concat( '-', DATE_PART(y, rtd."created-at"))) as "created-period",
concat(DATE_PART(mon, dn."dispatched-at"), concat('-', DATE_PART(y, dn."dispatched-at"))) as "dispatch-period",
ri."return-reason",
date(inv."expiry") as "expiry",
h."credit-period" ,
concat(DATE_PART(mon, ri."settled-at"), concat('-', DATE_PART(y, ri."settled-at"))) as "settled-period",
ri."discard-reason",
date(ri."discarded-at") as "discarded-at",
rtd."created-by",
ri."approved-by",
iv."invoice-number",
date(iv."invoice-date") as "invoice-date",
sdm."forward-dc-id" as "DC",
case
when (date(iv."dispatched-at")= '0101-01-01'
or iv."dispatched-at" > rtd."created-at") then 'DC'
else 'Store'
end as "Origin",
h."type" as "distributor-type",
case
when h."credit-period">0 then 'credit'
else 'non-credit'
end as "distributor credit",
case
when ri."debit-note-reference" is null
and ri."status" = 'saved' then '1.Saved'
when ri."debit-note-reference" is null
and ri."status" = 'approved' then '2.Approved'
when ri."debit-note-reference" is not null
and dn."status" = 'Saved' then '3.DN Saved'
when ri."debit-note-reference" is not null
and dn."status" = 'dispatched' then '4.DN Dispatched'
when ri."debit-note-reference" is not null
and dn."status" = 'Settled' then '5.DN Settled'
when ri."debit-note-reference" is not null
and dn."status" = 'Accounted' then '6.DN Accounted'
when ri."status" = 'discarded' then '7.discarded'
else 'Status issue'
end as "Comprehensive status",
case
when (date(iv."dispatched-at")= '0101-01-01'
or iv."dispatched-at" > rtd."created-at")
and extract(y from iv."dispatched-at") > {year_cutoff}
and ri."return-reason" in (
'reason-product-damaged',
'reason-not-ordered',
'reason-to-be-returned',
'reason-wrong-product',
'reason-softcopy-excess',
'reason-near-expiry',
'reason-product-expired',
'reason-na',
'reason-already-returned',
'reason-customer-refused',
'reason-wrongly-ordered',
'reason-excess-supplied',
'reason-non-moving',
'reason-wrong-mrp',
'reason-wrong-expiry') then 'DC Salable returns'
when (date(iv."dispatched-at")= '0101-01-01'
or iv."dispatched-at" > rtd."created-at")
and extract(y from iv."dispatched-at") > {year_cutoff}
and ri."return-reason" in (
'reason-product-short',
'reason-short-from-dc') then 'DC Short returns'
when ri."return-reason" in ('reason-product-damaged', 'reason-near-expiry', 'reason-product-expired', 'reason-wrong-expiry') then 'Store Expiry'
when ri."return-reason" in ('reason-not-ordered', 'reason-to-be-returned', 'reason-wrong-product', 'reason-softcopy-excess', 'reason-na', 'reason-already-returned', 'reason-customer-refused', 'reason-wrongly-ordered', 'reason-excess-supplied', 'reason-non-moving', 'reason-wrong-mrp') then 'Store Salable'
when ri."return-reason" in ('reason-product-short', 'reason-short-from-dc') then 'Store Short'
else 'issue'
end as "Comprehensive reasons"
from
"prod2-generico"."return-items" ri
left join "prod2-generico"."returns-to-dc" rtd on
ri."return-id" = rtd."id"
left join "prod2-generico"."debit-notes" dn on
ri."debit-note-reference" = dn."id"
left join "prod2-generico"."stores" str on
rtd."store-id" = str."id"
left join "prod2-generico"."inventory" inv on
inv."id" = ri."inventory-id"
left join "prod2-generico"."drugs" drug on
drug."id" = inv."drug-id"
left join "prod2-generico"."invoices" iv on
iv."id" = inv."invoice-id"
left join "prod2-generico"."distributors" h on
h."id" = iv."distributor-id"
left join "prod2-generico"."store-dc-mapping" sdm on
sdm."store-id" = str."id"
and h."type" = sdm."drug-type"
where
ri."status" not in ('reverted',
'deleted')
""".format(year_cutoff=year_cutoff)
df2 = rs_db.get_df(q_aa)
logger.info("Fetched return data")
df2['net'] = df2['net'].astype(float)
# df2[['expiry','invoice-date',]].astype(datetime.datetime.date())
status2 = False
# pd.to_datetime(df2['expiry'])
try:
schema = 'prod2-generico'
table_name = 'wc-returns'
table_info = helper.get_table_info(db=rs_db_write, table_name=table_name, schema=schema)
truncate_query = """
delete from "prod2-generico"."wc-returns"
"""
rs_db_write.execute(truncate_query)
logger.info(str(table_name) + ' table deleted')
s3.write_df_to_db(df=df2[table_info['column_name']], table_name=table_name, db=rs_db_write,
schema=schema)
logger.info(str(table_name) + ' table uploaded')
status2 = True
except Exception as error:
    logger.exception(error)
    status2 = False
if status2 is True:
status = 'Success'
else:
status = 'Failed'
# logger.close()
end_time = datetime.datetime.now(tz=gettz('Asia/Kolkata'))
difference = end_time - start_time
min_to_complete = round(difference.total_seconds() / 60, 2)
email = Email()
email.send_email_file(subject=f"{env}-{status} : {table_name} table updated",
mail_body=f"{table_name} table updated, Time for job completion - {min_to_complete} mins ",
to_emails=email_to, file_uris=[])
# Closing the DB Connection
rs_db.close_connection()
rs_db_write.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/wc-returns/wc-returns.py | wc-returns.py |
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.db.db import DB
import argparse
import pandas as pd
import datetime
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="stage", type=str, required=False)
parser.add_argument('-et', '--email_to', default="[email protected],[email protected]", type=str, required=False)
parser.add_argument('-si', '--store_id', default=2, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
email_to = args.email_to
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
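# snapshot the stores and drugs masters and email them as attachments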
stores_query = f'''
select
*
from
"prod2-generico"."stores"
'''
rs_db.execute(query=stores_query, params=None)
stores: pd.DataFrame = rs_db.cursor.fetch_dataframe()
stores.columns = [col.replace('-', '_') for col in stores.columns]
drug_query = f'''
select
id,
"drug-name",
company,
composition,
type,
pack ,
"pack-form",
"pack-of",
"available-in"
from
"prod2-generico"."drugs"
'''
rs_db.execute(query=drug_query, params=None)
drugs: pd.DataFrame = rs_db.cursor.fetch_dataframe()
drugs.columns = [col.replace('-', '_') for col in drugs.columns]
run_date = str(datetime.datetime.now().date())
store_file_name = 'stores_state_{}.xlsx'.format(str(run_date))
drugs_file_name = 'drugs_state_{}.xlsx'.format(str(run_date))
# Uploading the file to s3
store_uri = s3.save_df_to_s3(df=stores, file_name=store_file_name)
drugs_uri = s3.save_df_to_s3(df=drugs, file_name=drugs_file_name)
# Sending email
subject = ''' Stores and Drugs Master Snapshot '''
mail_body = '''Stores and Drugs Master Snapshot - {}
'''.format(run_date)
file_uris = [store_uri, drugs_uri]
email = Email()
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=file_uris)
# deleting the old files from S3
for uri in file_uris:
s3.delete_s3_obj(uri=uri) | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/stores_drugs_master_automailer/stores_drugs_master_automailer.py | stores_drugs_master_automailer.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "patient-requests-metadata"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}" where date("created-at")>= date(date_trunc('month', current_date) - interval '6 month'); """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id" ,
"created-at",
"created-by",
"updated-by",
"updated-at",
"year-created-at",
"month-created-at",
"patient-id",
"doctor-id",
"store-id",
"bill-id",
"drug-id",
"zeno-order-id",
"drug-name",
"pso-requested-quantity",
"pso-inventory-quantity",
"order-number",
"order-source",
"order-type",
"patient-request-id",
"payment-type",
"promo-id",
"pso-status",
"fulfilled-to-consumer",
"type",
"category",
"company",
"company-id",
"composition",
"composition-master-id",
"lp-fulfilled-qty",
"sb-id" ,
"ff-distributor",
"ordered-distributor-id",
"quantity",
"required-quantity",
"ordered-at",
"completed-at",
"invoiced-at",
"dispatched-at",
"received-at",
"sb-status",
"decline-reason",
"inventory-at-ordering",
"re-ordered-at",
"dc-ff-time",
"store-received-ff-time",
"consumer-ff-time",
"order-raised-at-dc",
"order-raised-at-distributor",
"billed-at",
"store-name",
"store-manager",
"line-manager",
"abo",
"city",
"store-b2b",
"substituted",
"gross-quantity",
"gross-revenue-value",
"net-quantity",
"net-revenue-value",
"selling-rate",
"store-delivered-at",
"franchisee-short-book",
"pr-created-at",
"sb-created-at",
"acquired" ,
"old-new-static" ,
"completion-type" ,
"slot-id" ,
"slot-type",
"turnaround-time",
"group"
)
select
pso."id" as "id",
pso."created-at" as "created-at",
pso."created-by" as "created-by",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',
GETDATE()) as "updated-at",
extract(year
from
pso."created-at") as "year-created-at",
extract(month
from
pso."created-at") as "month-created-at",
pso."patient-id" as "patient-id" ,
pso."doctor-id" as "doctor-id" ,
pso."store-id" as "store-id" ,
pso."bill-id" as "bill-id" ,
pso."drug-id" as "drug-id",
pso."zeno-order-id" as "zeno-order-id",
pso."drug-name" as "drug-name" ,
pso."requested-quantity" as "pso-requested-quantity",
pso."inventory-quantity" as "pso-inventory-quantity",
pso."order-number" as "order-number" ,
pso."order-source" as "order-source" ,
pso."order-type" as "order-type" ,
pso."patient-request-id" as "patient-request-id" ,
pso."payment-type" as "payment-type" ,
pso."promo-id" as "promo-id",
pso.status as "pso-status",
(case
when ms."gross-quantity" > 0 then 1
else 0
end) as "fulfilled-to-consumer",
d2."type" ,
d2."category" ,
d2."company" ,
d2."company-id" as "company-id" ,
d2."composition" ,
d2."composition-master-id" as "composition-master-id",
NVL(prlp."lp-fulfilled-qty",
0) as "lp-fulfilled-qty",
sb."id" as "sb-id",
sb."distributor-id" as "ff-distributor",
sb."ordered-distributor-id" as "ordered-distributor-id",
sb."quantity" as "quantity" ,
sb."required-quantity" as "required-quantity" ,
case
when sb."ordered-at" = '0101-01-01' then null
else sb."ordered-at"
end as "ordered-at",
case
when sb."completed-at" = '0101-01-01' then null
else sb."completed-at"
end as "completed-at",
case
when sb."invoiced-at" = '0101-01-01' then null
else sb."invoiced-at"
end as "invoiced-at",
case
when sb."dispatched-at" = '0101-01-01' then null
else sb."dispatched-at"
end as "dispatched-at",
case
when sb."received-at" = '0101-01-01' then null
else sb."received-at"
end as "received-at",
sb."status" as "sb-status",
sb."decline-reason" as "decline-reason",
sb."inventory-at-ordering" as "inventory-at-ordering" ,
case
when sb."re-ordered-at" = '0101-01-01' then null
else sb."re-ordered-at"
end as "re-ordered-at",
(case
when (pso."created-at" = '0101-01-01'
or msda."store-delivered-at" = '0101-01-01') then null
else datediff(hour,
pso."created-at",
msda."store-delivered-at")
end) as "dc-ff-time",
(case
when (pso."created-at" = '0101-01-01'
or sb."received-at" = '0101-01-01') then null
else datediff(hour,
pso."created-at",
sb."received-at")
end) as "store-received-ff-time",
(case
when (pso."created-at" = '0101-01-01'
or b2."created-at" = '0101-01-01') then null
else datediff(hour,
pso."created-at",
b2."created-at")
end) as "consumer-ff-time",
(case
when sb."quantity">0 then 1
else 0
end) as "order-raised-at-dc",
(case
when ("ordered-at" = '0101-01-01'
or "ordered-at" is null) then 0
else 1
end) as "order-raised-at-distributor",
b2."created-at" as "billed-at",
msm."store" as "store-name",
msm."store-manager",
msm."line-manager",
msm."abo",
msm."city",
msm."store-b2b",
case
when msc."generic-flag" is null then 'not-available'
when msc."generic-flag" is not null
and d2."type" = 'generic' then 'substituted'
when msc."generic-flag" is not null
and d2."type" != 'generic' then 'not-substituted'
else 'not-available'
end as "substituted",
ms."gross-quantity",
ms."gross-revenue-value",
ms."net-quantity",
ms."net-revenue-value",
case
when (pso."selling-rate" is null
or pso."selling-rate" = 0)
and d2."type" = 'generic' then 35
when (pso."selling-rate" is null
or pso."selling-rate" = 0)
and d2."type" != 'generic' then 100
else pso."selling-rate"
end as "selling-rate",
msda."store-delivered-at",
sb."franchisee-short-book" as "franchisee-short-book",
pra."created-at" as "pr-created-at",
sb."created-at" as "sb-created-at" ,
msm."acquired" ,
msm."old-new-static" ,
pra."completion-type" ,
pso."slot-id" ,
ss."slot-type",
pso."turnaround-time",
d1."group"
from
"prod2-generico"."patients-store-orders" pso
left join (
select
prlp."patient-request-id" , sum("fulfilled-quantity") as "lp-fulfilled-qty"
from
"prod2-generico"."patient-request-local-purchase" prlp
inner join "prod2-generico"."patients-store-orders" pso on
NVL(pso."patient-request-id",
0) = prlp."patient-request-id"
group by
prlp."patient-request-id" ) as prlp on
prlp."patient-request-id" = NVL(pso."patient-request-id",
0)
left join "prod2-generico"."patient-requests" pra on
pra."id" = NVL(pso."patient-request-id",
0)
left join "prod2-generico"."patient-requests-short-books-map" mprsb on
NVL(pso."patient-request-id",
0) = mprsb."patient-request-id"
left join "prod2-generico"."short-book-1" sb on
sb.id = mprsb."short-book-id"
left join "prod2-generico"."store-delivered" msda on
mprsb."short-book-id" = msda."id"
left join "prod2-generico"."bills-1" b2 on
b2.id = NVL(pso."bill-id",
0)
left join "prod2-generico"."drugs" d2 on
d2."id" = pso."drug-id"
left join "prod2-generico"."drug-unique-composition-mapping" d1 on
pso."drug-id" = d1."drug-id"
left join "prod2-generico"."substitutable-groups" msc on
msc."id" = d1."group"
left join "prod2-generico"."sales-agg" ms on
ms."bill-id" = pso."bill-id"
and ms."drug-id" = pso."drug-id"
left join "prod2-generico"."stores-master" msm on
pso."store-id" = msm.id
left join "prod2-generico"."store-slots" ss on
pso."slot-id" = ss.id
where date(pso."created-at")>=date(date_trunc('month', current_date) - interval '6 month');
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
# ##Vacuum Clean
#
# clean = f"""
# VACUUM full "prod2-generico"."patient-requests-metadata";
# """
# db.execute(query=clean)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
logger.info("running job for patient request")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/patient-request/patient-request.py | patient-request.py |
import argparse
import os
import sys
import pandas as pd
from pandas.io.json import json_normalize
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.fresh_service.fresh_service import FreshService
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-l', '--limit', default=None, type=int, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
limit = args.limit
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB(read_only=False)
rs_db.open_connection()
s3 = S3()
all_tickets_data = pd.DataFrame()
page = 1
check = 1
# get the last updated at
query = f"""
select
max("updated-at") as "max-updated-at"
from
"prod2-generico"."freshservice-tickets"
"""
max_update_date_df: pd.DataFrame = rs_db.get_df(query=query)
max_update_date = None
if not max_update_date_df.empty:
max_update_date = max_update_date_df.values[0][0]
fs = FreshService()
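# page through the Freshservice tickets API until an empty page is returned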
while True:
tickets = fs.get_tickets(page=page, updated_since=None)
df = json_normalize(tickets)
if limit and limit < page:
logger.info(f"fetched given pages: {limit}")
# this break is for testing purpose
break
if len(df) > 0:
logger.info(f"page no: {page}, length len(df): {len(df)}")
page += 1
df['created-by'] = 'etl-automation'
df['updated-by'] = 'etl-automation'
all_tickets_data = all_tickets_data.append(df)
else:
logger.info("Fetched all tickets successfully")
break
all_tickets_data.columns = [c.replace("_", "-") for c in all_tickets_data.columns]
# Fixing the data types
for col in ['created-at', 'updated-at', 'due-by']:
all_tickets_data[col] = pd.to_datetime(all_tickets_data[col], errors='coerce')
for col in ['requester-id', 'responder-id', 'group-id', 'id', 'owner-id', 'priority', 'urgency', 'assoc-problem-id',
'assoc-change-id', 'assoc-asset-id', 'display-id']:
# all_tickets_data[col] = all_tickets_data[col].astype('str', errors='ignore')
all_tickets_data[col] = all_tickets_data[col].astype('Int64', errors='ignore')
col_length = {"to-emails": 240, 'description': 49000, 'description-html': 15000, 'subject': 500}
for col in col_length.keys():
all_tickets_data[col] = all_tickets_data[col].apply(lambda x: str(x)[0:col_length[col]])
# all_tickets_data.info()
schema = 'prod2-generico'
table_name = 'freshservice-tickets'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
if isinstance(table_info, type(None)):
raise Exception(f"table: {table_name} do not exist, create the table first")
logger.info(f"Table:{table_name} exists and input data has all columns")
# FIXME: Why download all tickets every time, every API call has $ attached to it. use filter in the API call
# link: https://api.freshservice.com/#filter_tickets
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" '''
rs_db.execute(truncate_query)
s3.write_df_to_db(df=all_tickets_data[table_info['column_name']], table_name=table_name, db=rs_db, schema=schema)
logger.info("Pushed tickets successfully")
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/fresh-service-tickets/fresh-service-tickets.py | fresh-service-tickets.py |
import argparse
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB
def main(db):
table_name = "store-delivered"
db.execute(query="begin ;")
db.execute(query=f""" delete from "prod2-generico"."{table_name}"; """)
query = f"""
insert
into
"prod2-generico"."{table_name}" (
"id",
"created-by",
"created-at",
"updated-by",
"updated-at",
"store-delivered-at",
"needs-recompute-aggvar",
"num-rec"
)
select
s."short-book-id" as "id",
'etl-automation' as "created-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "created-at",
'etl-automation' as "updated-by",
convert_timezone('Asia/Calcutta',GETDATE()) as "updated-at",
MAX(b."delivered-at") as "store-delivered-at",
NULL AS "needs-recompute-aggvar",
NULL AS "num-rec"
from
"prod2-generico"."short-book-invoice-items" s
join "prod2-generico"."invoice-items" c on
s."invoice-item-id" = c."id"
join "prod2-generico"."invoices" b on
c."invoice-id" = b.id
group by
s."short-book-id";
"""
db.execute(query=query)
""" committing the transaction """
db.execute(query=" end; ")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
""" For single transaction """
rs_db.connection.autocommit = False
""" calling the main function """
main(db=rs_db)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/store-delivered-at/store-delivered-at.py | store-delivered-at.py |
import argparse
import json
import sys
import os
sys.path.append('../../../..')
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.db.db import DB, MongoDB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.helper import helper
import pandas as pd
import datetime
from datetime import datetime as dt
from datetime import timedelta
from dateutil.tz import gettz
import dateutil
def main(rs_db, mg_client, s3, data):
schema = 'prod2-generico'
table_name = 'ecomm-inbound-call-report'
date_field = 'created-at'
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema=schema)
# job_data_params = {"end": "2021-12-31", "start": "2021-12-01", "full_run": 1, "alternate_range": 0}
# params
job_data_params = data
if job_data_params['full_run']:
start = '2017-05-13'
elif job_data_params['alternate_range']:
start = job_data_params['start']
else:
start = str(dt.today().date() - timedelta(days=1))
# convert date to pymongo format
start = dateutil.parser.parse(start)
# Read Generico crm table
db = mg_client['generico-crm']
collection = db["exotelReportLogs"].find({
"exotelNumber": {"$regex": '2071171644'},
"direction": "inbound",
"$or": [{"toName": {"$regex": '2071171644'}}, {"toName": 'ZenoCRM1 Incoming'}],
"status": {
"$in": [
"missed-call",
"completed"
]
}
})
callog_inbound = pd.DataFrame(list(collection))
callog_inbound = callog_inbound[
['_id', 'exotelId', 'exotelNumber', 'from', 'FromName', 'to', 'toName', 'status', 'createdAt']]
dict = {'_id': 'id',
'exotelId': 'exotel-id',
'exotelNumber': 'exotel-number',
'FromName': 'from-name',
'toName': 'to-name',
'startTime': 'start-time',
'endTime': 'end-time',
'createdAt': 'created-at'}
callog_inbound.rename(columns=dict, inplace=True)
callog_inbound['etl-created-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
callog_inbound['etl-updated-at'] = datetime.datetime.now(tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
callog_inbound['created-by'] = 'etl-automation'
callog_inbound['updated-by'] = 'etl-automation'
if isinstance(table_info, type(None)):
logger.info(f"table: {table_name} do not exist")
else:
truncate_query = f''' DELETE FROM "{schema}"."{table_name}" where "{date_field}">'{start}' '''
logger.info(truncate_query)
rs_db.execute(truncate_query)
""" seek the data """
logger.info(callog_inbound.head(1))
logger.info(table_info)
file_s3_uri_save = s3.save_df_to_s3(df=callog_inbound[table_info['column_name']], file_name="callog_inbound.csv")
s3.write_to_db_from_s3_csv(table_name=table_name,
file_s3_uri=file_s3_uri_save,
db=rs_db, schema=schema)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-d', '--data', default=None, type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
data = args.data
os.environ['env'] = env
logger = get_logger()
logger.info(f"data: {data}")
data = json.loads(data) if data else {}
logger.info(f"env: {env}")
rs_db = DB()
rs_db.open_connection()
mg_db = MongoDB()
mg_client = mg_db.open_connection("generico-crm")
s3 = S3()
""" calling the main function """
main(rs_db=rs_db, mg_client=mg_client, s3=s3, data=data)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/ecomm_inbound_call_report/ecomm_inbound_call_report.py | ecomm_inbound_call_report.py |
import os
import sys
import pandas as pd
import datetime as dt
import numpy as np
import statistics as stats
from dateutil.tz import gettz
sys.path.append('../../../..')
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
import argparse
def main(debug_mode, rs_db, read_schema, write_schema, table_name, s3, logger):
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
try:
# get drug-patient data from mos
logger.info(
"Getting historical data from sales table, bill_flag='gross'")
q_mos_drugs = f"""
select "drug-id" , "bill-id" , "patient-id" , quantity ,
"revenue-value" as sales, date("created-at") as bill_date
from "{read_schema}".sales s
where "bill-flag" = 'gross'
and DATEDIFF(day, date("created-at"), current_date) < 730
and "store-id" not in (243)
"""
df_mos_drugs = rs_db.get_df(q_mos_drugs)
df_mos_drugs.columns = [c.replace('-', '_') for c in
df_mos_drugs.columns]
df_mos_drugs["bill_date"] = pd.to_datetime(df_mos_drugs["bill_date"])
df_drugs = df_mos_drugs.drop(
["bill_id", "patient_id", "quantity", "sales",
"bill_date"], axis=1).drop_duplicates()
dd_qty_sales = df_mos_drugs.groupby("drug_id", as_index=False).agg(
{"quantity": "sum", "sales": "sum"})
################################
# get purchase interval of drugs
################################
logger.info("Calculating patient-drug-interval")
df_mos_drugs["bill_date1"] = df_mos_drugs["bill_date"]
grp_pts_drug = df_mos_drugs.groupby(["patient_id", "drug_id"],
as_index=False).agg(
{"bill_date": "count", "bill_date1": "max"})
grp_pts_drug.rename(
{"bill_date": "bill_counts", "bill_date1": "latest_date"}, axis=1,
inplace=True)
        # only drugs with at least 4 bills are considered
grp_pts_drug = grp_pts_drug.loc[grp_pts_drug["bill_counts"] > 3]
df_mos_drugs = df_mos_drugs.drop("bill_date1", axis=1)
        # only latest 20 patients considered for purchase interval calculation
grp_drugs = grp_pts_drug.groupby(["drug_id"], as_index=False).agg(
{'patient_id': latest_20})
pts_drugs_to_consider = grp_drugs.explode('patient_id')
pts_drugs_to_consider = pts_drugs_to_consider.merge(df_mos_drugs,
on=["patient_id",
"drug_id"],
how="left")
interval_pts_drug = pts_drugs_to_consider.groupby(
["patient_id", "drug_id"],
as_index=False).agg(
{"bill_date": pts_drug_interval})
interval_pts_drug.rename({"bill_date": "purchase_interval"}, axis=1,
inplace=True)
drug_intv = interval_pts_drug.groupby("drug_id", as_index=False).agg(
{"purchase_interval": median})
# handling edge cases
drug_intv["purchase_interval"] = np.where(
drug_intv["purchase_interval"] == 0, 180,
drug_intv["purchase_interval"])
drug_intv["purchase_interval"] = np.where(
drug_intv["purchase_interval"] > 180, 180,
drug_intv["purchase_interval"])
logger.info("patient-drug-interval calculation finished")
df_drugs = df_drugs.merge(dd_qty_sales, on="drug_id", how="left")
df_drugs.rename({"quantity": "qty_sold_l2y", "sales": "revenue_l2y"},
axis=1, inplace=True)
df_drugs = df_drugs.merge(drug_intv, on="drug_id", how="left")
df_drugs["purchase_interval"] = df_drugs["purchase_interval"].fillna(
180)
dd = df_mos_drugs.groupby("drug_id", as_index=False).agg(
{"bill_id": count_unique})
df_drugs = df_drugs.merge(dd, on="drug_id", how="left")
df_drugs.rename({"bill_id": "num_bills_l2y"}, axis=1, inplace=True)
dd = df_mos_drugs.groupby("drug_id", as_index=False).agg(
{"quantity": mode})
df_drugs = df_drugs.merge(dd, on="drug_id", how="left")
df_drugs.rename({"quantity": "mode"}, axis=1, inplace=True)
dd = df_mos_drugs.groupby("drug_id", as_index=False).agg(
{"quantity": median})
df_drugs = df_drugs.merge(dd, on="drug_id", how="left")
df_drugs.rename({"quantity": "median"}, axis=1, inplace=True)
df_drugs["std_qty"] = np.where(df_drugs["mode"] > df_drugs["median"],
df_drugs["median"], df_drugs["mode"])
df_drugs["std_qty"] = np.where(df_drugs["num_bills_l2y"] <= 10, 1,
df_drugs["std_qty"])
df_drugs["std_qty"] = np.where(df_drugs["std_qty"] > 30, 1,
df_drugs["std_qty"])
df_drugs["std_qty"] = df_drugs["std_qty"].fillna(1)
df_drugs["revenue_l2y"] = df_drugs["revenue_l2y"].fillna(0)
df_drugs["qty_sold_l2y"] = df_drugs["qty_sold_l2y"].fillna(0)
df_drugs["std_qty"] = df_drugs["std_qty"].astype(int)
df_drugs["revenue_l2y"] = df_drugs["revenue_l2y"].astype(float)
df_drugs["qty_sold_l2y"] = df_drugs["qty_sold_l2y"].astype(int)
df_drugs.dropna(subset=['drug_id', 'num_bills_l2y'], inplace=True)
df_drugs["drug_id"] = df_drugs["drug_id"].astype(int)
df_drugs["num_bills_l2y"] = df_drugs["num_bills_l2y"].astype(int)
df_drugs["avg_selling_rate"] = df_drugs["revenue_l2y"] / df_drugs[
"qty_sold_l2y"]
################################
# get avg-ptr and drug-type info
################################
logger.info("Calculating other fields")
# get PTR info for all drugs
q_inv = f"""
SELECT "drug-id" as drug_id , AVG(ptr) as avg_ptr
from "{read_schema}"."inventory-1" i
where DATEDIFF(day, date("created-at"), current_date) < 730
group by "drug-id"
"""
df_inv = rs_db.get_df(q_inv)
df_drugs = df_drugs.merge(df_inv, on="drug_id", how="left")
# get necessary drug info from drugs master
q_drugs = f"""
SELECT id as drug_id, type
from "{read_schema}".drugs d
"""
df_drug_info = rs_db.get_df(q_drugs)
df_drugs = df_drugs.merge(df_drug_info, on="drug_id", how="left")
# default ptr value for generic=35 and rest=100
df_drugs["avg_ptr"] = np.where(
(df_drugs["avg_ptr"].isna()) & (df_drugs["type"] == "generic"), 35,
df_drugs["avg_ptr"])
df_drugs["avg_ptr"] = np.where(
(df_drugs["avg_ptr"].isna()) & (df_drugs["type"] != "generic"), 100,
df_drugs["avg_ptr"])
# required format for RS wrtie
df_drugs = df_drugs[['drug_id', 'qty_sold_l2y', 'revenue_l2y',
'num_bills_l2y', 'std_qty', 'purchase_interval',
'avg_ptr', 'avg_selling_rate']]
df_drugs.columns = [c.replace('_', '-') for c in df_drugs.columns]
df_drugs['created-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df_drugs['created-by'] = 'etl-automation'
df_drugs['updated-at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
df_drugs['updated-by'] = 'etl-automation'
logger.info("All calculations complete")
if debug_mode == 'N':
logger.info(f"Truncating {table_name} in {write_schema}")
truncate_query = f"""
truncate table "{write_schema}"."{table_name}"
"""
rs_db.execute(truncate_query)
logger.info(f"Truncating {table_name} in {write_schema} successful")
logger.info("Writing table to RS-DB")
s3.write_df_to_db(df=df_drugs, table_name=table_name,
db=rs_db, schema=write_schema)
logger.info("Writing table to RS-DB completed")
else:
logger.info("Writing to RS-DB skipped")
status = 'Success'
logger.info(f"Drug-Std-Info code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"Drug-Std-Info code execution status: {status}")
return status
def pts_drug_interval(pd_arr):
"""Purchase interval between buying on patient-drug level
considering median interval"""
df = pd.DataFrame(pd_arr, columns=["bill_date"])
df = df.sort_values(by='bill_date', ascending=True)
df["delta"] = (df['bill_date']-df['bill_date'].shift())
df = df.dropna()
median_days = df["delta"].median().days
return median_days
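# illustration (hypothetical dates): bills on 2022-01-01, 2022-01-10 and 2022-01-25
# give deltas of 9 and 15 days, so the median purchase interval returned is 12 days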
def latest_20(pd_arr):
"""To consider only latest 20 patients who bought drug in more than 4 qty
objective: to reduce run time"""
pts_list = list(pd_arr)[-20:]
return pts_list
def count_unique(pd_arr):
return len(pd_arr.unique())
def mode(pd_arr):
return min(pd_arr.mode())
def median(pd_arr):
return stats.median(pd_arr)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-d', '--debug_mode', default="Y", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
debug_mode = args.debug_mode
email_to = args.email_to
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
table_name = 'drug-std-info'
logger = get_logger()
rs_db = DB()
s3 = S3()
# open RS connection
rs_db.open_connection()
""" calling the main function """
status = main(debug_mode=debug_mode, rs_db=rs_db, read_schema=read_schema,
write_schema=write_schema, table_name=table_name, s3=s3,
logger=logger)
# close RS connection
rs_db.close_connection()
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
reset_date = dt.date.today().strftime("%Y-%m-%d")
email.send_email_file(
subject=f"Drug-STD-Info Update (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
Job Params: {args}
""",
to_emails=email_to)
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/drug_std_info/drug_std_info.py | drug_std_info.py |
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime as dt
from dateutil.tz import gettz
from fractions import Fraction
sys.path.append('../../../..')
from zeno_etl_libs.utils.distributor_ranking.distributor_ranking_calc import ranking_calc_dc, ranking_calc_franchisee
from zeno_etl_libs.utils.distributor_ranking.ranking_intervention import ranking_override_dc, ranking_override_franchisee
from zeno_etl_libs.utils.distributor_ranking.postprocess_ranking import postprocess_ranking_dc, postprocess_ranking_franchisee
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB, MySQL
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
def main(debug_mode, weights_as, weights_pr, as_low_volume_cutoff,
pr_low_volume_cutoff, low_volume_cutoff_franchisee, volume_fraction,
time_interval, time_interval_franchisee, rank_override_dc_active,
rank_override_franchisee_active, db_read, db_write, read_schema,
write_schema, s3, logger):
mysql_write = MySQL(read_only=False)
logger.info(f"Debug Mode: {debug_mode}")
status = 'Failed'
reset_date = dt.date.today()
# weights format is [lead time, margin, bounce rate, ff, lost recency, success recency]
weights_as = [float(Fraction(i)) for i in list(weights_as.values())]
weights_pr = [float(Fraction(i)) for i in list(weights_pr.values())]
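# e.g. the default AS weights '2/13', '1/13', '4/13', ... become floats ~0.154, ~0.077, ~0.308, ...
# the six weights are expected to sum to 1, otherwise the defaults below are used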
# define empty variable in case of code fail
dc_evaluated = []
franchisee_stores_evaluated = []
logger.info("Checking input weights")
try:
assert(sum(weights_as) == 1)
assert(sum(weights_pr) == 1)
logger.info("Weight inputs summing up to one")
except:
logger.info("Weights not summing up to one, reverting to defaults")
weights_as = [2 / 13, 1 / 13, 4 / 13, 4 / 13, 1 / 13, 1 / 13]
weights_pr = [6 / 15, 1 / 15, 3 / 15, 3 / 15, 1 / 15, 1 / 15]
try:
# calculate ranks
logger.info("Calculating Zippin DC-level Ranking")
features_rank_dc = ranking_calc_dc(
time_interval=time_interval, weights_as=weights_as,
weights_pr=weights_pr, as_low_volume_cutoff=as_low_volume_cutoff,
pr_low_volume_cutoff=pr_low_volume_cutoff,
volume_fraction=volume_fraction,
db=db_read, read_schema=read_schema, logger=logger)
logger.info("Completed Zippin DC-level Ranking")
logger.info("Calculating Franchisee Store-level Ranking")
features_rank_franchisee = ranking_calc_franchisee(
time_interval=time_interval_franchisee,
weights_as=weights_as, weights_pr=weights_pr,
low_volume_cutoff=low_volume_cutoff_franchisee,
volume_fraction=volume_fraction,
db=db_read, read_schema=read_schema, logger=logger)
logger.info("Completed Franchisee Store-level Ranking")
logger.info('Number of dc-drug_id combinations evaluated :' +
str(features_rank_dc[features_rank_dc['request_type'] == 'AS/MS'].shape[0]))
logger.info('Number of franchisee store-drug_id combinations evaluated :' +
str(features_rank_franchisee[features_rank_franchisee['request_type'] == 'AS/MS'].shape[0]))
if rank_override_dc_active == 'Y':
logger.info("Rank override DC level begins")
features_rank_dc = ranking_override_dc(
features_rank_dc, db_read, read_schema, logger,
override_type_list=['AS/MS'])
logger.info("Rank override DC level successful")
if rank_override_franchisee_active == 'Y':
logger.info("Rank override franchisee store level begins")
features_rank_franchisee = ranking_override_franchisee(
features_rank_franchisee, db_read, read_schema, logger,
override_type_list=['AS/MS', 'PR'])
logger.info("Rank override franchisee store level successful")
# postprocess features for dc level ranking
tech_input_dc_level = postprocess_ranking_dc(features_rank_dc,
volume_fraction)
# postprocess features for franchisee store level ranking
tech_input_franchisee_level = postprocess_ranking_franchisee(
features_rank_franchisee, volume_fraction)
# combine both dc-level and franchisee-level ranking
tech_input = pd.concat([tech_input_dc_level, tech_input_franchisee_level])
# combine volume fraction split for cases where total distributors < 3
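# e.g. if only two distributors exist for a drug, final_dist_3 is null, so its
# share is rolled into volume_fraction_2; if only one exists, everything rolls
# into volume_fraction_1 and the remaining fractions are set to 0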
volume_fraction_split = tech_input['volume_fraction'].str.split(
pat='-', expand=True).rename(
columns={0: 'volume_fraction_1',
1: 'volume_fraction_2',
2: 'volume_fraction_3'})
tech_input['volume_fraction_1'] = volume_fraction_split[
'volume_fraction_1'].astype(float)
tech_input['volume_fraction_2'] = volume_fraction_split[
'volume_fraction_2'].astype(float)
tech_input['volume_fraction_3'] = volume_fraction_split[
'volume_fraction_3'].astype(float)
tech_input['volume_fraction_2'] = np.where(
tech_input['final_dist_3'].isna(),
tech_input['volume_fraction_2'] +
tech_input['volume_fraction_3'],
tech_input['volume_fraction_2'])
tech_input['volume_fraction_3'] = np.where(
tech_input['final_dist_3'].isna(), 0,
tech_input['volume_fraction_3'])
tech_input['volume_fraction_1'] = np.where(
tech_input['final_dist_2'].isna(),
tech_input['volume_fraction_1'] +
tech_input['volume_fraction_2'],
tech_input['volume_fraction_1'])
tech_input['volume_fraction_2'] = np.where(
tech_input['final_dist_2'].isna(), 0,
tech_input['volume_fraction_2'])
tech_input['volume_fraction'] = tech_input['volume_fraction_1'].astype(
'str') + '-' + tech_input['volume_fraction_2'].astype(
'str') + '-' + tech_input['volume_fraction_3'].astype('str')
tech_input = tech_input[
['dc_id', 'store_id', 'franchisee_id', 'drug_id',
'drug_type', 'request_type', 'volume_fraction',
'final_dist_1', 'final_dist_2', 'final_dist_3']]
############ adhoc changes by tech, table restructure ############
tech_input = tech_input.reset_index(
drop=True).reset_index().rename(columns={'index': 'id'})
tech_input[['volume_fraction_1', 'volume_fraction_2',
'volume_fraction_3']] = tech_input[
'volume_fraction'].str.split('-', 3, expand=True)
tech_input.loc[tech_input['request_type'] == 'AS/MS',
'request_type'] = 'manual-short/auto-short'
tech_input.loc[tech_input['request_type'] ==
'PR', 'request_type'] = 'patient-request'
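# reshape the wide ranking table (one row per rule with up to 3 distributors and
# 3 volume fractions) into long format: one row per rule-distributor pair for
# the distributor-ranking-rule-values table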
volume_fraction_melt = pd.melt(tech_input, id_vars=['id'],
value_vars=['volume_fraction_1',
'volume_fraction_2',
'volume_fraction_3']).sort_values(by='id')
distributor_melt = pd.melt(tech_input, id_vars=['id'],
value_vars=['final_dist_1',
'final_dist_2',
'final_dist_3']).sort_values(by='id').rename(columns={'value': 'distributor_id'})
distributor_ranking_rule_values = pd.merge(distributor_melt,
volume_fraction_melt,
left_index=True,
right_index=True,
suffixes=('', '_y'))
distributor_ranking_rule_values = distributor_ranking_rule_values[
['id', 'distributor_id', 'value']].rename(
columns={'id': 'distributor_ranking_rule_id'}).reset_index(
drop=True)
distributor_ranking_rule_values = distributor_ranking_rule_values.reset_index().rename(columns={'index': 'id'})
# drop null values in distributor_id (for cases where historical distributors are < 3)
distributor_ranking_rule_values = distributor_ranking_rule_values[
~distributor_ranking_rule_values['distributor_id'].isna()]
# convert distributor_id to int format
distributor_ranking_rule_values['distributor_id'] = \
distributor_ranking_rule_values['distributor_id'].astype(int)
distributor_ranking_rules = tech_input[['id', 'drug_id', 'dc_id',
'franchisee_id', 'store_id',
'drug_type', 'request_type']]
# for email info
dc_evaluated = distributor_ranking_rules["dc_id"].unique().tolist()
franchisee_stores_evaluated = distributor_ranking_rules["store_id"].unique().tolist()
# adding required fields
distributor_ranking_rules['rule_start_date'] = reset_date
distributor_ranking_rules['is_active'] = 1
distributor_ranking_rules['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
distributor_ranking_rules['created_by'] = 'etl-automation'
features_rank_dc.loc[:, 'reset_date'] = reset_date
features_rank_dc['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
features_rank_dc['created_by'] = 'etl-automation'
features_rank_franchisee.loc[:, 'reset_date'] = reset_date
features_rank_franchisee['created_at'] = dt.datetime.now(
tz=gettz('Asia/Kolkata')).strftime('%Y-%m-%d %H:%M:%S')
features_rank_franchisee['created_by'] = 'etl-automation'
# formatting column names
distributor_ranking_rule_values.columns = [c.replace('_', '-') for c in distributor_ranking_rule_values.columns]
distributor_ranking_rules.columns = [c.replace('_', '-') for c in distributor_ranking_rules.columns]
features_rank_dc.columns = [c.replace('_', '-') for c in features_rank_dc.columns]
features_rank_franchisee.columns = [c.replace('_', '-') for c in features_rank_franchisee.columns]
if debug_mode == 'N':
logger.info("Writing table to RS-DB")
logger.info("Writing to table: distributor-features-dc")
s3.write_df_to_db(df=features_rank_dc,
table_name='distributor-features-dc',
db=db_write, schema=write_schema)
logger.info("Writing to table: distributor-features-franchisee")
s3.write_df_to_db(df=features_rank_franchisee,
table_name='distributor-features-franchisee',
db=db_write, schema=write_schema)
logger.info("Writing table to RS-DB completed!")
mysql_write.open_connection()
logger.info("Updating table to MySQL")
try:
index_increment = int(
pd.read_sql(
'select max(id) from `distributor-ranking-rules`',
con=mysql_write.connection).values[0]) + 1
redundant_increment = int(
pd.read_sql(
'select max(id) from `distributor-ranking-rule-values`',
con=mysql_write.connection).values[0]) + 1
except:
index_increment = 1
redundant_increment = 1
logger.info(f"Incremented distributor-ranking-rules by {index_increment}")
logger.info(f"Incremented distributor-ranking-rule-values by {redundant_increment}")
distributor_ranking_rules['id'] = distributor_ranking_rules['id'] + index_increment
distributor_ranking_rule_values['distributor-ranking-rule-id'] = distributor_ranking_rule_values[
'distributor-ranking-rule-id'] + index_increment
distributor_ranking_rule_values['id'] = distributor_ranking_rule_values['id'] + redundant_increment
logger.info("Setting existing rules to inactive")
mysql_write.engine.execute("UPDATE `distributor-ranking-rules` SET `is-active` = 0")
logger.info("Writing to table: distributor-ranking-rules")
distributor_ranking_rules.to_sql(
name='distributor-ranking-rules',
con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=10000)
logger.info("Writing to table: distributor-ranking-rule-values")
distributor_ranking_rule_values.to_sql(
name='distributor-ranking-rule-values',
con=mysql_write.engine,
if_exists='append', index=False,
method='multi', chunksize=10000)
logger.info("Updating table to MySQL completed!")
mysql_write.close()
else:
logger.info("Writing to RS-DB & MySQL skipped")
status = 'Success'
logger.info(f"Distributor Ranking code execution status: {status}")
except Exception as error:
logger.exception(error)
logger.info(f"Distributor Ranking code execution status: {status}")
return status, reset_date, dc_evaluated, franchisee_stores_evaluated
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
parser.add_argument('-et', '--email_to',
default="[email protected]", type=str,
required=False)
parser.add_argument('-d', '--debug_mode', default="Y", type=str, required=True)
parser.add_argument('-ti', '--time_interval', default=90, type=int, required=False)
parser.add_argument('-tif', '--time_interval_franchisee', default=180, type=int, required=False)
parser.add_argument('-was', '--weights_as',
default={'lead_time':'2/13', 'margin':'1/13', 'bounce_rate':'4/13','ff':'4/13', 'lost_recency':'1/13', 'success_recency':'1/13' },
type=str, required=False)
parser.add_argument('-wpr', '--weights_pr',
default={'lead_time':'6/15', 'margin':'1/15', 'bounce_rate':'3/15','ff':'3/15','lost_recency':'1/15', 'success_recency':'1/15'},
type=str, required=False)
parser.add_argument('-aslvc', '--as_low_vol_cutoff', default=0.02, type=float,
required=False)
parser.add_argument('-prlvc', '--pr_low_vol_cutoff', default=0.01, type=float,
required=False)
parser.add_argument('-lvcf', '--low_vol_cutoff_franchisee', default=0.0, type=float,
required=False)
parser.add_argument('-vf', '--vol_frac', default="0.5-0.3-0.2", type=str,
required=False)
parser.add_argument('-rodc', '--rank_override_dc', default="N", type=str,
required=False)
parser.add_argument('-rof', '--rank_override_franchisee', default="N", type=str,
required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
email_to = args.email_to
debug_mode = args.debug_mode
weights_as = args.weights_as
weights_pr = args.weights_pr
as_low_volume_cutoff = args.as_low_vol_cutoff
pr_low_volume_cutoff = args.pr_low_vol_cutoff
low_volume_cutoff_franchisee = args.low_vol_cutoff_franchisee
volume_fraction = args.vol_frac
time_interval = args.time_interval
time_interval_franchisee = args.time_interval_franchisee
rank_override_dc_active = args.rank_override_dc
rank_override_franchisee_active = args.rank_override_franchisee
logger = get_logger()
s3 = S3()
rs_db_read = DB(read_only=True)
rs_db_write = DB(read_only=False)
read_schema = 'prod2-generico'
write_schema = 'prod2-generico'
# open RS connection
rs_db_read.open_connection()
rs_db_write.open_connection()
""" calling the main function """
status, reset_date, dc_evaluated, franchisee_stores_evaluated = main(
debug_mode, weights_as, weights_pr,
as_low_volume_cutoff, pr_low_volume_cutoff, low_volume_cutoff_franchisee,
volume_fraction, time_interval, time_interval_franchisee,
rank_override_dc_active, rank_override_franchisee_active, rs_db_read,
rs_db_write, read_schema, write_schema, s3, logger)
# close RS connection
rs_db_read.close_connection()
rs_db_write.close_connection()
# SEND EMAIL ATTACHMENTS
logger.info("Sending email attachments..")
email = Email()
email.send_email_file(
subject=f"Distributor Ranking Reset (SM-{env}) {reset_date}: {status}",
mail_body=f"""
Debug Mode: {debug_mode}
DC's Evaluated: {dc_evaluated}
Franchisee Stores Evaluated: {franchisee_stores_evaluated}
Job Params: {args}
""",
to_emails=email_to)
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/distributor_ranking/distributor_ranking_main.py | distributor_ranking_main.py |
import os
import sys
import argparse
import pandas as pd
import numpy as np
import datetime
sys.path.append('../../../..')
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.logger import get_logger
from zeno_etl_libs.helper.email.email import Email
from zeno_etl_libs.helper.parameter.job_parameter import parameter
parser = argparse.ArgumentParser(
description="To send consolidated PR FF and OOS numbers.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
job_params = parameter.get_params(job_id=90)
email_to = job_params['email_to']
logger = get_logger()
logger.info("Script begins")
status = False
try:
# RS Connection
rs_db = DB()
rs_db.open_connection()
query = """
SELECT
a."patient-id" as "patient-id",
(a."month-created-at") as "month",
(a."year-created-at") as "year",
CURRENT_TIMESTAMP AS "refreshed_at",
a."store-name" AS "store-name",
a."drug-name" AS "drug-name",
case
when date_part(hour, a."created-at") <= '14' then '1stR'
else '2ndR'
end as "Round",
case
when DATE(a."invoiced-at") is null then
'Pending'
when date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."invoiced-at") = 0
AND date_part(hour, a."invoiced-at") <= '21' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Sunday' , 'Saturday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."invoiced-at") = 0 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Sunday' , 'Saturday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."invoiced-at") = 1
AND date_part(hour, a."invoiced-at") <= '21' then
'ontime'
when date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."invoiced-at") = 0 then
'ontime'
when date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."invoiced-at") = 1
AND date_part(hour, a."invoiced-at") <= '16' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."invoiced-at") = 0
AND date_part(hour, a."invoiced-at") <= '21' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."invoiced-at") <= 1 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."invoiced-at") = 2
AND date_part(hour, a."invoiced-at") <= '16' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Sunday'
AND DATEDIFF(day, a."created-at", a."invoiced-at") <= 1 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Sunday'
AND DATEDIFF(day, a."created-at", a."invoiced-at") = 2
AND date_part(hour, a."invoiced-at") <= '16' then
'ontime'
else
'delayed' end AS "fullfilment on invoice",
Case
when DATE(a."dispatched-at") is null then
'Pending'
when date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 0 then
'ontime'
when date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 1
AND date_part(hour, a."dispatched-at") <= '10' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Sunday' , 'Saturday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 0 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Sunday' , 'Saturday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 1 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Sunday' , 'Saturday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 2
AND date_part(hour, a."dispatched-at") <= '10' then
'ontime'
when date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 0 then
'ontime'
when date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 1
AND date_part(hour, a."dispatched-at") <= '17' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 0 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 1
AND date_part(hour, a."dispatched-at") <= '10' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."dispatched-at") <= 1 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 2
AND date_part(hour, a."dispatched-at") <= '17' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Sunday'
AND DATEDIFF(day, a."created-at", a."dispatched-at") <= 1 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Sunday'
AND DATEDIFF(day, a."created-at", a."dispatched-at") = 2
AND date_part(hour, a."dispatched-at") <= '17' then
'ontime' else
'delayed' end AS "fullfilment on dispatch",
case when DATE(a."store-delivered-at") is null then
'Pending'
when date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 0 then
'ontime'
when date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 1
AND date_part(hour, a."store-delivered-at") <= '11' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Sunday' , 'Saturday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 0 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Sunday' , 'Saturday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 1 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Sunday' , 'Saturday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 2
AND date_part(hour, a."store-delivered-at") <= '11' then
'ontime'
when date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 0 then
'ontime'
when date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 1
AND date_part(hour, a."store-delivered-at") <= '19' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 0 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 1
AND date_part(hour, a."store-delivered-at") <= '12' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") <= 1 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 2
AND date_part(hour, a."store-delivered-at") <= '19' then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Sunday'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") <= 1 then
'ontime'
when (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Sunday'
AND DATEDIFF(day, a."created-at", a."store-delivered-at") = 2
AND date_part(hour, a."store-delivered-at") <= '19'then
'ontime' else
'delayed' end AS "fullfilment on delivery",
case when DATE(a."ordered-at") is not null
AND date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 0
AND date_part(hour, a."ordered-at") <= '15' then
'ontime'
when DATE(a."ordered-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Saturday' , 'Sunday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 0 then
'ontime'
when DATE(a."ordered-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Saturday' , 'Sunday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 1
AND date_part(hour, a."ordered-at") <= '15' then
'ontime'
when DATE(a."ordered-at") is not null
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 0 then
'ontime'
when DATE(a."ordered-at") is not null
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 1
AND date_part(hour, a."ordered-at") <= '01' then
'ontime'
when DATE(a."ordered-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 0
AND date_part(hour, a."ordered-at") <= '15' then
'ontime'
when DATE(a."ordered-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 0 then
'ontime'
when DATE(a."ordered-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 1 then
'ontime'
when DATE(a."ordered-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 2
AND date_part(hour, a."ordered-at") <= '01' then
'ontime'
when DATE(a."ordered-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Sunday'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 0 then
'ontime'
when DATE(a."ordered-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Sunday'
AND DATEDIFF(day, a."created-at", a."ordered-at") = 1
AND date_part(hour, a."ordered-at") <= '01' then
'ontime'
when DATE(a."ordered-at") is null then
'not ordered' else
'delayed' end AS "ordered timing",
case when DATE(a."invoiced-at") is null
AND DATE(a."completed-at") is null
AND date_part(hour, a."created-at") <= '14'
AND DATEDIFF(day, a."created-at", a."completed-at") = 0
AND (date_part(hour, a."completed-at")) <= '21' then
'completed-early'
when DATE(a."invoiced-at") is null
AND DATE(a."completed-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Sunday' , 'Saturday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."completed-at") = 0 then
'completed-early'
when DATE(a."invoiced-at") is null
AND DATE(a."completed-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) NOT IN ('Sunday' , 'Saturday')
AND date_part(hour, a."created-at") > '23'
AND DATEDIFF(day, a."created-at", a."completed-at") = 1
AND (date_part(hour, a."completed-at")) <= '21' then
'completed-early'
when DATE(a."invoiced-at") is null
AND DATE(a."completed-at") is not null
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."completed-at") = 0 then
'completed-early'
when DATE(a."invoiced-at") is null
AND DATE(a."completed-at") is not null
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."completed-at") = 1
AND (date_part(hour, a."completed-at")) <= '16' then
'completed-early'
when DATE(a."invoiced-at") is null
AND DATE(a."completed-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND DATEDIFF(day, a."created-at", a."completed-at") = 0
AND date_part(hour, a."created-at") <= '14'
AND (date_part(hour, a."completed-at")) <= '21' then
'completed-early'
when DATE(a."invoiced-at") is null
AND DATE(a."completed-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."completed-at") <= 1 then
'completed-early'
when DATE(a."invoiced-at") is null
AND DATE(a."completed-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Saturday'
AND date_part(hour, a."created-at") > '14'
AND DATEDIFF(day, a."created-at", a."completed-at") = 2
AND (date_part(hour, a."completed-at")) <= '16' then
'completed-early'
when DATE(a."invoiced-at") is null
AND DATE(a."completed-at") is not null
AND (trim(' ' FROM to_char(a."created-at", 'Day'))) = 'Sunday'
AND DATEDIFF(day, a."completed-at", a."created-at") <= 1 then
'completed-early'
when DATE(a."invoiced-at") is null
AND DATE(a."completed-at") is not null
AND DATEDIFF(day, a."created-at", a."completed-at") = 2
AND (date_part(hour, a."completed-at")) <= '16' then
'completed-early'
else
'no issue' end AS "completed issues",
a."sb-status" AS "status",
a."pso-requested-quantity" AS "requested-quantity",
a."quantity" AS "quantity",
a."required-quantity" AS "required-quantity",
DATE(a."created-at") AS "created-at",
DATE(a."ordered-at") AS "ordered-at",
DATE(a."invoiced-at") AS "invoiced-at",
DATE(a."dispatched-at") AS "dispatched-at",
DATE(a."received-at") AS "received-at",
DATE(a."completed-at") AS "completed-at",
DATE(dtm."delivered-at") AS "delivered-at" ,
a."created-at" AS "created-time",
a."ordered-at" AS "ordered-time",
a."invoiced-at" AS "invoiced-time",
a."dispatched-at" AS "dispatch-time",
dtm."delivered-at" AS "delivered-time",
a."completed-at" AS "completed-time",
a."decline-reason" AS "decline reason",
a."type",
a."store-id",
a."drug-id",
a."company",
a."franchisee-short-book",
e."drug-grade",
f."name" AS "received distributor",
case when a."store-id" >= 146 then 'new' else 'old' end AS "store-type",
j."forward-dc-id",
ss."name" AS "dc_name",
a."store-delivered-at",
case when p."patient-category" !='others' then 1 else 0 end as premium_flag
FROM
"prod2-generico"."patient-requests-metadata" a
LEFT JOIN
"prod2-generico"."drug-order-info" e ON e."store-id" = a."store-id"
AND e."drug-id" = a."drug-id"
LEFT JOIN
"prod2-generico"."distributors" f ON NVL(a."ordered-distributor-id",0) = f."id"
LEFT JOIN
(SELECT
*
FROM
"prod2-generico"."store-dc-mapping"
WHERE
"drug-type" = 'ethical') j ON j."store-id" = a."store-id"
LEFT JOIN
"prod2-generico"."stores" ss ON ss."id" = j."forward-dc-id"
left join
"prod2-generico"."delivery-tracking-metadata" dtm
on dtm.id=a.id
left join
"prod2-generico"."patients" p
on a."patient-id" =p.id
WHERE
DATE(a."created-at") >= case when extract(day from current_date) >= 7 then (current_date - extract(day from current_date) + 1) else current_date - 7 end
and (a."quantity" > 0 or a."completion-type" = 'stock-transfer')
AND a."sb-status" NOT IN ('deleted', 'presaved')
"""
raw = rs_db.get_df(query)
logger.info("data pulled from RS")
pr = raw.copy()
# OOS query
oos_network_q = """
select
"closing-date",
avg("oos-count")* 100 as oos_perc_network
from
"prod2-generico"."out-of-shelf-drug-level" oosdl
where
"closing-date" >= CURRENT_DATE - interval '7 day'
and "max-set" = 'Y'
group by
"closing-date"
"""
oos_premium_q = """
select
"closing-date",
avg("oos-count")* 100 as oos_perc_premium
from
"prod2-generico"."out-of-shelf-drug-level" oosdl
where
"closing-date" >= CURRENT_DATE - interval '7 day'
and "customer-type" = 'Premium'
and "max-set" = 'Y'
group by
"closing-date"
"""
oos_net = rs_db.get_df(oos_network_q)
oos_pre = rs_db.get_df(oos_premium_q)
pr.columns = [c.replace('-', '_') for c in pr.columns]
oos_net.columns = [c.replace('-', '_') for c in oos_net.columns]
oos_pre.columns = [c.replace('-', '_') for c in oos_pre.columns]
rs_db.connection.close()
pr = pr[pr['completed issues'] == 'no issue']
pr = pr[['store_name', 'created_at', 'fullfilment on delivery',
'quantity']]
pr = pr.groupby(['store_name', 'created_at',
'fullfilment on delivery']).sum()
pr.reset_index(inplace=True)
#store level calculations
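# pivot PR quantity by store and day into ontime / delayed / Pending buckets,
# compute the day-level fulfilment %, and count stores falling below 80% ff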
pr_pivot_store_lvl = pr.pivot_table(index=['store_name','created_at'],
values='quantity',
columns = 'fullfilment on delivery',
aggfunc='sum', fill_value=0)
pr_pivot_store_lvl['total_quantity'] = pr_pivot_store_lvl[
'ontime'] + pr_pivot_store_lvl[
'delayed'] + pr_pivot_store_lvl['Pending']
pr_pivot_store_lvl['ff%'] = pr_pivot_store_lvl[
'ontime']/pr_pivot_store_lvl['total_quantity'] * 100
pr_pivot_store_lvl['ff%'] = pr_pivot_store_lvl['ff%'].round(1)
pr_pivot_store_lvl['below_80'] = np.where(pr_pivot_store_lvl[
'ff%']<80, 1, 0)
pr_pivot_store_lvl.reset_index(inplace=True)
pr_pivot_store_lvl_80 = pr_pivot_store_lvl[['created_at', 'below_80']]
pr_pivot_store_lvl_80 = pr_pivot_store_lvl_80.groupby([
'created_at']).sum()
total_stores = pr_pivot_store_lvl.pivot_table(index='created_at',
values = 'store_name',
aggfunc='count')
total_stores.rename(columns={'store_name':'total_stores'},
inplace=True)
pr_pivot_day_lvl = pr.pivot_table(index='created_at',
values='quantity',
columns = 'fullfilment on delivery',
aggfunc='sum', fill_value=0)
pr_pivot_day_lvl['total_quantity'] = pr_pivot_day_lvl[
'ontime'] + pr_pivot_day_lvl[
'delayed'] + pr_pivot_day_lvl['Pending']
pr_pivot_day_lvl['ff%'] = pr_pivot_day_lvl[
'ontime']/pr_pivot_day_lvl['total_quantity'] * 100
pr_pivot_day_lvl['ff%'] = pr_pivot_day_lvl['ff%'].round(0)
pr_pivot_day_lvl.reset_index(inplace=True)
pr_pivot_day_lvl = pr_pivot_day_lvl[['created_at', 'ff%']]
final_pr = pr_pivot_day_lvl.merge(pr_pivot_store_lvl_80,
on='created_at', how='left')
final_pr = final_pr.merge(total_stores, on='created_at', how='left')
final_pr.reset_index(inplace=True)
final_pr = final_pr[['created_at', 'ff%', 'below_80', 'total_stores']]
ff = pd.DataFrame()
ff['created_at'] = final_pr['created_at']
ff['ff%'] = final_pr['ff%']
ff['below_80'] = final_pr['below_80']
ff['total_stores'] = final_pr['total_stores']
#OOS pre-processing
oos_net['oos_perc_network'] = oos_net['oos_perc_network'].round(2)
oos_pre['oos_perc_premium'] = oos_pre['oos_perc_premium'].round(2)
status = True
except Exception as e:
logger.info('pr_oos_automailer job failed')
logger.exception(e)
# Sending email
email = Email()
run_date = str(datetime.datetime.now().strftime("%Y-%m-%d"))
subject = "PR Day-wise Fullfillment MTD / OOS Network & Premium"
if status is True:
mail_body = f"""PR Fulfillment : \n {ff} \n\n
OOS NETWORK : \n {oos_net} \n\n
OOS Premium : \n {oos_pre}""".format(ff=ff.to_string(index=False),
oos_net=oos_net.to_string(index=False),
oos_pre=oos_pre.to_string(index=False))
else:
mail_body = f"pr_ff_automailer ({env}) unsuccessful: {datetime.datetime.now()}"
email.send_email_file(subject=subject, mail_body=mail_body, to_emails=email_to, file_uris=[])
logger.info("Script ended") | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/pr_oos_automailer/pr_oos_automailer.py | pr_oos_automailer.py |
```
# this is to include zeno_etl_libs in the python search path at run time
import sys
sys.path.append('./../..')
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.aws.s3 import S3
import argparse
import io
import pandas as pd
import requests
from zeno_etl_libs.config import set_env_config, EnvNames
from zeno_etl_libs.db.db import DB, RedshiftSQLAlchemy
"""
Environment: Activate the env as per the requirement
"""
# env = EnvNames.production
# env = EnvNames.staging
env = EnvNames.development
config = set_env_config(env=env)
rs_db = DB(secrets=config.secrets)
rs_db.open_connection()
rs_db_sa = RedshiftSQLAlchemy(secrets=config.secrets)
rs_db_sa.open_connection()
s3 = S3(aws_access_key_id=config.secrets['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=config.secrets['AWS_SECRET_ACCESS_KEY_ID'])
report_type = 'installs_report'
ignore_columns = ["google-play-referrer"]
def get_data():
appsflyer_uri = "https://hq.appsflyer.com/export"
app_id = 'com.zenohealth.android'
params = {
'api_token': '70b710e8-59d1-4121-9aae-f520e4d0cebf',
'from': '2022-02-01',
'to': '2022-02-01',
'timezone': 'Asia/Kolkata',
'maximum_rows': 10000,
'additional_fields': 'device_model,keyword_id,store_reinstall,deeplink_url,oaid,install_app_store,'
'contributor1_match_type,contributor2_match_type,contributor3_match_type,match_type,'
'device_category,gp_referrer,gp_click_time,gp_install_begin,amazon_aid,keyword_match_type,'
'att,conversion_type,campaign_type,is_lat'
}
url = '{}/{}/{}/v5'.format(appsflyer_uri, app_id, report_type)
payload = {}
res = requests.request("GET", url, data=payload, params=params)
if res.status_code == 200:
df = pd.read_csv(io.StringIO(res.text))
# print(df)
return df
else:
if res.status_code == 404:
print('There is a problem with the request URL. Make sure that it is correct')
else:
print('There was a problem retrieving data: ', res.text)
def check_table_exists(dbcon, tablename):
dbcur = dbcon.cursor()
dbcur.execute("""
SELECT COUNT(*)
FROM information_schema.tables
WHERE table_name = '{0}'
""".format(tablename.replace('\'', '\'\'')))
if dbcur.fetchone()[0] == 1:
dbcur.close()
return True
dbcur.close()
return False
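# create the target table by writing a tiny sample via pandas to_sql (schema only),
# then truncate it so the full data can be loaded separately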
def create_table_from_df(df, table_name, connection, schema="prod2-generico"):
try:
df.head(5).to_sql(
name=table_name,
con=connection,
index=False,
if_exists='fail',
schema=schema)
query = f"""truncate table "{schema}"."{table_name}"; """
connection.execute(query)
print(f"Created table: {table_name}, successfully.")
except Exception as e:
print(f"Error creating table: {e}")
df = get_data()
df.columns = [i.lower().replace(" ", "-") for i in df.columns]
table_name = "appsflyer-" + report_type.replace("_","-")
check_table_exists(dbcon=rs_db.connection, tablename=table_name)
table_info = helper.get_table_info(db=rs_db, table_name=table_name, schema='prod2-generico')
"""correcting the column order"""
df = df[table_info['column_name']]
create_table_from_df(df=df, table_name=table_name, connection=rs_db_sa.connection)
s3.write_df_to_db(df=df, table_name=table_name, db=rs_db, schema="prod2-generico")
```
| zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/appsflyer-raw-data/appsflyer-sync.ipynb | appsflyer-sync.ipynb |
import os
import sys
sys.path.append('../../../..')
import argparse
import datetime as dt
import io
import pandas as pd
import requests
from zeno_etl_libs.helper import helper
from zeno_etl_libs.helper.aws.s3 import S3
from zeno_etl_libs.db.db import DB
from zeno_etl_libs.logger import get_logger
class AppFlyerReportDataSync:
def __init__(self, report_type, rs_db, s3, created_date=None):
self.appsflyer_uri = "https://hq.appsflyer.com/export"
self.api_token = '70b710e8-59d1-4121-9aae-f520e4d0cebf'
self.report_data_df = pd.DataFrame() # starting with empty frame
self.report_type = report_type
self.ignore_columns = ["google-play-referrer"]
self.rs_db = rs_db
self.table_name = "appsflyer-" + report_type.replace("_", "-")
self.s3 = s3
self.schema = 'prod2-generico'
yesterday = dt.datetime.now() + dt.timedelta(days=-1)
""" default date filter is yesterday """
created_date = created_date if created_date else yesterday
created_date = created_date.strftime("%Y-%m-%d")
self.created_date = created_date
self.from_date = self.created_date
self.to_date = self.created_date
self.logger = get_logger()
def get_app_data(self, report_type, app_id, maximum_rows=10000):
params = {
'api_token': self.api_token,
'from': self.from_date,
'to': self.to_date,
'timezone': 'Asia/Kolkata',
'maximum_rows': maximum_rows,
'additional_fields': 'device_model,keyword_id,store_reinstall,deeplink_url,oaid,install_app_store,'
'contributor1_match_type,contributor2_match_type,contributor3_match_type,match_type,'
'device_category,gp_referrer,gp_click_time,gp_install_begin,amazon_aid,keyword_match_type,'
'att,conversion_type,campaign_type,is_lat'
}
url = '{}/{}/{}/v5'.format(self.appsflyer_uri, app_id, report_type)
payload = {}
res = requests.request("GET", url, data=payload, params=params)
if res.status_code == 200:
df = pd.read_csv(io.StringIO(res.text))
return df
else:
if res.status_code == 404:
self.logger.info('There is a problem with the request URL. Make sure that it is correct')
else:
self.logger.info('There was a problem retrieving data: {}'.format(res.text))
def get_report_data(self):
app_ids = ['com.zenohealth.android', 'id1550245162']
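# pull the same report for both app ids (the Android package name and the iOS app-store id) and append the results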
for app_id in app_ids:
if isinstance(self.report_data_df, type(None)) or self.report_data_df.empty:
self.report_data_df = self.get_app_data(report_type=self.report_type, app_id=app_id)
else:
self.report_data_df = self.report_data_df.append(
[self.get_app_data(report_type=self.report_type, app_id=app_id)])
self.logger.info(f"Downloaded app data, report_type: {self.report_type}, app_id: {app_id}")
if self.report_data_df is not None:
self.report_data_df.columns = [i.lower().replace(" ", "-") for i in self.report_data_df.columns]
""" dropping the unwanted columns """
i_cols = []
for i_col in self.ignore_columns:
if i_col in self.report_data_df.columns:
i_cols.append(i_col)
if i_cols:
self.report_data_df = self.report_data_df.drop(i_cols, axis=1)
self.logger.info(f"Dropped unwanted columns: {i_cols}")
self.report_data_df['created-date'] = self.created_date
return self.report_data_df
def check_table_exists(self):
cursor = self.rs_db.cursor
cursor.execute("""
SELECT COUNT(*)
FROM information_schema.tables
WHERE table_name = '{0}'
""".format(self.table_name.replace('\'', '\'\'')))
if cursor.fetchone()[0] == 1:
# cursor.close()
self.logger.info(f"Table, {self.table_name}, already exists: {True}")
return True
# cursor.close()
self.logger.info(f"Table, {self.table_name}, already exists: {False}")
return False
def upload_data(self):
""" get the report data """
self.get_report_data()
if self.report_data_df is None:
self.logger.info(f"No data found, report_type, {self.report_type}, created_date:{self.created_date}")
elif self.report_data_df.empty:
self.logger.info(f"No data found, report_type, {self.report_type}, created_date:{self.created_date}")
else:
""" create the report table if not exists """
if self.check_table_exists() is False:
# rs_db_engine.create_report_table_using_df(df=self.report_data_df, table_name=self.table_name,
# schema=self.schema)
raise Exception(f""" create the table first: {self.table_name} """)
else:
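# make the load idempotent: delete any rows already present for this created-date
# before inserting the freshly pulled report data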
query = f"""
delete from "{self.schema}"."{self.table_name}" where "created-date" = '{self.created_date}';
"""
self.rs_db.execute(query)
table_info = helper.get_table_info(db=self.rs_db, table_name=self.table_name, schema=self.schema)
"""correcting the column order"""
self.report_data_df = self.report_data_df[table_info['column_name']]
self.s3.write_df_to_db(df=self.report_data_df, table_name=self.table_name, db=self.rs_db,
schema=self.schema)
self.logger.info(f"Data upload successful, report_type: {self.report_type}")
def main(rs_db, s3, from_date=None, to_date=None, reports=None):
"""
function syncs AppsFlyer data to the Redshift database, one day at a time
"""
logger = get_logger()
yesterday = dt.datetime.now() + dt.timedelta(days=-1)
from_date = dt.datetime.strptime(from_date, '%Y-%m-%d') if from_date else yesterday
to_date = dt.datetime.strptime(to_date, '%Y-%m-%d') if to_date else yesterday
logger.info(f"from_date: {from_date}, to_date: {to_date}")
if to_date < from_date:
raise Exception(f"Wrong, from_date: {from_date} and to_date: {to_date} provided")
created_date = from_date
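# walk forward one day at a time; for each day, pull and load every requested report type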
while from_date <= created_date <= to_date:
report_types = reports.split(",") if reports else ['installs_report']
for report_type in report_types:
logger.info(f"starting report_type: {report_type}, for created_date: {created_date}")
af = AppFlyerReportDataSync(report_type=report_type, rs_db=rs_db, s3=s3, created_date=created_date)
af.upload_data()
""" next day """
created_date = created_date + dt.timedelta(days=1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="This is ETL script.")
parser.add_argument('-e', '--env', default="dev", type=str, required=False,
help="This is env(dev, stag, prod)")
parser.add_argument('-fd', '--from_date', default=None, type=str, required=False)
parser.add_argument('-td', '--to_date', default=None, type=str, required=False)
parser.add_argument('-r', '--reports', default=None, type=str, required=False)
args, unknown = parser.parse_known_args()
env = args.env
os.environ['env'] = env
logger = get_logger()
from_date = args.from_date
to_date = args.to_date
reports = args.reports
logger.info(f"env: {env} reports: {reports}")
rs_db = DB()
rs_db.open_connection()
s3 = S3()
""" calling the main function """
main(rs_db, s3, from_date, to_date, reports)
# Closing the DB Connection
rs_db.close_connection() | zeno-etl-libs | /zeno_etl_libs-1.0.126.tar.gz/zeno_etl_libs-1.0.126/glue-jobs/src/scripts/appsflyer-raw-data/appsflyer_raw_data.py | appsflyer_raw_data.py |