Chandan Dwivedi committed
Commit · e35e2dc
1 Parent(s): d336524

adding Generative AI email generation support
Browse files
- .DS_Store +0 -0
- app.py +35 -7
- config.py +22 -0
- main_app.py +0 -15
- requirements.txt +5 -1
- utils.py +58 -0
.DS_Store
ADDED
Binary file (6.15 kB)
app.py
CHANGED
@@ -7,6 +7,8 @@ from io import StringIO
 import boto3
 from urlextract import URLExtract
 import time
+from utils import *
+
 # from joblib import dump, load
 
 import joblib

@@ -18,7 +20,6 @@ import os
 #from ipyfilechooser import FileChooser
 
 #from IPython.display import display
-from io import BytesIO
 from bs4 import BeautifulSoup
 import matplotlib.pyplot as plt
 import numpy as np

@@ -380,7 +381,7 @@ if st.button('Generate Predictions'):
 print(
 "Sorry, Current model couldn't provide predictions on the target variable you selected.")
 else:
-st.markdown('
+st.markdown('##### Current Character Count in Your Email is: <span style="color:yellow">{}</span>'.format(
 character_cnt), unsafe_allow_html=True)
 # st.info('The model predicts that it achieves a {} of {}%'.format(target, str(round(output_rate*100,2))))
 if target == 'conversion_rate':

@@ -388,7 +389,7 @@ if st.button('Generate Predictions'):
 else:
 target_vis = 'Open_Rate'
 
-st.markdown('
+st.markdown('##### The model predicts that it achieves a <span style="color:yellow">{}</span> of <span style="color:yellow">{}</span>%'.format(
 target_vis, str(round(output_rate*100, 3))), unsafe_allow_html=True)
 selected_industry_code = industry_code_dict.get(industry)
 

@@ -440,10 +441,10 @@ if st.button('Generate Predictions'):
 if selected_variable == "Click_Through_Rate":
 selected_variable = "Conversion_Rate"
 
-st.markdown('
+st.markdown('##### To get higher, <span style="color:yellow">{}</span>, the model recommends the following options:'.format(
 selected_variable), unsafe_allow_html=True)
 if len(df_reco_opt_rank) == 0:
-st.markdown('
+st.markdown('##### You ve already achieved the highest, <span style="color:yellow">{}</span>, with the current character count!'.format(
 selected_variable), unsafe_allow_html=True)
 else:
 #for _, row in df_reco_opt_rank.iterrows():

@@ -461,8 +462,13 @@ if st.button('Generate Predictions'):
 selected_variable_number = row[3]
 chars.append(int(Character_Count))
 sel_var_values.append(round(selected_variable_number, 3)*100)
-st.write(f"·Number of Characters: {int(Character_Count)}, Target Rate: {round(round(selected_variable_number, 3)*100, 3)}", "%")
+# st.write(f"·Number of Characters: {int(Character_Count)}, Target Rate: {round(round(selected_variable_number, 3)*100, 3)}", "%")
 st.write("\n")
+df_modelpred=pd.DataFrame(list(zip(chars, sel_var_values)), columns=["Number of Characters", "Target_Rate"])
+# st.checkbox("Use container width", value=False, key="use_container_width")
+# st.dataframe(df_modelpred.style.highlight_max(axis=0), use_container_width=st.session_state.use_container_width)
+df_modelpred.sort_values(by='Target_Rate', ascending=False, inplace = True)
+st.dataframe(df_modelpred)
 
 if len(chars) > 1:
 #fig = plt.figure()

@@ -473,7 +479,7 @@ if st.button('Generate Predictions'):
 #ax.bar_label(bars)
 
 ax.set_yticks(np.arange(len(chars)))
-ax.set_yticklabels(
+ax.set_yticklabels(tuple(chars), fontsize=14)
 ax.set_title('Character Counts vs. Target Variable Rates', fontsize=18)
 ax.set_ylabel('Character Counts', fontsize=16)
 ax.set_xlabel('Target Rates %', fontsize=16)

@@ -490,7 +496,29 @@ if st.button('Generate Predictions'):
 st.plotly_chart(fig, use_container_width=True)
 
 st.write("\n")
+chars_out = dict(zip(chars, sel_var_values))
+sorted_chars_out = sorted(chars_out.items(), key=lambda x: x[1], reverse=True)
+prefrence_variables=res=["charcter counts: "+str(x)+", Target Rate: "+str(y) for x,y in zip(chars,sel_var_values)]
+preference = st.selectbox(
+'Please select your preferences',
+prefrence_variables,
+index=1
+)
+if st.button('Generate AI Recommended Email'):
+if(preference is None):
+st.error('Please upload a email (HTML format)')
+else:
+ai_generated_email=generate_example_email_with_context(email_body, campaign, industry, target, sorted_chars_out, preference)
+st.markdown('##### Here is the recommended Generated Email for you:')
+st.markdown('####### {}:'.format(ai_generated_email),unsafe_allow_html=True)
+preference= "charcter counts: "+str(573)+", Target Rate: "+str(37.2)
+ai_generated_email=generate_example_email_with_context(email_body, campaign, industry, target, sorted_chars_out, preference)
+print("ai_generated_email: ",ai_generated_email)
+st.markdown('##### Here is the recommended Generated Email for you:')
+st.markdown('####### {}'.format(ai_generated_email),unsafe_allow_html=True)
 #st.write(np.array(chars))
+
+
 chars_out = dict(zip(chars, sel_var_values))
 sorted_chars_out = sorted(chars_out.items(), key=lambda x: x[1], reverse=True)
 
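The added block feeds the model's recommended character counts into an OpenAI-backed rewrite of the uploaded email. Below is a minimal standalone sketch of that flow, assuming generate_example_email_with_context from the new utils.py; the email body, campaign, industry, target, and the numbers used here are illustrative placeholders, not values from the commit.

# Standalone sketch of the recommendation-to-generation flow (all sample values are illustrative).
from utils import generate_example_email_with_context

email_body = "<p>Join our webinar on cloud cost optimization next Tuesday.</p>"  # hypothetical upload
campaign, industry, target = "Webinar", "Software & Technology", "open_rate"      # hypothetical selections

chars = [412, 573, 808]              # recommended character counts from the model
sel_var_values = [21.4, 37.2, 18.9]  # predicted target rates (%)

chars_out = dict(zip(chars, sel_var_values))
sorted_chars_out = sorted(chars_out.items(), key=lambda x: x[1], reverse=True)

# app.py builds the selectbox options with the same string format, so the chosen
# option can be matched back to a character count inside the generator.
preference = "charcter counts: " + str(573) + ", Target Rate: " + str(37.2)

ai_generated_email = generate_example_email_with_context(
    email_body, campaign, industry, target, sorted_chars_out, preference)
print(ai_generated_email)

Running the sketch requires valid OpenAI credentials, since importing utils sets openai.api_key from config.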
config.py
ADDED
@@ -0,0 +1,22 @@
+'''
+Configurations
+'''
+import os
+import argparse
+
+class Config(object):
+    OPEN_API_KEY='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'
+    OPENAI_MODEL_TYPE='text-davinci-003'
+    OPENAI_MODEL_TEMP=0.3
+    MODELCC_DATA_S3PATH='s3://emailcampaigntrainingdata/modelCC'
+    TRAINING_DATA_S3PATH='s3://emailcampaigntrainingdata/trainingdata'
+    MODEL_FILE_NAME='modelCC.sav'
+    MODEL_ALLOCATION_PREFIX='sagemakermodelcc'
+    MODEL_BUCKET_NAME='sagemakermodelcc'
+    DATASET_TRAINING='email_dataset_training'
+    DATASET_TRAINING_RAW='email_dataset_training_raw'
+    MODELCC_TRAINING_DATA='training.csv'
+    MODELCC_TEST_DATA='Xtest.csv'
+    MODELCC_TEST_LABEL='ytest.csv'
+
+config=Config()
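The new config.py commits the OpenAI key to the repository as a string literal. A minimal sketch of the same Config reading the secret from the environment instead; the OPENAI_API_KEY variable name and the use of host-level (e.g. Space) secrets are assumptions, not part of this commit.

# Sketch: identical Config surface, but the key is injected at runtime (assumed env var name).
import os

class Config(object):
    OPEN_API_KEY = os.environ.get('OPENAI_API_KEY', '')  # set outside the repo, never committed
    OPENAI_MODEL_TYPE = 'text-davinci-003'
    OPENAI_MODEL_TEMP = 0.3

config = Config()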
main_app.py
DELETED
@@ -1,15 +0,0 @@
-import streamlit as st
-
-st.set_page_config(layout="wide")
-
-st.markdown(
-    """
-    <style>
-    body {
-        background-image: linear-gradient(#2e7bcf,#2e7bcf);
-        color: white;
-    }
-    </style>
-    """,
-    unsafe_allow_html=True,
-)
requirements.txt
CHANGED
@@ -1,3 +1,6 @@
+# transformers
+# torch
+streamlit
 altair<5
 pickle5
 numpy

@@ -11,4 +14,5 @@ urlextract
 bs4
 matplotlib
 plotly
-streamlit==1.25.0
+streamlit==1.25.0
+openai==0.8.0
utils.py
ADDED
@@ -0,0 +1,58 @@
+import openai
+from io import BytesIO
+from config import config
+
+openai.api_key = config.OPEN_API_KEY
+
+
+def ask_chat_gpt(prompt, model=config.OPENAI_MODEL_TYPE, temp=0, max_tokens=500):
+    response = openai.Completion.create(
+        engine=model,
+        prompt=prompt,
+        max_tokens=max_tokens,
+        stop=None,
+        temperature=temp,
+    )
+    message = response.choices[0].text
+    return message.strip()
+
+def chat_gpt_user_input_loop():
+    prompt = "Ask me anything on regarding email optimization. "
+    user_input = input(prompt)
+    response = ask_chat_gpt(prompt + user_input)
+    chat_gpt_user_input_loop()
+
+
+def generate_example_email_with_context(email_body, selected_campaign_type, selected_industry, selected_variable, chars_out, dropdown_cc):
+    if len(chars_out) == 1:
+        if str(chars_out[0][0]) in dropdown_cc:
+            generate_email_prompt = "Rewrite this email keeping relevant information (people, date, location): " + email_body + "." "Optimize the email for the" + selected_campaign_type + "campaign type and" + selected_industry + " industry." + "The email body should be around" + str(chars_out[0][0]+200) + "characters in length."
+            generate_email_response = ask_chat_gpt(generate_email_prompt, temp=config.OPENAI_MODEL_TEMP, max_tokens=chars_out[0][0] + 200)
+            return generate_email_response
+
+    if len(chars_out) == 2:
+        if str(chars_out[0][0]) in dropdown_cc:
+            generate_email_prompt = "Rewrite this email keeping relevant information (people, date, location): " + email_body + "." "Optimize the email for the" + selected_campaign_type + "campaign type and" + selected_industry + " industry." + "The email body should be around" + str(chars_out[0][0]+200) + "characters in length."
+            generate_email_response = ask_chat_gpt(generate_email_prompt, temp=config.OPENAI_MODEL_TEMP, max_tokens=chars_out[0][0] + 200)
+            return generate_email_response
+
+        if str(chars_out[1][0]) in dropdown_cc:
+            generate_email_prompt = "Rewrite this email keeping relevant information (people, date, location): " + email_body + "." "Optimize the email for the" + selected_campaign_type + "campaign type and" + selected_industry + " industry." + "The email body should be around" + str(chars_out[1][0]+200) + "characters in length." + "Add more information and description as needed."
+            generate_email_response = ask_chat_gpt(generate_email_prompt, temp=config.OPENAI_MODEL_TEMP, max_tokens=chars_out[1][0] + 200)
+            return generate_email_response
+
+    if len(chars_out) == 3:
+        if str(chars_out[0][0]) in dropdown_cc:
+            generate_email_prompt = "Rewrite this email keeping relevant information (people, date, location): " + email_body + "." "Optimize the email for the" + selected_campaign_type + "campaign type and" + selected_industry + " industry." + "The email body should be around" + str(chars_out[0][0]+200) + "characters in length."
+            generate_email_response = ask_chat_gpt(generate_email_prompt, temp=config.OPENAI_MODEL_TEMP, max_tokens=chars_out[0][0] + 200)
+            return generate_email_response
+
+        if str(chars_out[1][0]) in dropdown_cc:
+            generate_email_prompt = "Rewrite this email keeping relevant information (people, date, location): " + email_body + "." "Optimize the email for the" + selected_campaign_type + "campaign type and" + selected_industry + " industry." + "The email body should be around" + str(chars_out[1][0]+200) + "characters in length." + "Add more information and description as needed."
+            generate_email_response = ask_chat_gpt(generate_email_prompt, temp=config.OPENAI_MODEL_TEMP, max_tokens=chars_out[1][0] + 200)
+            return generate_email_response
+
+        if str(chars_out[2][0]) in dropdown_cc:
+            generate_email_prompt = "Rewrite this email keeping relevant information (people, date, location): " + email_body + "." "Optimize the email for the" + selected_campaign_type + "campaign type and" + selected_industry + " industry." + "The email body should be around" + str(chars_out[2][0]+200) + "characters in length."
+            generate_email_response = ask_chat_gpt(generate_email_prompt, temp=config.OPENAI_MODEL_TEMP, max_tokens=chars_out[2][0] + 200)
+            return generate_email_response
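The three len(chars_out) branches in generate_example_email_with_context build essentially the same prompt for up to three ranked recommendations. Below is a minimal sketch of the same selection logic as a single loop, under the assumption that behaviour should stay identical (the extra "Add more information" sentence only applies to the second-ranked option) and with the prompt's missing spaces fixed; this is a refactoring sketch, not code from the commit.

# Sketch: same branch logic as a loop over the ranked (character_count, rate) pairs.
def generate_example_email_with_context(email_body, selected_campaign_type, selected_industry,
                                         selected_variable, chars_out, dropdown_cc):
    for i, (char_count, _rate) in enumerate(chars_out[:3]):
        if str(char_count) not in dropdown_cc:
            continue
        generate_email_prompt = (
            "Rewrite this email keeping relevant information (people, date, location): " + email_body + ". "
            "Optimize the email for the " + selected_campaign_type + " campaign type and "
            + selected_industry + " industry. "
            "The email body should be around " + str(char_count + 200) + " characters in length."
        )
        if i == 1:
            # Only the second-ranked option asks for extra detail, as in the original branches.
            generate_email_prompt += " Add more information and description as needed."
        return ask_chat_gpt(generate_email_prompt,
                            temp=config.OPENAI_MODEL_TEMP,
                            max_tokens=char_count + 200)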