import os
import streamlit as st
import pandas as pd
import requests
import datetime
import plotly.express as px
cold_host = os.getenv("backend_cold")
hook_host = os.getenv("hook_host")
rengagement_host = os.getenv("rengagement_host")

CompanyBackendService = cold_host + '/receive_companies/'
UserBackendService = cold_host + '/receive_users/'
RengagementBackendService = rengagement_host + '/query/'
HookBackendService = hook_host + '/query/'
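# Expected environment setup (illustrative values; the real hosts depend on deployment):
#   export backend_cold="https://cold-backend.example.com"
#   export hook_host="https://hook-backend.example.com"
#   export rengagement_host="https://rengagement-backend.example.com"
# os.getenv returns None for unset variables, so the concatenations above assume all
# three are configured; otherwise they raise a TypeError at import time.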
def add_https_to_urls(df, column_name):
    """
    Adds 'https://' to URLs in the specified column of a DataFrame if they don't already start with a valid protocol.
    Corrects URLs starting with 'http:/' or 'https:/'.
    Handles missing values, trims whitespace, and is case-insensitive.

    Parameters:
    df (pandas.DataFrame): The DataFrame containing the URLs.
    column_name (str): The name of the column with URLs.
    """
    # Define a helper function to add or correct protocols
    def correct_protocol(url):
        if pd.isna(url) or url.strip() == '':
            return url  # Return as is if the URL is NaN or empty
        url = url.strip()  # Trim whitespace
        lower_url = url.lower()
        if lower_url.startswith('http:/') and not lower_url.startswith('http://'):
            return 'http://' + url[6:]
        elif lower_url.startswith('https:/') and not lower_url.startswith('https://'):
            return 'https://' + url[7:]
        elif not lower_url.startswith(('http://', 'https://')):
            return 'https://' + url
        return url

    # Apply the helper function to the specified column
    df[column_name] = df[column_name].apply(correct_protocol)
    return df
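# Illustrative behavior of add_https_to_urls (made-up values):
#   add_https_to_urls(pd.DataFrame({'Website': [' example.com ', 'http:/foo.com', 'HTTPS://bar.com']}), 'Website')
#   yields 'https://example.com', 'http://foo.com', and 'HTTPS://bar.com'; URLs that
#   already carry a valid protocol are returned unchanged, whatever their casing.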
def CompanySpecificClient(email_receiver):
    input_data_companies = None
    submitted_companies = False
    uploaded_file = st.file_uploader("Kindly upload a CSV or XLSX file that includes the names and websites of the companies", type=["csv", "xlsx"], key="CompanyUploader")
    opt_out_scraping = st.checkbox("Opt out of scraping", key="CompanyScraper")
    with st.form(key='Company_form'):
        if uploaded_file is not None:
            try:
                # Detect file type and read accordingly
                file_type = uploaded_file.name.split('.')[-1]
                if file_type == 'csv':
                    df = pd.read_csv(uploaded_file)
                elif file_type == 'xlsx':
                    df = pd.read_excel(uploaded_file)
                # Check if 'Website' column exists
                if 'Website' not in df.columns:
                    all_columns = df.columns.tolist()
                    website_column = st.selectbox("Select the column for Website:", all_columns, key="CompanyWebsite")
                else:
                    website_column = 'Website'
                # Check if 'Company Name for Emails' column exists
                if 'Company Name for Emails' not in df.columns:
                    all_columns = df.columns.tolist()
                    company_column = st.selectbox("Select the column for Company Name for Emails:", all_columns, key="CompanyName")
                else:
                    company_column = 'Company Name for Emails'
                if 'Apollo description' not in df.columns:
                    all_columns = df.columns.tolist()
                    Apollo_description = st.selectbox("Select the column for Apollo description:", all_columns, key="ApolloDescription")
                else:
                    Apollo_description = 'Apollo description'
                if opt_out_scraping:
                    if 'Company Description' not in df.columns:
                        all_columns = df.columns.tolist()
                        description_column = st.selectbox("Select the column for Description:", all_columns, key="CompanyDescription")
                        df.rename(columns={description_column: 'scraped_content'}, inplace=True)
                    else:
                        df.rename(columns={'Company Description': 'scraped_content'}, inplace=True)
                input_data_companies = df
            except Exception:
                st.error("An error occurred while processing the file")
        prompt_notes = st.text_input("If applicable please mention the network name", key="CompanyPromptNotes")
        # form_submit_button returns True only on the run where it was clicked
        button_clicked = st.form_submit_button("Submit for processing")
    if button_clicked:
        submitted_companies = True
    # Proceed only when the form was submitted and a file was successfully parsed
    if submitted_companies and input_data_companies is not None:
        df = input_data_companies
        df[website_column] = df[website_column].astype(str)
        if not opt_out_scraping:
            df = df[[website_column, company_column, Apollo_description]]
            df.columns = ["Website", "Company Name for Emails", "Apollo description"]
        else:
            df = df[[website_column, company_column, Apollo_description, "scraped_content"]]
            df.columns = ["Website", "Company Name for Emails", "Apollo description", "scraped_content"]
        # Deduplicate on Website, then drop rows with missing or all-empty values
        df = df.drop_duplicates(subset="Website", keep='first')
        df = df.dropna().loc[~(df == '').all(axis=1)]
        df = add_https_to_urls(df, 'Website')
        st.write(df)
        # Convert DataFrame to CSV for transmission
        csv = df.to_csv(index=False)
        # Construct the data to send
        data_to_send = {"prompt_notes": prompt_notes, "dataframe": csv, "email_receiver": email_receiver, "filename": uploaded_file.name}
        # Sending the POST request to FastAPI
        response = requests.post(CompanyBackendService, json=data_to_send)
        if response.status_code == 200:
            st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
        else:
            st.error("Data transmission failed. Please verify that your file contains the columns 'Website', 'Company Name for Emails' and 'Apollo description', and that it is valid and contains records, then try again. If the problem persists please contact us at [email protected]")
    return None
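# Hedged sketch of the backend contract this client assumes; the real FastAPI app is not
# part of this file, and the field names are inferred from data_to_send above.
#
#   from fastapi import FastAPI
#   from pydantic import BaseModel
#
#   class CompanyPayload(BaseModel):
#       prompt_notes: str
#       dataframe: str        # CSV text produced by df.to_csv(index=False)
#       email_receiver: str
#       filename: str
#
#   app = FastAPI()
#
#   @app.post("/receive_companies/")
#   def receive_companies(payload: CompanyPayload):
#       ...  # e.g. parse payload.dataframe with pd.read_csv(io.StringIO(...)) and queue the job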
def UserSpecificClient(email_receiver):
    input_data = None
    submitted = None
    column_selections = {}
    uploaded_file = st.file_uploader("Kindly upload a CSV file that includes the names, emails, and companies of the users", type=["csv"], key="UserUploader")
    opt_out_scraping = st.checkbox("Opt out of scraping", key="userScraping")
    with st.form(key='User_Form'):
        if uploaded_file is not None:
            try:
                # Detect file type and read accordingly
                file_type = uploaded_file.name.split('.')[-1]
                if file_type == 'csv':
                    try:
                        df = pd.read_csv(uploaded_file)
                    except UnicodeDecodeError:
                        uploaded_file.seek(0)  # Rewind the buffer before retrying with a fallback encoding
                        df = pd.read_csv(uploaded_file, encoding='ISO-8859-1')
                # Identify which of the required columns are missing
                required_essential_columns = ['First Name', 'Company Name for Emails', 'Email']
                missing_essential_columns = [col for col in required_essential_columns if col not in df.columns]
                required_scraping_columns = ['Title', 'Website', 'Last Name', 'Person Linkedin Url']
                missing_scraping_columns = [col for col in required_scraping_columns if col not in df.columns]
                for col in missing_essential_columns:
                    all_columns = df.columns.tolist()
                    selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
                    column_selections[col] = selected_column
                # Generate selectboxes for missing scraping columns if not opting out
                if not opt_out_scraping:
                    for col in missing_scraping_columns:
                        all_columns = df.columns.tolist()
                        selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
                        column_selections[col] = selected_column
                # Process the column renaming based on the selections
                for col, selected_column in column_selections.items():
                    df.rename(columns={selected_column: col}, inplace=True)
                if opt_out_scraping:
                    if 'User Description' not in df.columns:
                        all_columns = df.columns.tolist()
                        description_column = st.selectbox("Select the column for Description:", all_columns, key="userdescription")
                        df.rename(columns={description_column: 'Scrapped Profile'}, inplace=True)
                    else:
                        df.rename(columns={'User Description': 'Scrapped Profile'}, inplace=True)
                # Check if "Person Linkedin Url" is in the DataFrame
                if 'Person Linkedin Url' not in df.columns:
                    # Use the DataFrame index to generate a unique identifier for each row
                    df['Person Linkedin Url'] = 'LI_' + df.index.astype(str)
                input_data = df
            except Exception as E:
                st.error(f"An error occurred while processing the file: {E}")
        # If the button is clicked, it will return True for this run
        button_clicked = st.form_submit_button("Submit")
    if button_clicked:
        submitted = True
    # Proceed only when the form was submitted and a file was successfully parsed
    if submitted and input_data is not None:
        df = input_data
        df = df.drop_duplicates(subset="Person Linkedin Url", keep='first')
        if opt_out_scraping:
            df = df[['First Name', 'Company Name for Emails', 'Person Linkedin Url', 'Scrapped Profile', 'Email']]
        else:
            df = df[['First Name', 'Last Name', 'Title', 'Website', 'Company Name for Emails', 'Person Linkedin Url', 'Email']]
            df = add_https_to_urls(df, 'Website')
        # Drop fully empty rows, then convert the DataFrame to CSV for transmission
        df = df.dropna(how='all')
        df = df.loc[~(df == '').all(axis=1)]
        st.write(df)
        csv = df.to_csv(index=False)
        # Construct the data to send
        data_to_send = {"dataframe": csv, "email_receiver": email_receiver, "email_template": "False", "filename": uploaded_file.name}
        # Sending the POST request to FastAPI
        response = requests.post(UserBackendService, json=data_to_send)
        if response.status_code == 200:
            st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
        else:
            st.error("Data transmission failed. Please verify that your file contains the columns 'First Name', 'Company Name for Emails' and 'Email', and that it is valid and contains records, then try again. If the problem persists please contact us at [email protected]")
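# Payload shape for /receive_users/ (inferred from data_to_send above); note that
# email_template is sent as the string "False", not a boolean, which is presumably
# what the backend expects:
#   {"dataframe": "<csv text>", "email_receiver": "...", "email_template": "False", "filename": "..."}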
def hooks(email_address):
    account_stages = [
        "Pre-Sales",
        "Pre-Sales (Unresponsive, After Call)",
        "Pre-Sales (Long-Term/ Cold)",
        "Sales Opportunity",
        "Closed Lost (Opportunity)",
        "Current Client",
        "Pre-Sales (Short-Term/ Hot)",
        "Pre-Sales (Mid-Term/ Warm)",
        "Project Cancelled"
    ]
    query_types = ["blog", "announcement", "AI_trend"]
    # Track the current number of query fields in session state
    if 'num_queries' not in st.session_state:
        st.session_state.num_queries = 1
    # Creating a form
    with st.form(key='blog2lead_form'):
        selected_stages = st.multiselect("**Select Account Stages (optional)**", options=account_stages, key="Account stages multi-select")
        queries = {}
        # Add query fields based on the current number of queries
        for i in range(st.session_state.num_queries):
            cols = st.columns([3, 1])  # Adjust the width ratio here
            with cols[0]:
                query_label = ["first", "second", "third", "fourth", "fifth", "sixth", "seventh", "eighth", "ninth", "tenth"][i]
                query = st.text_input(f"**Enter your {query_label} hook**", key=f"query_{i}", help="You can enter your hook directly or a URL")
            with cols[1]:
                query_type = st.selectbox(f"**Select {query_label} hook type**", query_types, key=f"type_{i}")
            if query.strip():
                queries[query] = query_type
        # Button to add more query fields
        add_query = st.form_submit_button(label='Add another query')
        submit_button = st.form_submit_button(label='Submit')
    if add_query:
        # Cap at ten fields, the length of the ordinal list above
        st.session_state.num_queries = min(st.session_state.num_queries + 1, 10)
        st.rerun()  # Re-render immediately so the new query field appears
    if submit_button:
        if queries and email_address:
            # Define your data payload to send
            queries = {k: v for k, v in queries.items() if k and v}
            data_to_send = {
                "queries": queries,
                "email_receiver": email_address,
            }
            # Add the filter to the payload only if selected_stages is not empty
            if selected_stages:
                data_to_send["filter"] = {
                    "Account Stage": {"$in": selected_stages}
                }
            # Sending the POST request to FastAPI
            response = requests.post(HookBackendService, json=data_to_send)
            # Handling the response
            if response.status_code == 200:
                st.info("Your request has been processed successfully.")
            else:
                st.error("Data transmission failed. Please try again later.")
        else:
            st.error("Please fill out all fields.")
def RengagmentEmail(email_receiver):
    input_data_emails = None
    submitted_emails = False
    uploaded_file = st.file_uploader("Kindly upload a CSV or XLSX file that includes the required columns", type=["csv", "xlsx"], key="Re-engagement email file uploader")
    with st.form(key='Email_form'):
        if uploaded_file is not None:
            try:
                # Detect file type and read accordingly
                file_type = uploaded_file.name.split('.')[-1]
                if file_type == 'csv':
                    df = pd.read_csv(uploaded_file)
                elif file_type == 'xlsx':
                    df = pd.read_excel(uploaded_file)
                # Check if required columns exist
                required_columns = ['To Email', 'Subject', 'Body HTML', 'Reply Message', 'To Company', 'website']
                missing_columns = [col for col in required_columns if col not in df.columns]
                if missing_columns:
                    st.error(f"Missing columns: {', '.join(missing_columns)}")
                else:
                    input_data_emails = df
            except Exception:
                st.error("An error occurred while processing the file")
        # If the button is clicked, it will return True for this run
        button_clicked = st.form_submit_button("Submit for processing")
    if button_clicked:
        submitted_emails = True
    # Proceed only when the form was submitted and a file was successfully parsed
    if submitted_emails and input_data_emails is not None:
        df = input_data_emails
        df = df.dropna(how='all')
        df = df.loc[~(df == '').all(axis=1)]
        st.write(df)
        # Convert DataFrame to CSV for transmission
        csv = df.to_csv(index=False)
        # Construct the data to send
        data_to_send = {"dataframe": csv, "email_receiver": email_receiver, "filename": uploaded_file.name}
        # Sending the POST request to FastAPI
        response = requests.post(RengagementBackendService, json=data_to_send)
        if response.status_code == 200:
            st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
        else:
            st.error("Data transmission failed. Please verify that your file contains the required columns and try again. If the problem persists, please contact us.")
    return None
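# Minimal CSV accepted by the re-engagement uploader (illustrative values):
#   To Email,Subject,Body HTML,Reply Message,To Company,website
#   [email protected],Quick follow-up,<p>Hi there</p>,Thanks for reaching out!,Acme Corp,acme.com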
def fetch_analytics_data(host, start_of_last_week, end_of_last_week, start_of_this_week, end_of_this_week):
    """
    Fetches analytics data from the specified host, processes it, and aggregates by week.

    Parameters:
    host (str): The host URL to query.
    start_of_last_week (datetime.date): The start date of last week.
    end_of_last_week (datetime.date): The end date of last week.
    start_of_this_week (datetime.date): The start date of this week.
    end_of_this_week (datetime.date): The end date of this week.

    Returns:
    pd.DataFrame: The processed and aggregated analytics data for this week and last week.
    """
    endpoint = f"{host}/analytics/"
    try:
        response = requests.get(endpoint, timeout=30)
    except requests.RequestException:
        return pd.DataFrame()  # Treat network failures like an empty result
    if response.status_code != 200:
        return pd.DataFrame()
    data = response.json()
    # Process the fetched data
    records = []
    for email_receiver, timestamps in data.items():
        for timestamp, count in timestamps.items():
            records.append({
                'email_receiver': email_receiver,
                'timestamp': pd.to_datetime(timestamp),
                'Total Companies/Executives engaged': count
            })
    df = pd.DataFrame(records)
    if df.empty:
        return df
    # Convert timestamp to datetime.date
    df['day'] = df['timestamp'].dt.date
    # Filter by date range
    this_week_df = df[(df['day'] >= start_of_this_week) & (df['day'] <= end_of_this_week)]
    this_week_df = this_week_df.groupby('email_receiver').agg({'Total Companies/Executives engaged': 'sum'}).reset_index()
    last_week_df = df[(df['day'] >= start_of_last_week) & (df['day'] <= end_of_last_week)]
    last_week_df = last_week_df.groupby('email_receiver').agg({'Total Companies/Executives engaged': 'sum'}).reset_index()
    # Combine this week and last week data
    this_week_df['Week'] = 'This Week'
    last_week_df['Week'] = 'Last Week'
    combined_df = pd.concat([this_week_df, last_week_df])
    return combined_df
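# The /analytics/ endpoint is assumed to return per-receiver counts keyed by timestamp,
# e.g. (shape inferred from the parsing loop above; the exact timestamp format is an
# assumption, anything pd.to_datetime accepts would work):
#   {"[email protected]": {"2024-05-06T10:00:00": 12, "2024-05-07T09:30:00": 5}}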
def display_analytics():
    """
    Displays the analytics data with a time filter and plots.
    """
    cold_host = os.getenv("backend_cold")
    rengagement_host = os.getenv("rengagement_host")
    hook_host = os.getenv("hook_host")
    # Calculate the start and end of this week and last week
    today = datetime.date.today()
    start_of_this_week = today - datetime.timedelta(days=today.weekday())
    end_of_this_week = start_of_this_week + datetime.timedelta(days=6)
    start_of_last_week = start_of_this_week - datetime.timedelta(days=7)
    end_of_last_week = start_of_last_week + datetime.timedelta(days=6)
    cold_df = fetch_analytics_data(cold_host, start_of_last_week, end_of_last_week, start_of_this_week, end_of_this_week)
    rengagement_df = fetch_analytics_data(rengagement_host, start_of_last_week, end_of_last_week, start_of_this_week, end_of_this_week)
    hook_df = fetch_analytics_data(hook_host, start_of_last_week, end_of_last_week, start_of_this_week, end_of_this_week)
    # Calculate the total count of all Total Companies/Executives engaged
    total_engaged = (cold_df['Total Companies/Executives engaged'].sum() if not cold_df.empty else 0) + \
                    (rengagement_df['Total Companies/Executives engaged'].sum() if not rengagement_df.empty else 0) + \
                    (hook_df['Total Companies/Executives engaged'].sum() if not hook_df.empty else 0)

    # Plotting the data
    def plot_data(df, title):
        if df.empty:
            st.warning(f"No data available for {title}.")
            return
        fig = px.bar(df, x='email_receiver', y='Total Companies/Executives engaged', color='Week', barmode='group',
                     title=title, labels={'email_receiver': 'Email Receiver', 'Total Companies/Executives engaged': 'Total Companies/Executives Engaged'})
        st.plotly_chart(fig)
    st.html(f'<h4 class="hero-subtitle">Over the past two weeks, SalesIntel has helped generate {total_engaged:,} personalized emails</h4>')
    col1, col2 = st.columns(2)
    with col1:
        plot_data(cold_df, 'Cold Emails')
    with col2:
        plot_data(hook_df, 'Re-engagement Hooks "Hook2Lead"')
    plot_data(rengagement_df, 'Re-engagement Emails')
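# Hypothetical wiring (the page layout that calls these helpers lives outside this excerpt):
#   email_receiver = st.text_input("Your email")
#   tab1, tab2, tab3, tab4, tab5 = st.tabs(["Cold Companies", "Cold Users", "Hooks", "Re-engagement", "Analytics"])
#   with tab1: CompanySpecificClient(email_receiver)
#   with tab2: UserSpecificClient(email_receiver)
#   with tab3: hooks(email_receiver)
#   with tab4: RengagmentEmail(email_receiver)
#   with tab5: display_analytics()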