#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 1 11:20:18 2024

@author: mohanadafiffy
"""
import os

import pandas as pd
import requests
import streamlit as st

host = os.getenv("backend")
CompanyBackendService = host + '/receive_companies/'
UserBackendService = host + '/receive_users/'
BothFeaturesService = host + '/receive_data/'
NGOEmailsService = host + '/receive_ngo_emails/'
IndustryEmailService = host + '/receive_industry_email/'


def add_https_to_urls(df, column_name):
    """
    Add 'https://' to URLs in the specified column of a DataFrame if they
    don't already start with a valid protocol. Corrects URLs starting with
    'http:/' or 'https:/'. Handles missing values, trims whitespace, and is
    case-insensitive.

    Parameters:
        df (pandas.DataFrame): The DataFrame containing the URLs.
        column_name (str): The name of the column with URLs.
    """
    # Helper that adds or corrects the protocol of a single URL
    def correct_protocol(url):
        if pd.isna(url) or url.strip() == '':
            return url  # Return as is if the URL is NaN or empty
        url = url.strip()  # Trim whitespace
        lower_url = url.lower()
        if lower_url.startswith('http:/') and not lower_url.startswith('http://'):
            return 'http://' + url[6:]
        elif lower_url.startswith('https:/') and not lower_url.startswith('https://'):
            return 'https://' + url[7:]
        elif not lower_url.startswith(('http://', 'https://')):
            return 'https://' + url
        return url

    # Apply the helper function to the specified column
    df[column_name] = df[column_name].apply(correct_protocol)
    return df
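
# A minimal usage sketch for add_https_to_urls (illustrative only; the demo
# frame below is hypothetical and not part of the app flow):
#
#     demo = pd.DataFrame({"Website": ["omdena.com", "http:/omdena.com",
#                                      " HTTPS:/omdena.com ", None]})
#     demo = add_https_to_urls(demo, "Website")
#     # demo["Website"] -> ["https://omdena.com", "http://omdena.com",
#     #                     "https://omdena.com", None]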

def CompanySpecificClient(email_receiver):
    input_data_companies = None
    submitted_companies = False
    uploaded_file = st.file_uploader(
        "Kindly upload a CSV or XLSX file that includes the names and websites of the companies",
        type=["csv", "xlsx"], key="CompanyUploader")
    opt_out_scraping = st.checkbox("Opt out of scraping", key="CompanyScraper")
    with st.form(key='Company_form'):
        if uploaded_file is not None:
            try:
                # Detect the file type and read accordingly
                file_type = uploaded_file.name.split('.')[-1]
                if file_type == 'csv':
                    df = pd.read_csv(uploaded_file)
                elif file_type == 'xlsx':
                    df = pd.read_excel(uploaded_file)

                # Check whether the 'Website' column exists
                if 'Website' not in df.columns:
                    all_columns = df.columns.tolist()
                    website_column = st.selectbox("Select the column for Website:", all_columns, key="CompanyWebsite")
                else:
                    website_column = 'Website'

                # Check whether the 'Company Name for Emails' column exists
                if 'Company Name for Emails' not in df.columns:
                    all_columns = df.columns.tolist()
                    company_column = st.selectbox("Select the column for Company Name for Emails:", all_columns, key="CompanyName")
                else:
                    company_column = 'Company Name for Emails'

                if opt_out_scraping:
                    if 'Company Description' not in df.columns:
                        all_columns = df.columns.tolist()
                        description_column = st.selectbox("Select the column for Description:", all_columns, key="CompanyDescription")
                        df.rename(columns={description_column: 'scraped_content'}, inplace=True)
                    else:
                        df.rename(columns={'Company Description': 'scraped_content'}, inplace=True)
                input_data_companies = df
            except Exception:
                st.error("An error occurred while processing the file")

        prompt_notes = st.text_input("If applicable, please mention the network name", key="CompanyPromptNotes")

        # The submit button returns True only for the run in which it is clicked
        button_clicked = st.form_submit_button("Submit for processing")
        if button_clicked:
            submitted_companies = True

    # Proceed once the form has been submitted and a file was parsed successfully
    if submitted_companies and input_data_companies is not None:
        df = input_data_companies
        df[website_column] = df[website_column].astype(str)
        if not opt_out_scraping:
            df = df[[website_column, company_column]]
            df.columns = ["Website", "Company Name for Emails"]
        else:
            df = df[[website_column, company_column, "scraped_content"]]
            df.columns = ["Website", "Company Name for Emails", "scraped_content"]
        df = df.drop_duplicates(subset="Website", keep='first')
        df = df.dropna()
        df = df.loc[~(df == '').all(axis=1)]
        df = add_https_to_urls(df, 'Website')
        st.write(df)

        # Convert the DataFrame to CSV for transmission
        csv = df.to_csv(index=False)
        data_to_send = {
            "prompt_notes": prompt_notes,
            "dataframe": csv,
            "email_receiver": email_receiver,
            "filename": uploaded_file.name,
        }

        # Send the POST request to the FastAPI backend
        response = requests.post(CompanyBackendService, json=data_to_send)
        if response.status_code == 200:
            st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
        else:
            st.error("Data transmission failed. Please verify that your file contains the columns 'Website' and 'Company Name for Emails', that it is valid, and that it contains records, then try again. If the problem persists, please contact us at mohanad@omdena.com")
    return None
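
# For reference, the JSON body POSTed by CompanySpecificClient has this shape
# (the values shown are illustrative, not real data):
#
#     {
#         "prompt_notes": "Example Network",
#         "dataframe": "Website,Company Name for Emails\nhttps://omdena.com,Omdena\n",
#         "email_receiver": "user@example.com",
#         "filename": "companies.csv"
#     }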

def UserSpecificClient(email_receiver):
    input_data = None
    submitted = None
    column_selections = {}
    uploaded_file = st.file_uploader(
        "Kindly upload a CSV file that includes the names and websites of the companies",
        type=["csv"], key="UserUploader")
    opt_out_scraping = st.checkbox("Opt out of scraping", key="UserScraping")
    with st.form(key='User_Form'):
        if uploaded_file is not None:
            try:
                # Detect the file type and read accordingly
                file_type = uploaded_file.name.split('.')[-1]
                if file_type == 'csv':
                    try:
                        df = pd.read_csv(uploaded_file)
                    except UnicodeDecodeError:
                        # Rewind the buffer before retrying with a fallback encoding
                        uploaded_file.seek(0)
                        df = pd.read_csv(uploaded_file, encoding='ISO-8859-1')

                # Identify required columns missing from the upload
                required_essential_columns = ['First Name', 'Company Name for Emails', 'Email']
                missing_essential_columns = [col for col in required_essential_columns if col not in df.columns]
                required_scraping_columns = ['Title', 'Website', 'Last Name', 'Person Linkedin Url']
                missing_scraping_columns = [col for col in required_scraping_columns if col not in df.columns]

                for col in missing_essential_columns:
                    all_columns = df.columns.tolist()
                    selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
                    column_selections[col] = selected_column

                # Generate selectboxes for missing scraping columns if not opting out
                if not opt_out_scraping:
                    for col in missing_scraping_columns:
                        all_columns = df.columns.tolist()
                        selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
                        column_selections[col] = selected_column

                # Rename the selected columns to their canonical names
                for col, selected_column in column_selections.items():
                    df.rename(columns={selected_column: col}, inplace=True)

                if opt_out_scraping:
                    if 'User Description' not in df.columns:
                        all_columns = df.columns.tolist()
                        description_column = st.selectbox("Select the column for Description:", all_columns, key="userdescription")
                        df.rename(columns={description_column: 'Scrapped Profile'}, inplace=True)
                    else:
                        df.rename(columns={'User Description': 'Scrapped Profile'}, inplace=True)

                # If "Person Linkedin Url" is still missing, derive a unique
                # placeholder for each row from the DataFrame index
                if 'Person Linkedin Url' not in df.columns:
                    df['Person Linkedin Url'] = 'LI_' + df.index.astype(str)
                input_data = df
            except Exception as E:
                st.write(E)
                st.error("An error occurred while processing the file")

        button_clicked = st.form_submit_button("Submit")
        if button_clicked:
            submitted = True

    # Proceed once the form has been submitted and a file was parsed successfully
    if submitted and input_data is not None:
        df = input_data
        df = df.drop_duplicates(subset="Person Linkedin Url", keep='first')
        if opt_out_scraping:
            df = df[['First Name', 'Company Name for Emails', 'Person Linkedin Url', 'Scrapped Profile', 'Email']]
        else:
            df = df[['First Name', 'Last Name', 'Title', 'Website', 'Company Name for Emails', 'Person Linkedin Url', 'Email']]
            df = add_https_to_urls(df, 'Website')
        df = df.dropna()
        df = df.loc[~(df == '').all(axis=1)]
        st.write(df)

        # Convert the DataFrame to CSV for transmission
        csv = df.to_csv(index=False)
        data_to_send = {
            "dataframe": csv,
            "email_receiver": email_receiver,
            "email_template": "False",
            "filename": uploaded_file.name,
        }

        # Send the POST request to the FastAPI backend
        response = requests.post(UserBackendService, json=data_to_send)
        if response.status_code == 200:
            st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
        else:
            st.error("Data transmission failed. Please verify that your file contains the columns 'First Name', 'Company Name for Emails', and 'Email', that it is valid, and that it contains records, then try again. If the problem persists, please contact us at mohanad@omdena.com")
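
# Row-cleanup idiom used throughout this module (shown here for reference):
#
#     df = df.dropna()                      # drop rows with any missing value
#     df = df.loc[~(df == '').all(axis=1)]  # drop rows where every cell is ''
#
# The two steps are deliberately separate so the boolean mask is built from
# the same frame it indexes; chaining both off the original df can raise an
# "unalignable boolean Series" indexing error in recent pandas once dropna()
# has removed rows.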

def bothFeaturesFunction(email_receiver):
    input_data = None
    submitted = None
    column_selections = {}
    uploaded_file = st.file_uploader(
        "Kindly upload a CSV file that includes the names and websites of the companies",
        type=["csv"], key="BothFeaturesUploader")
    opt_out_scraping = st.checkbox("Opt out of scraping", key="BothOptOut")
    with st.form(key='Both_Features_Form'):
        if uploaded_file is not None:
            try:
                # Detect the file type and read accordingly
                file_type = uploaded_file.name.split('.')[-1]
                if file_type == 'csv':
                    try:
                        df = pd.read_csv(uploaded_file)
                    except UnicodeDecodeError:
                        # Rewind the buffer before retrying with a fallback encoding
                        uploaded_file.seek(0)
                        df = pd.read_csv(uploaded_file, encoding='ISO-8859-1')

                # Identify required columns missing from the upload
                required_essential_columns = ['First Name', 'Company Name for Emails', 'Email']
                missing_essential_columns = [col for col in required_essential_columns if col not in df.columns]
                required_scraping_columns = ['Title', 'Last Name', 'Person Linkedin Url', 'Website']
                missing_scraping_columns = [col for col in required_scraping_columns if col not in df.columns]

                for col in missing_essential_columns:
                    all_columns = df.columns.tolist()
                    selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
                    column_selections[col] = selected_column

                # Generate selectboxes for missing scraping columns if not opting out
                if not opt_out_scraping:
                    for col in missing_scraping_columns:
                        all_columns = df.columns.tolist()
                        selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
                        column_selections[col] = selected_column

                # Rename the selected columns to their canonical names
                for col, selected_column in column_selections.items():
                    df.rename(columns={selected_column: col}, inplace=True)

                if opt_out_scraping:
                    if 'Company Description' not in df.columns:
                        all_columns = df.columns.tolist()
                        description_column = st.selectbox("Select the column for Company Description:", all_columns, key="bothCompanyDescription")
                        df.rename(columns={description_column: 'scraped_content'}, inplace=True)
                    else:
                        df.rename(columns={'Company Description': 'scraped_content'}, inplace=True)
                    if 'User Description' not in df.columns:
                        all_columns = df.columns.tolist()
                        description_column = st.selectbox("Select the column for User Description:", all_columns, key="bothuserdescription")
                        df.rename(columns={description_column: 'Scrapped Profile'}, inplace=True)
                    else:
                        df.rename(columns={'User Description': 'Scrapped Profile'}, inplace=True)

                # If "Person Linkedin Url" is still missing, derive a unique
                # placeholder for each row from the DataFrame index
                if 'Person Linkedin Url' not in df.columns:
                    df['Person Linkedin Url'] = 'LI_' + df.index.astype(str)
                input_data = df
            except Exception as E:
                st.write(E)
                st.error("An error occurred while processing the file")

        prompt_notes = st.text_input("If applicable, please mention the network name", key="BothPromptNotes")
        button_clicked = st.form_submit_button("Submit")
        if button_clicked:
            submitted = True

    # Proceed once the form has been submitted and a file was parsed successfully
    if submitted and input_data is not None:
        df = input_data
        df = df.drop_duplicates(subset="Person Linkedin Url", keep='first')
        if opt_out_scraping:
            df = df[['First Name', 'Person Linkedin Url', 'Scrapped Profile', 'Company Name for Emails', 'scraped_content', 'Email']]
        else:
            df = df[['First Name', 'Last Name', 'Title', 'Person Linkedin Url', 'Website', 'Company Name for Emails', 'Email']]
            df = add_https_to_urls(df, 'Website')
        df = df.dropna()
        df = df.loc[~(df == '').all(axis=1)]
        st.write(df)

        # Convert the DataFrame to CSV for transmission
        csv = df.to_csv(index=False)
        data_to_send = {
            "prompt_notes": prompt_notes,
            "dataframe": csv,
            "email_receiver": email_receiver,
            "filename": uploaded_file.name,
        }

        # Send the POST request to the FastAPI backend
        response = requests.post(BothFeaturesService, json=data_to_send)
        if response.status_code == 200:
            st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
        else:
            st.error("Data transmission failed. Please verify that your file contains the columns 'First Name', 'Company Name for Emails', and 'Email', that it is valid, and that it contains records, then try again. If the problem persists, please contact us at mohanad@omdena.com")
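
# Column-mapping pattern shared by the upload handlers in this module
# (illustrative; the mapping values below are hypothetical):
#
#     column_selections = {"First Name": "first", "Email": "work_email"}
#     for col, selected_column in column_selections.items():
#         df.rename(columns={selected_column: col}, inplace=True)
#     # "first" -> "First Name", "work_email" -> "Email"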

def BH_Ngo(email_receiver, calendly_link, sender_name):
    input_data = None
    submitted = None
    column_selections = {}
    uploaded_file = st.file_uploader(
        "Kindly upload a CSV file that includes the names and websites of the companies",
        type=["csv"], key="NGOUploader")
    opt_out_scraping = st.checkbox("Opt out of scraping", key="NGOOptOut")
    with st.form(key='NGO_Form'):
        if uploaded_file is not None:
            try:
                # Detect the file type and read accordingly
                file_type = uploaded_file.name.split('.')[-1]
                if file_type == 'csv':
                    try:
                        df = pd.read_csv(uploaded_file)
                    except UnicodeDecodeError:
                        # Rewind the buffer before retrying with a fallback encoding
                        uploaded_file.seek(0)
                        df = pd.read_csv(uploaded_file, encoding='ISO-8859-1')

                # Identify required columns missing from the upload
                required_essential_columns = ['First Name', 'Company Name for Emails', 'Domain', 'Email']
                missing_essential_columns = [col for col in required_essential_columns if col not in df.columns]
                required_scraping_columns = ['Title', 'Person Linkedin Url', 'Website']
                missing_scraping_columns = [col for col in required_scraping_columns if col not in df.columns]

                for col in missing_essential_columns:
                    all_columns = df.columns.tolist()
                    selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
                    column_selections[col] = selected_column

                # Generate selectboxes for missing scraping columns if not opting out
                if not opt_out_scraping:
                    for col in missing_scraping_columns:
                        all_columns = df.columns.tolist()
                        selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
                        column_selections[col] = selected_column

                # Rename the selected columns to their canonical names
                for col, selected_column in column_selections.items():
                    df.rename(columns={selected_column: col}, inplace=True)

                if opt_out_scraping:
                    if 'User Description' not in df.columns:
                        all_columns = df.columns.tolist()
                        User_description_column = st.selectbox("Select the column for User Description:", all_columns, key="ngoUserDescription")
                        df.rename(columns={User_description_column: 'Scrapped Profile'}, inplace=True)
                    else:
                        df.rename(columns={'User Description': 'Scrapped Profile'}, inplace=True)

                # If "Person Linkedin Url" is still missing, derive a unique
                # placeholder for each row from the DataFrame index
                if 'Person Linkedin Url' not in df.columns:
                    df['Person Linkedin Url'] = 'LI_' + df.index.astype(str)
                input_data = df
            except Exception as E:
                st.write(E)
                st.error("An error occurred while processing the file")

        button_clicked = st.form_submit_button("Submit")
        if button_clicked:
            submitted = True

    # Proceed once the form has been submitted and a file was parsed successfully
    if submitted and input_data is not None:
        df = input_data
        df = df.drop_duplicates(subset="Person Linkedin Url", keep='first')
        if opt_out_scraping:
            df = df[['First Name', 'Person Linkedin Url', 'Scrapped Profile', 'Company Name for Emails', 'Domain', 'Email']]
        else:
            columns_to_select = ['First Name', 'Title', 'Person Linkedin Url', 'Company Name for Emails', 'Domain', 'Website', 'Email']
            df = add_https_to_urls(df, 'Website')
            if 'Last Name' in df.columns:
                columns_to_select.insert(1, 'Last Name')  # Keep 'Last Name' right after 'First Name'
            df = df[columns_to_select]
        df = df.dropna()
        df = df.loc[~(df == '').all(axis=1)]
        st.write(df)

        # Convert the DataFrame to CSV for transmission
        csv = df.to_csv(index=False)
        data_to_send = {
            "dataframe": csv,
            "email_receiver": email_receiver,
            "calendly_link": calendly_link,
            "sender_name": sender_name,
        }

        # Send the POST request to the FastAPI backend
        response = requests.post(NGOEmailsService, json=data_to_send)
        if response.status_code == 200:
            st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
        else:
            st.error("Data transmission failed. Please verify that your file contains the columns 'First Name', 'Company Name for Emails', 'Domain', and 'Email', that it is valid, and that it contains records, then try again. If the problem persists, please contact us at mohanad@omdena.com")

def BH_industry(email_receiver, calendly_link, sender_name):
    input_data_companies = None
    submitted_companies = False
    uploaded_file = st.file_uploader(
        "Kindly upload a CSV or XLSX file that includes the names and websites of the companies",
        type=["csv", "xlsx"], key="IndustryUploader")
    opt_out_scraping = st.checkbox("Opt out of scraping", key="IndustryScraper")
    with st.form(key='Industry_form'):
        if uploaded_file is not None:
            try:
                # Detect the file type and read accordingly
                file_type = uploaded_file.name.split('.')[-1]
                if file_type == 'csv':
                    df = pd.read_csv(uploaded_file)
                elif file_type == 'xlsx':
                    df = pd.read_excel(uploaded_file)

                # Check whether the 'Website' column exists
                if 'Website' not in df.columns:
                    all_columns = df.columns.tolist()
                    website_column = st.selectbox("Select the column for Website:", all_columns, key="IndustryWebsite")
                else:
                    website_column = 'Website'

                if 'First Name' not in df.columns:
                    all_columns = df.columns.tolist()
                    name_column = st.selectbox("Select the column for First Name:", all_columns, key="IndustryFirstName")
                else:
                    name_column = 'First Name'

                # Check whether the 'Company Name for Emails' column exists
                if 'Company Name for Emails' not in df.columns:
                    all_columns = df.columns.tolist()
                    company_column = st.selectbox("Select the column for Company Name:", all_columns, key="IndustryCompanyName")
                else:
                    company_column = 'Company Name for Emails'

                if 'Email' not in df.columns:
                    all_columns = df.columns.tolist()
                    Email_column = st.selectbox("Select the column for Email:", all_columns, key="IndustryEmail")
                else:
                    Email_column = 'Email'

                if opt_out_scraping:
                    if 'Company Description' not in df.columns:
                        all_columns = df.columns.tolist()
                        description_column = st.selectbox("Select the column for Description:", all_columns, key="IndustryDescription")
                        df.rename(columns={description_column: 'scraped_content'}, inplace=True)
                    else:
                        df.rename(columns={'Company Description': 'scraped_content'}, inplace=True)
                input_data_companies = df
            except Exception:
                st.error("An error occurred while processing the file")
        # The submit button returns True only for the run in which it is clicked
        button_clicked = st.form_submit_button("Submit for processing")
        if button_clicked:
            submitted_companies = True

    # Proceed once the form has been submitted and a file was parsed successfully
    if submitted_companies and input_data_companies is not None:
        df = input_data_companies
        df[website_column] = df[website_column].astype(str)
        if not opt_out_scraping:
            df = df[[website_column, company_column, name_column, Email_column]]
            df.columns = ["Website", "Company Name for Emails", "First Name", "Email"]
        else:
            df = df[[website_column, company_column, "scraped_content", name_column, Email_column]]
            df.columns = ["Website", "Company Name for Emails", "scraped_content", "First Name", "Email"]
        df = df.drop_duplicates(subset="Email", keep='first')
        df = df.dropna()
        df = df.loc[~(df == '').all(axis=1)]
        df = add_https_to_urls(df, 'Website')
        st.write(df)

        # Convert the DataFrame to CSV for transmission
        csv = df.to_csv(index=False)
        data_to_send = {
            "dataframe": csv,
            "email_receiver": email_receiver,
            "calendly_link": calendly_link,
            "sender_name": sender_name,
        }

        # Send the POST request to the FastAPI backend
        response = requests.post(IndustryEmailService, json=data_to_send)
        if response.status_code == 200:
            st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
        else:
            st.error("Data transmission failed. Please verify that your file contains the columns 'Website' and 'Company Name for Emails', that it is valid, and that it contains records, then try again. If the problem persists, please contact us at mohanad@omdena.com")
    return None
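
# A hedged sketch of how these page functions might be wired into a Streamlit
# entry point. The sidebar layout, labels, and inputs below are assumptions
# for illustration; the real app may organize navigation differently.
#
#     email = st.text_input("Recipient email")
#     feature = st.sidebar.radio(
#         "Feature",
#         ["Companies", "Users", "Both", "NGO emails", "Industry emails"])
#     if feature == "Companies":
#         CompanySpecificClient(email)
#     elif feature == "Users":
#         UserSpecificClient(email)
#     elif feature == "Both":
#         bothFeaturesFunction(email)
#     elif feature == "NGO emails":
#         BH_Ngo(email, calendly_link, sender_name)       # from extra inputs
#     else:
#         BH_industry(email, calendly_link, sender_name)  # from extra inputs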