MohanadAfiffy committed on
Commit
42ac7a7
·
verified ·
1 Parent(s): 87b5230

Upload clients.py

Browse files
Files changed (1) hide show
  1. clients.py +472 -0
clients.py ADDED
@@ -0,0 +1,472 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ Created on Mon Jan 1 11:20:18 2024
5
+
6
+ @author: mohanadafiffy
7
+ """
8
+ import os
9
+ import streamlit as st
10
+ import pandas as pd
11
+ import requests
12
+ from dotenv import load_dotenv
13
+ import os
14
+
15
+ # Load environment variables from .env file
16
+ load_dotenv()
17
+
18
+ host=os.getenv("Host")
19
+
20
+ CompanyBackendService=host+'/receive_companies/'
21
+ UserBackendService=host+'/receive_users/'
22
+ BothFeaturesService=host+'/receive_data/'
23
+ NGOEmailsService=host+'/receive_ngo_emails/'
24
+ IndustryEmailService=host+'/receive_industry_email/'
25
+
26
+ def CompanySpecificClient(email_receiver):
27
+ input_data_companies = None
28
+ submitted_companies = False
29
+ uploaded_file = st.file_uploader("Kindly upload a CSV file that includes the names and websites of the companies", type=["csv"],key="CompanyUploader")
30
+ opt_out_scraping = st.checkbox("Opt out of scraping",key="CompanyScraper")
31
+ with st.form(key='Comapny_form'):
32
+ if uploaded_file is not None:
33
+
34
+ try:
35
+ # Detect file type and read accordingly
36
+ file_type = uploaded_file.name.split('.')[-1]
37
+ if file_type == 'csv':
38
+ df = pd.read_csv(uploaded_file)
39
+ elif file_type == 'xlsx':
40
+ df = pd.read_excel(uploaded_file)
41
+ # Check if 'Website' column exists
42
+ if 'Website' not in df.columns:
43
+ all_columns = df.columns.tolist()
44
+ website_column = st.selectbox("Select the column for Website:", all_columns,key="CompanyWebsite")
45
+ else:
46
+ website_column = 'Website'
47
+ # Check if 'Company Name for Emails' column exists
48
+ if 'Company Name for Emails' not in df.columns:
49
+ all_columns = df.columns.tolist()
50
+ company_column= st.selectbox("Select the column for Company Name for Emails:", all_columns,key="CompanyName")
51
+ else:
52
+ company_column = 'Company Name for Emails'
53
+
54
+ if opt_out_scraping:
55
+ if 'Company Description' not in df.columns:
56
+ all_columns = df.columns.tolist()
57
+ description_column = st.selectbox("Select the column for Description:", all_columns,key="CompanyDescription")
58
+ df.rename(columns={description_column: 'scraped_content'}, inplace=True)
59
+ else:
60
+ df.rename(columns={'Company Description': 'scraped_content'}, inplace=True)
61
+
62
+ input_data_companies = df
63
+
64
+ except Exception as E :
65
+ st.error("An error occured while processing the file")
66
+
67
+ # Fetch the filtered data
68
+
69
+
70
+
71
+ prompt_notes= st.text_input("If applicable please mention the network name",key="CompanyPromptNotes")
72
+
73
+ # If the button is clicked, it will return True for this run
74
+ button_clicked = st.form_submit_button("Submit for processing")
75
+
76
+ # 2. Update session state for the button
77
+ if button_clicked:
78
+ submitted_companies = True
79
+ # Set the session state to the new value
80
+ prompt_notes = prompt_notes
81
+ # 3. Use the session state variable to determine if the button was previously clicked
82
+ if submitted_companies and input_data_companies is not None:
83
+ df = input_data_companies
84
+ if not opt_out_scraping:
85
+ df[website_column] = df[website_column].astype(str)
86
+ df=df[[website_column,company_column]]
87
+ df.columns = ["Website","Company Name for Emails"]
88
+ df = df.drop_duplicates(subset="Website", keep='first')
89
+ df = df.dropna().loc[~(df == '').all(axis=1)]
90
+ else:
91
+ df[website_column] = df[website_column].astype(str)
92
+ df=df[[website_column,company_column,"scraped_content"]]
93
+ df.columns = ["Website","Company Name for Emails","scraped_content"]
94
+ df = df.drop_duplicates(subset="Website", keep='first')
95
+ df = df.dropna().loc[~(df == '').all(axis=1)]
96
+
97
+ df = df.dropna().loc[~(df == '').all(axis=1)]
98
+ st.write(df)
99
+ # Convert DataFrame to CSV for transmission
100
+ csv = df.to_csv(index=False)
101
+
102
+ # Construct the data to send
103
+ data_to_send = {"prompt_notes": prompt_notes, "dataframe": csv,"email_receiver":email_receiver}
104
+
105
+ # Sending the POST request to FastAPI
106
+ response = requests.post(CompanyBackendService, json=data_to_send)
107
+
108
+ if response.status_code == 200:
109
+ st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
110
+ else:
111
+ st.error("Data transmission failed. Please verify that your file contains the labels 'Company Website' and 'Company Name'. Additionally, ensure that your file is valid and contains records and try again , if the problem persists please contact us at [email protected]")
112
+ return None
113
+ def UserSpecificClient(email_receiver):
114
+ input_data=None
115
+ submitted=None
116
+ column_selections = {}
117
+ uploaded_file = st.file_uploader("Kindly upload a CSV file that includes the names and websites of the companies", type=["csv"],key="UserUploader")
118
+ opt_out_scraping = st.checkbox("Opt out of scraping",key="userSraping")
119
+ with st.form(key='User_Form'):
120
+ if uploaded_file is not None:
121
+ try:
122
+ # Detect file type and read accordingly
123
+ file_type = uploaded_file.name.split('.')[-1]
124
+ if file_type == 'csv':
125
+ try:
126
+ df = pd.read_csv(uploaded_file)
127
+ except:
128
+ df = pd.read_csv(uploaded_file, encoding='ISO-8859-1')
129
+ # Check if 'Person Linkedin Url' column exists
130
+ required_essential_columns = ['First Name','Company Name for Emails','Email']
131
+ missing_essential_columns = [col for col in required_essential_columns if col not in df.columns]
132
+ required_scraping_columns=['Title','Website','Last Name','Person Linkedin Url']
133
+ missing_scraping_columns = [col for col in required_scraping_columns if col not in df.columns]
134
+ for col in missing_essential_columns:
135
+ all_columns = df.columns.tolist()
136
+ selected_column = st.selectbox(f"Select the column for {col}:", all_columns,key=col)
137
+ column_selections[col] = selected_column
138
+ # Generate selectboxes for missing scraping columns if not opting out
139
+ if not opt_out_scraping:
140
+ for col in missing_scraping_columns:
141
+ all_columns = df.columns.tolist()
142
+ selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
143
+ column_selections[col] = selected_column
144
+ # Process the column renaming based on the selections
145
+ for col, selected_column in column_selections.items():
146
+ df.rename(columns={selected_column: col}, inplace=True)
147
+
148
+ if opt_out_scraping:
149
+ if 'User description' not in df.columns:
150
+ all_columns = df.columns.tolist()
151
+ description_column = st.selectbox("Select the column for Description:", all_columns,key="userdescription")
152
+ df.rename(columns={description_column: 'Scrapped Profile'}, inplace=True)
153
+ else:
154
+ df.rename(columns={'User description': 'Scrapped Profile'}, inplace=True)
155
+ # Check if "Person Linkedin Url" is in the DataFrame
156
+ if 'Person Linkedin Url' not in df.columns:
157
+ # Use the DataFrame index to generate a unique value for each row
158
+ # You can adjust this to create a more complex identifier
159
+ df['Person Linkedin Url'] = 'LI_' + df.index.astype(str)
160
+ input_data = df
161
+
162
+ except Exception as E:
163
+ st.write(E)
164
+ st.error("An error occurred while processing the file")
165
+ # If the button is clicked, it will return True for this run
166
+ button_clicked = st.form_submit_button("Submit")
167
+
168
+ # Update session state for the button
169
+ if button_clicked:
170
+ submitted = True
171
+
172
+ # Use the session state variable to determine if the button was previously clicked
173
+ if submitted and input_data is not None:
174
+ df = input_data
175
+ df = df.drop_duplicates(subset="Person Linkedin Url", keep='first')
176
+ if opt_out_scraping:
177
+ df=df[['First Name','Company Name for Emails','Person Linkedin Url','Scrapped Profile','Email']]
178
+ else:
179
+ df=df[['First Name', 'Last Name', 'Title', 'Website','Company Name for Emails','Person Linkedin Url','Email']]
180
+
181
+ # Convert DataFrame to CSV for transmission
182
+ df = df.dropna().loc[~(df == '').all(axis=1)]
183
+ st.write(df)
184
+ csv = df.to_csv(index=False)
185
+
186
+ # Construct the data to send
187
+
188
+ data_to_send = {"dataframe": csv, "email_receiver": email_receiver,"email_template":"False"}
189
+
190
+ # Sending the POST request to FastAPI
191
+ response = requests.post(UserBackendService, json=data_to_send)
192
+
193
+ if response.status_code == 200:
194
+ st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
195
+ else:
196
+ st.error("Data transmission failed. Please verify that your file contains the labels 'Company' and 'Person Linkedin Url'. Additionally, ensure that your file is valid and contains records and try again, if the problem persists please contact us at [email protected]")
197
+
198
+ def bothFeaturesFunction(email_receiver):
199
+ input_data=None
200
+ submitted=None
201
+ column_selections = {}
202
+ uploaded_file = st.file_uploader("Kindly upload a CSV file that includes the names and websites of the companies", type=["csv"],key="BothFeaturesUploader")
203
+ opt_out_scraping = st.checkbox("Opt out of scraping",key="BothOptOut")
204
+ with st.form(key='User_Form'):
205
+ if uploaded_file is not None:
206
+ try:
207
+ # Detect file type and read accordingly
208
+ file_type = uploaded_file.name.split('.')[-1]
209
+ if file_type == 'csv':
210
+ try:
211
+ df = pd.read_csv(uploaded_file)
212
+ except:
213
+ df = pd.read_csv(uploaded_file, encoding='ISO-8859-1')
214
+ # Check if 'Person Linkedin Url' column exists
215
+ required_essential_columns = ['First Name','Company Name for Emails','Email']
216
+ missing_essential_columns = [col for col in required_essential_columns if col not in df.columns]
217
+ required_scraping_columns=['Title','Last Name','Person Linkedin Url','Website']
218
+ missing_scraping_columns = [col for col in required_scraping_columns if col not in df.columns]
219
+ for col in missing_essential_columns:
220
+ all_columns = df.columns.tolist()
221
+ selected_column = st.selectbox(f"Select the column for {col}:", all_columns,key=col)
222
+ column_selections[col] = selected_column
223
+ # Generate selectboxes for missing scraping columns if not opting out
224
+ if not opt_out_scraping:
225
+ for col in missing_scraping_columns:
226
+ all_columns = df.columns.tolist()
227
+ selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
228
+ column_selections[col] = selected_column
229
+ # Process the column renaming based on the selections
230
+ for col, selected_column in column_selections.items():
231
+ df.rename(columns={selected_column: col}, inplace=True)
232
+
233
+ if opt_out_scraping:
234
+ if 'Company Description' not in df.columns:
235
+ all_columns = df.columns.tolist()
236
+ description_column = st.selectbox("Select the column for Company Description:", all_columns,key="bothCompanyDescription")
237
+ df.rename(columns={description_column: 'scraped_content'}, inplace=True)
238
+ else:
239
+ df.rename(columns={'Company Description': 'scraped_content'}, inplace=True)
240
+ if 'User description' not in df.columns:
241
+ all_columns = df.columns.tolist()
242
+ description_column = st.selectbox("Select the column for User Description:", all_columns,key="bothuserdescription")
243
+ df.rename(columns={description_column: 'Scrapped Profile'}, inplace=True)
244
+ else:
245
+ df.rename(columns={'User description': 'Scrapped Profile'}, inplace=True)
246
+ # Check if "Person Linkedin Url" is in the DataFrame
247
+ if 'Person Linkedin Url' not in df.columns:
248
+ # Use the DataFrame index to generate a unique value for each row
249
+ # You can adjust this to create a more complex identifier
250
+ df['Person Linkedin Url'] = 'LI_' + df.index.astype(str)
251
+ input_data = df
252
+
253
+ except Exception as E:
254
+ st.write(E)
255
+ st.error("An error occurred while processing the file")
256
+ # If the button is clicked, it will return True for this run
257
+ prompt_notes= st.text_input("If applicable please mention the network name",key="CompanyPromptNotes")
258
+ button_clicked = st.form_submit_button("Submit")
259
+
260
+ # Update session state for the button
261
+ if button_clicked:
262
+ submitted = True
263
+
264
+ # Use the session state variable to determine if the button was previously clicked
265
+ if submitted and input_data is not None:
266
+ df = input_data
267
+ df = df.drop_duplicates(subset="Person Linkedin Url", keep='first')
268
+ if opt_out_scraping:
269
+ df=df[['First Name','Person Linkedin Url','Scrapped Profile',"Company Name for Emails","scraped_content","Email"]]
270
+ else:
271
+ df=df[['First Name', 'Last Name', 'Title', 'Person Linkedin Url',"Website","Company Name for Emails","Email"]]
272
+
273
+ df = df.dropna().loc[~(df == '').all(axis=1)]
274
+ st.write(df)
275
+ # Convert DataFrame to CSV for transmission
276
+ csv = df.to_csv(index=False)
277
+
278
+ # Construct the data to send
279
+ data_to_send = {"prompt_notes": prompt_notes, "dataframe": csv,"email_receiver":email_receiver}
280
+
281
+ # Sending the POST request to FastAPI
282
+ response = requests.post(BothFeaturesService, json=data_to_send)
283
+
284
+ if response.status_code == 200:
285
+ st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
286
+ else:
287
+ st.error("Data transmission failed. Please verify that your file contains the labels 'Company' and 'Person Linkedin Url'. Additionally, ensure that your file is valid and contains records and try again, if the problem persists please contact us at [email protected]")
288
+
289
+ def BH_Ngo(email_receiver,calendly_link,sender_name):
290
+ input_data=None
291
+ submitted=None
292
+ column_selections = {}
293
+ uploaded_file = st.file_uploader("Kindly upload a CSV file that includes the names and websites of the companies", type=["csv"],key="BothFeaturesUploader")
294
+ opt_out_scraping = st.checkbox("Opt out of scraping",key="BothOptOut")
295
+ with st.form(key='User_Form'):
296
+ if uploaded_file is not None:
297
+ try:
298
+ # Detect file type and read accordingly
299
+ file_type = uploaded_file.name.split('.')[-1]
300
+ if file_type == 'csv':
301
+ try:
302
+ df = pd.read_csv(uploaded_file)
303
+ except:
304
+ df = pd.read_csv(uploaded_file, encoding='ISO-8859-1')
305
+ # Check if 'Person Linkedin Url' column exists
306
+ required_essential_columns = ['First Name','Company Name for Emails','Domain','Email']
307
+ missing_essential_columns = [col for col in required_essential_columns if col not in df.columns]
308
+ required_scraping_columns=['Title','Person Linkedin Url','Website']
309
+ missing_scraping_columns = [col for col in required_scraping_columns if col not in df.columns]
310
+ for col in missing_essential_columns:
311
+ all_columns = df.columns.tolist()
312
+ selected_column = st.selectbox(f"Select the column for {col}:", all_columns,key=col)
313
+ column_selections[col] = selected_column
314
+ # Generate selectboxes for missing scraping columns if not opting out
315
+ if not opt_out_scraping:
316
+ for col in missing_scraping_columns:
317
+ all_columns = df.columns.tolist()
318
+ selected_column = st.selectbox(f"Select the column for {col}:", all_columns, key=col)
319
+ column_selections[col] = selected_column
320
+ # Process the column renaming based on the selections
321
+ for col, selected_column in column_selections.items():
322
+ df.rename(columns={selected_column: col}, inplace=True)
323
+
324
+ if opt_out_scraping:
325
+ if 'User description' not in df.columns:
326
+ all_columns = df.columns.tolist()
327
+ User_description_column = st.selectbox("Select the column for User Description:", all_columns,key="bothuserdescription")
328
+ df.rename(columns={User_description_column: 'Scrapped Profile'}, inplace=True)
329
+ else:
330
+ df.rename(columns={'User description': 'Scrapped Profile'}, inplace=True)
331
+ # Check if "Person Linkedin Url" is in the DataFrame
332
+ if 'Person Linkedin Url' not in df.columns:
333
+ # Use the DataFrame index to generate a unique value for each row
334
+ # You can adjust this to create a more complex identifier
335
+ df['Person Linkedin Url'] = 'LI_' + df.index.astype(str)
336
+ input_data = df
337
+
338
+ except Exception as E:
339
+ st.write(E)
340
+ st.error("An error occurred while processing the file")
341
+ # If the button is clicked, it will return True for this run
342
+ button_clicked = st.form_submit_button("Submit")
343
+
344
+ # Update session state for the button
345
+ if button_clicked:
346
+ submitted = True
347
+
348
+ # Use the session state variable to determine if the button was previously clicked
349
+ if submitted and input_data is not None:
350
+ df = input_data
351
+
352
+ df = df.drop_duplicates(subset="Person Linkedin Url", keep='first')
353
+ if opt_out_scraping:
354
+ df=df[['First Name','Person Linkedin Url','Scrapped Profile',"Company Name for Emails","Domain","Email"]]
355
+
356
+ else:
357
+
358
+ columns_to_select = ['First Name', 'Title', 'Person Linkedin Url', "Company Name for Emails", "Domain","Website","Email"]
359
+ if 'Last Name' in df.columns:
360
+ columns_to_select.insert(1, 'Last Name') # Insert 'Last Name' at the correct position
361
+
362
+ df = df[columns_to_select]
363
+
364
+
365
+ # Convert DataFrame to CSV for transmission
366
+ df = df.dropna().loc[~(df == '').all(axis=1)]
367
+ st.write(df)
368
+ csv = df.to_csv(index=False)
369
+
370
+ # Construct the data to send
371
+ data_to_send = {"dataframe": csv, "email_receiver": email_receiver,"calendly_link":calendly_link,"sender_name":sender_name}
372
+
373
+ # Sending the POST request to FastAPI
374
+ response = requests.post(NGOEmailsService, json=data_to_send)
375
+
376
+ if response.status_code == 200:
377
+ st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
378
+ else:
379
+ st.error("Data transmission failed. Please verify that your file contains the labels 'Company' and 'Person Linkedin Url'. Additionally, ensure that your file is valid and contains records and try again, if the problem persists please contact us at [email protected]")
380
+
381
+ def BH_industry(email_receiver,calendly_link,sender_name):
382
+ input_data_companies = None
383
+ submitted_companies = False
384
+ uploaded_file = st.file_uploader("Kindly upload a CSV file that includes the names and websites of the companies", type=["csv"],key="CompanyUploader")
385
+ opt_out_scraping = st.checkbox("Opt out of scraping",key="CompanyScraper")
386
+ with st.form(key='Comapny_form'):
387
+ if uploaded_file is not None:
388
+
389
+ try:
390
+ # Detect file type and read accordingly
391
+ file_type = uploaded_file.name.split('.')[-1]
392
+ if file_type == 'csv':
393
+ df = pd.read_csv(uploaded_file)
394
+ elif file_type == 'xlsx':
395
+ df = pd.read_excel(uploaded_file)
396
+ # Check if 'Website' column exists
397
+ if 'Website' not in df.columns:
398
+ all_columns = df.columns.tolist()
399
+ website_column = st.selectbox("Select the column for Website:", all_columns,key="CompanyWebsite")
400
+ else:
401
+ website_column = 'Website'
402
+ if 'First Name' not in df.columns:
403
+ all_columns = df.columns.tolist()
404
+ name_column = st.selectbox("Select the column for first name:", all_columns,key="firstname")
405
+ else:
406
+ name_column = 'First Name'
407
+ # Check if 'Company Name for Emails' column exists
408
+ if 'Company Name for Emails' not in df.columns:
409
+ all_columns = df.columns.tolist()
410
+ company_column= st.selectbox("Select the column for Company Name :", all_columns,key="CompanyName")
411
+ else:
412
+ company_column = 'Company Name for Emails'
413
+
414
+ if 'Email' not in df.columns:
415
+ all_columns = df.columns.tolist()
416
+ Email_column= st.selectbox("Select the column for email:", all_columns,key="Companyemail")
417
+ else:
418
+ Email_column = 'Email'
419
+ if opt_out_scraping:
420
+ if 'Company Description' not in df.columns:
421
+ all_columns = df.columns.tolist()
422
+ description_column = st.selectbox("Select the column for Description:", all_columns,key="CompanyDescription")
423
+ df.rename(columns={description_column: 'scraped_content'}, inplace=True)
424
+ else:
425
+ df.rename(columns={'Company Description': 'scraped_content'}, inplace=True)
426
+
427
+ input_data_companies = df
428
+
429
+ except Exception as E :
430
+ st.error("An error occured while processing the file")
431
+
432
+ # Fetch the filtered data
433
+
434
+
435
+ # If the button is clicked, it will return True for this run
436
+ button_clicked = st.form_submit_button("Submit for processing")
437
+
438
+ # 2. Update session state for the button
439
+ if button_clicked:
440
+ submitted_companies = True
441
+ # 3. Use the session state variable to determine if the button was previously clicked
442
+ if submitted_companies and input_data_companies is not None:
443
+ df = input_data_companies
444
+ if not opt_out_scraping:
445
+ df[website_column] = df[website_column].astype(str)
446
+ df=df[[website_column,company_column,name_column,Email_column]]
447
+ df.columns = ["Website","Company Name for Emails","First Name","Email"]
448
+ df = df.drop_duplicates(subset="Website", keep='first')
449
+ df = df.dropna().loc[~(df == '').all(axis=1)]
450
+ else:
451
+ df[website_column] = df[website_column].astype(str)
452
+ df=df[[website_column,company_column,"scraped_content",name_column,Email_column]]
453
+ df.columns = ["Website","Company Name for Emails","scraped_content","First Name","Email"]
454
+ df = df.drop_duplicates(subset="Website", keep='first')
455
+ df = df.dropna().loc[~(df == '').all(axis=1)]
456
+
457
+ df = df.dropna().loc[~(df == '').all(axis=1)]
458
+ st.write(df)
459
+ # Convert DataFrame to CSV for transmission
460
+ csv = df.to_csv(index=False)
461
+
462
+ # Construct the data to send
463
+ data_to_send = {"dataframe": csv, "email_receiver": email_receiver,"calendly_link":calendly_link,"sender_name":sender_name}
464
+
465
+ # Sending the POST request to FastAPI
466
+ response = requests.post(IndustryEmailService, json=data_to_send)
467
+
468
+ if response.status_code == 200:
469
+ st.info(f"We're processing your request. You can close the app now. An email will be sent to {email_receiver} once the process is finished.")
470
+ else:
471
+ st.error("Data transmission failed. Please verify that your file contains the labels 'Company Website' and 'Company Name'. Additionally, ensure that your file is valid and contains records and try again , if the problem persists please contact us at [email protected]")
472
+ return None