DreamStream-1 committed on
Commit d3aead7 · verified · 1 Parent(s): 956a1f8

Update app.py

Files changed (1)
  1. app.py +491 -262
app.py CHANGED
@@ -1,348 +1,577 @@
- import streamlit as st
  import nltk
  import numpy as np
  import tflearn
  import tensorflow
  import random
  import json
  import pickle
- import gradio as gr
  from nltk.tokenize import word_tokenize
  from nltk.stem.lancaster import LancasterStemmer
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
  from transformers import pipeline
  import requests
  import csv
  import time
  import re
  from bs4 import BeautifulSoup
  import pandas as pd
  from selenium import webdriver
  from selenium.webdriver.chrome.options import Options
  import chromedriver_autoinstaller
  import os
  import torch

  # Ensure necessary NLTK resources are downloaded
  nltk.download('punkt')

  # Initialize the stemmer
  stemmer = LancasterStemmer()

  # Load intents.json
  try:
-     with open("intents.json") as file:
-         data = json.load(file)
  except FileNotFoundError:
-     raise FileNotFoundError("Error: 'intents.json' file not found. Ensure it exists in the current directory.")

  # Load preprocessed data from pickle
  try:
-     with open("data.pickle", "rb") as f:
-         words, labels, training, output = pickle.load(f)
  except FileNotFoundError:
-     raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")

  # Build the model structure
  net = tflearn.input_data(shape=[None, len(training[0])])
  net = tflearn.fully_connected(net, 8)
  net = tflearn.fully_connected(net, 8)
  net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
  net = tflearn.regression(net)

  # Load the trained model
  model = tflearn.DNN(net)
  try:
-     model.load("MentalHealthChatBotmodel.tflearn")
  except FileNotFoundError:
-     raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found.")

  # Function to process user input into a bag-of-words format
  def bag_of_words(s, words):
-     bag = [0 for _ in range(len(words))]
-     s_words = word_tokenize(s)
-     s_words = [stemmer.stem(word.lower()) for word in s_words if word.lower() in words]
-     for se in s_words:
-         for i, w in enumerate(words):
-             if w == se:
-                 bag[i] = 1
-     return np.array(bag)

  # Chat function
  def chat(message, history):
-     history = history or []
-     message = message.lower()
-
-     try:
-         # Predict the tag
-         results = model.predict([bag_of_words(message, words)])
-         results_index = np.argmax(results)
-         tag = labels[results_index]
-
-         # Match tag with intent and choose a random response
-         for tg in data["intents"]:
-             if tg['tag'] == tag:
-                 responses = tg['responses']
-                 response = random.choice(responses)
-                 break
-         else:
-             response = "I'm sorry, I didn't understand that. Could you please rephrase?"
-
-     except Exception as e:
-         response = f"An error occurred: {str(e)}"
-
-     history.append((message, response))
-     return history, history

  # Load pre-trained model and tokenizer for sentiment analysis
  tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
  sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

  # Load pre-trained model and tokenizer for emotion detection
  emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
  emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

  # Function for sentiment analysis
  def analyze_sentiment(text):
-     inputs = tokenizer(text, return_tensors="pt")
-     with torch.no_grad():
-         outputs = sentiment_model(**inputs)
-     predicted_class = torch.argmax(outputs.logits, dim=1).item()
-     sentiment = ["Negative", "Neutral", "Positive"][predicted_class]
-     return sentiment

  # Function for emotion detection
  def detect_emotion(text):
-     pipe = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)
-     result = pipe(text)
-     emotion = result[0]['label']
-     return emotion

  # Function to scrape website URL from Google Maps using Selenium
  def scrape_website_from_google_maps(place_name):
-     chrome_options = Options()
-     chrome_options.add_argument("--headless")
-     chrome_options.add_argument("--no-sandbox")
-     chrome_options.add_argument("--disable-dev-shm-usage")
-     driver = webdriver.Chrome(options=chrome_options)
-     search_url = f"https://www.google.com/maps/search/{place_name.replace(' ', '+')}"
-     driver.get(search_url)
-     time.sleep(5)
-     try:
-         website_element = driver.find_element_by_xpath('//a[contains(@aria-label, "Visit") and contains(@aria-label, "website")]')
-         website_url = website_element.get_attribute('href')
-     except:
-         website_url = "Not available"
-     driver.quit()
-     return website_url

  # Function to scrape website for contact information
  def scrape_website_for_contact_info(website):
-     phone_number = "Not available"
-     email = "Not available"
-     try:
-         response = requests.get(website, timeout=5)
-         soup = BeautifulSoup(response.content, 'html.parser')
-         phone_match = re.search(r'\(?\+?[0-9]*\)?[0-9_\- \(\)]*', soup.get_text())
-         if phone_match:
-             phone_number = phone_match.group()
-         email_match = re.search(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}', soup.get_text())
-         if email_match:
-             email = email_match.group()
-     except Exception as e:
-         print(f"Error scraping website {website}: {e}")
-     return phone_number, email

  # Function to fetch detailed information for a specific place using its place_id
  def get_place_details(place_id, api_key):
-     details_url = "https://maps.googleapis.com/maps/api/place/details/json"
-     params = {
-         "place_id": place_id,
-         "key": api_key
-     }
-     response = requests.get(details_url, params=params)
-     if response.status_code == 200:
-         details_data = response.json().get("result", {})
-         return {
-             "opening_hours": details_data.get("opening_hours", {}).get("weekday_text", "Not available"),
-             "reviews": details_data.get("reviews", "Not available"),
-             "phone_number": details_data.get("formatted_phone_number", "Not available"),
-             "website": details_data.get("website", "Not available")
-         }
-     else:
-         return {}

  # Function to get all places data including pagination
  def get_all_places(query, location, radius, api_key):
-     all_results = []
-     next_page_token = None
-     while True:
-         data = get_places_data(query, location, radius, api_key, next_page_token)
-         if data:
-             results = data.get('results', [])
-             for place in results:
-                 place_id = place.get("place_id")
-                 name = place.get("name")
-                 address = place.get("formatted_address")
-                 rating = place.get("rating", "Not available")
-                 business_status = place.get("business_status", "Not available")
-                 user_ratings_total = place.get("user_ratings_total", "Not available")
-                 website = place.get("website", "Not available")
-                 types = ", ".join(place.get("types", []))
-                 location = place.get("geometry", {}).get("location", {})
-                 latitude = location.get("lat", "Not available")
-                 longitude = location.get("lng", "Not available")
-                 details = get_place_details(place_id, api_key)
-                 phone_number = details.get("phone_number", "Not available")
-                 if phone_number == "Not available" and website != "Not available":
-                     phone_number, email = scrape_website_for_contact_info(website)
-                 else:
-                     email = "Not available"
-                 if website == "Not available":
-                     website = scrape_website_from_google_maps(name)
-                 all_results.append([name, address, phone_number, rating, business_status,
-                                     user_ratings_total, website, types, latitude, longitude,
-                                     details.get("opening_hours", "Not available"),
-                                     details.get("reviews", "Not available"), email
-                                     ])
-             next_page_token = data.get('next_page_token')
-             if not next_page_token:
-                 break
-             time.sleep(2)
-         else:
-             break
-     return all_results

  # Function to save results to CSV file
  def save_to_csv(data, filename):
-     with open(filename, mode='w', newline='', encoding='utf-8') as file:
-         writer = csv.writer(file)
-         writer.writerow([
-             "Name", "Address", "Phone", "Rating", "Business Status",
-             "User Ratings Total", "Website", "Types", "Latitude", "Longitude",
-             "Opening Hours", "Reviews", "Email"
-         ])
-         writer.writerows(data)
-         print(f"Data saved to {filename}")

  # Function to get places data from Google Places API
  def get_places_data(query, location, radius, api_key, next_page_token=None):
-     url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
-     params = {
-         "query": query,
-         "location": location,
-         "radius": radius,
-         "key": api_key
-     }
-     if next_page_token:
-         params["pagetoken"] = next_page_token
-     response = requests.get(url, params=params)
-     if response.status_code == 200:
-         data = response.json()
-         return data
-     else:
-         print(f"Error: {response.status_code} - {response.text}")
-         return None
-
- # Set page config
- st.set_page_config(page_title="Wellbeing Support System", layout="wide")
-
- # Display header
- st.title("Wellbeing Support System")
-
- # User input for location
- location = st.text_input("Enter your location:", "Hawaii")
-
- # Tabs for different functionalities
- tabs = ["Chatbot", "Sentiment Analysis", "Emotion Detection & Suggestions", "Find Local Wellness Professionals"]
- selected_tab = st.selectbox("Select a functionality:", tabs)
-
- if selected_tab == "Chatbot":
-     # Chatbot functionality
-     st.subheader("Chat with the Mental Health Support Bot")
-     chatbot = gr.Chatbot(label="Chat")
-     demo = gr.Interface(
-         chat,
-         [gr.Textbox(lines=1, label="Message"), "state"],
-         [chatbot, "state"],
-         allow_flagging="never",
-         title="Wellbeing for All, ** I am your Best Friend **",
-     )
-     demo.launch()
-
- elif selected_tab == "Sentiment Analysis":
-     # Sentiment Analysis
-     st.subheader("Sentiment Analysis")
-     user_input = st.text_area("Enter text to analyze sentiment:")
-     if st.button("Analyze Sentiment"):
-         if user_input:
-             sentiment = analyze_sentiment(user_input)
-             st.write(f"**Sentiment:** {sentiment}")
-         else:
-             st.warning("Please enter some text to analyze.")
-
- elif selected_tab == "Emotion Detection & Suggestions":
-     # Emotion Detection and Suggestions
-     st.subheader("Emotion Detection and Well-Being Suggestions")
-     user_input = st.text_area("How are you feeling today?", "Enter your thoughts here...")
-     if st.button("Detect Emotion"):
-         if user_input:
-             emotion = detect_emotion(user_input)
-             st.write(f"**Emotion Detected:** {emotion}")
-             # Provide suggestions based on the detected emotion
-             if emotion == 'joy':
-                 st.write("You're feeling happy! Keep up the great mood!")
-                 st.write("Useful Resources:")
-                 st.markdown("[Relaxation Techniques](https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation)")
-                 st.write("[Dealing with Stress](https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety)")
-                 st.write("[Emotional Wellness Toolkit](https://www.nih.gov/health-information/emotional-wellness-toolkit)")
-                 st.write("Relaxation Videos:")
-                 st.markdown("[Watch on YouTube](https://youtu.be/m1vaUGtyo-A)")
-             elif emotion == 'anger':
-                 st.write("You're feeling angry. It's okay to feel this way. Let's try to calm down.")
-                 st.write("Useful Resources:")
-                 st.markdown("[Emotional Wellness Toolkit](https://www.nih.gov/health-information/emotional-wellness-toolkit)")
-                 st.write("[Stress Management Tips](https://www.health.harvard.edu/health-a-to-z)")
-                 st.write("[Dealing with Anger](https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety)")
-                 st.write("Relaxation Videos:")
-                 st.markdown("[Watch on YouTube](https://youtu.be/MIc299Flibs)")
-             # Add more conditions for other emotions...
-         else:
-             st.warning("Please enter some text to analyze.")
-
- elif selected_tab == "Find Local Wellness Professionals":
-     # Find Local Wellness Professionals
-     st.subheader("Find Local Wellness Professionals")
-     if st.button("Search"):
-         # Define search parameters
-         query = "therapist OR counselor OR mental health professional OR marriage and family therapist OR psychotherapist OR psychiatrist OR psychologist OR nutritionist OR wellness doctor OR holistic practitioner OR integrative medicine OR chiropractor OR naturopath in " + location
-         api_key = "AIzaSyCcfJzMFfuv_1LN7JPTJJYw_aS0A_SLeW0" # Replace with your own Google API key
-         location_coords = "21.3,-157.8" # Default to Oahu, Hawaii
-         radius = 50000 # 50 km radius
-
-         # Install Chrome and Chromedriver
-         def install_chrome_and_driver():
-             os.system("apt-get update")
-             os.system("apt-get install -y wget curl")
-             os.system("wget -q https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb")
-             os.system("dpkg -i google-chrome-stable_current_amd64.deb")
-             os.system("apt-get install -y -f")
-             os.system("google-chrome-stable --version")
-             chromedriver_autoinstaller.install()
-
-         install_chrome_and_driver()
-
-         # Get all places data
-         google_places_data = get_all_places(query, location_coords, radius, api_key)
-         if google_places_data:
-             # Display the results
-             df = pd.DataFrame(google_places_data, columns=[
-                 "Name", "Address", "Phone", "Rating", "Business Status",
-                 "User Ratings Total", "Website", "Types", "Latitude", "Longitude",
-                 "Opening Hours", "Reviews", "Email"
-             ])
-             st.write(df)
-             # Save to CSV
-             save_to_csv(google_places_data, "wellness_professionals.csv")
-         else:
-             st.write("No data found.")
+ import gradio as gr
+
  import nltk
+
  import numpy as np
+
  import tflearn
+
  import tensorflow
+
  import random
+
  import json
+
  import pickle
+
+ import nltk
+
  from nltk.tokenize import word_tokenize
+
  from nltk.stem.lancaster import LancasterStemmer
+
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
  from transformers import pipeline
+
  import requests
+
  import csv
+
  import time
+
  import re
+
  from bs4 import BeautifulSoup
+
  import pandas as pd
+
  from selenium import webdriver
+
  from selenium.webdriver.chrome.options import Options
+
  import chromedriver_autoinstaller
+
  import os
+
  import torch

  # Ensure necessary NLTK resources are downloaded
+
  nltk.download('punkt')

  # Initialize the stemmer
+
  stemmer = LancasterStemmer()

  # Load intents.json
+
  try:
+
+     with open("intents.json") as file:
+
+         data = json.load(file)
+
  except FileNotFoundError:
+
+     raise FileNotFoundError("Error: 'intents.json' file not found. Ensure it exists in the current directory.")

  # Load preprocessed data from pickle
+
  try:
+
+     with open("data.pickle", "rb") as f:
+
+         words, labels, training, output = pickle.load(f)
+
  except FileNotFoundError:
+
+     raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")

  # Build the model structure
+
  net = tflearn.input_data(shape=[None, len(training[0])])
+
  net = tflearn.fully_connected(net, 8)
+
  net = tflearn.fully_connected(net, 8)
+
  net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
+
  net = tflearn.regression(net)

  # Load the trained model
+
  model = tflearn.DNN(net)
+
  try:
+
+     model.load("MentalHealthChatBotmodel.tflearn")
+
  except FileNotFoundError:
+
+     raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found.")

  # Function to process user input into a bag-of-words format
+
  def bag_of_words(s, words):
+
+     bag = [0 for _ in range(len(words))]
+
+     s_words = word_tokenize(s)
+
+     s_words = [stemmer.stem(word.lower()) for word in s_words if word.lower() in words]
+
+     for se in s_words:
+
+         for i, w in enumerate(words):
+
+             if w == se:
+
+                 bag[i] = 1
+
+     return np.array(bag)

  # Chat function
+
  def chat(message, history):
+
+     history = history or []
+
+     message = message.lower()
+
+     try:
+
+         # Predict the tag
+
+         results = model.predict([bag_of_words(message, words)])
+
+         results_index = np.argmax(results)
+
+         tag = labels[results_index]
+
+         # Match tag with intent and choose a random response
+
+         for tg in data["intents"]:
+
+             if tg['tag'] == tag:
+
+                 responses = tg['responses']
+
+                 response = random.choice(responses)
+
+                 break
+
+         else:
+
+             response = "I'm sorry, I didn't understand that. Could you please rephrase?"
+
+     except Exception as e:
+
+         response = f"An error occurred: {str(e)}"
+
+     history.append((message, response))
+
+     return history, history

  # Load pre-trained model and tokenizer for sentiment analysis
+
  tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
+
  sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

  # Load pre-trained model and tokenizer for emotion detection
+
  emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
+
  emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

  # Function for sentiment analysis
+
  def analyze_sentiment(text):
+
+     inputs = tokenizer(text, return_tensors="pt")
+
+     with torch.no_grad():
+
+         outputs = sentiment_model(**inputs)
+
+     predicted_class = torch.argmax(outputs.logits, dim=1).item()
+
+     sentiment = ["Negative", "Neutral", "Positive"][predicted_class]
+
+     return sentiment

  # Function for emotion detection
+
  def detect_emotion(text):
+
+     pipe = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)
+
+     result = pipe(text)
+
+     emotion = result[0]['label']
+
+     return emotion
+
+ # Function to provide suggestions based on emotion
+
+ def provide_suggestions(emotion):
+
+     if emotion == 'joy':
+
+         return "You're feeling happy! Keep up the great mood!"
+
+     elif emotion == 'anger':
+
+         return "You're feeling angry. It's okay to feel this way. Let's try to calm down."
+
+     # Add more conditions for other emotions...
+
+     else:
+
+         return "Sorry, no suggestions available for this emotion."
+
+ # Combined function for emotion detection and suggestions
+
+ def detect_emotion_and_suggest(text):
+
+     emotion = detect_emotion(text)
+
+     suggestions = provide_suggestions(emotion)
+
+     return emotion, suggestions

  # Function to scrape website URL from Google Maps using Selenium
+
  def scrape_website_from_google_maps(place_name):
+
+     chrome_options = Options()
+
+     chrome_options.add_argument("--headless")
+
+     chrome_options.add_argument("--no-sandbox")
+
+     chrome_options.add_argument("--disable-dev-shm-usage")
+
+     driver = webdriver.Chrome(options=chrome_options)
+
+     search_url = f"https://www.google.com/maps/search/{place_name.replace(' ', '+')}"
+
+     driver.get(search_url)
+
+     time.sleep(5)
+
+     try:
+
+         website_element = driver.find_element_by_xpath('//a[contains(@aria-label, "Visit") and contains(@aria-label, "website")]')
+
+         website_url = website_element.get_attribute('href')
+
+     except:
+
+         website_url = "Not available"
+
+     driver.quit()
+
+     return website_url

  # Function to scrape website for contact information
+
  def scrape_website_for_contact_info(website):
+
+     phone_number = "Not available"
+
+     email = "Not available"
+
+     try:
+
+         response = requests.get(website, timeout=5)
+
+         soup = BeautifulSoup(response.content, 'html.parser')
+
+         phone_match = re.search(r'\(?\+?[0-9]*\)?[0-9_\- \(\)]*', soup.get_text())
+
+         if phone_match:
+
+             phone_number = phone_match.group()
+
+         email_match = re.search(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}', soup.get_text())
+
+         if email_match:
+
+             email = email_match.group()
+
+     except Exception as e:
+
+         print(f"Error scraping website {website}: {e}")
+
+     return phone_number, email

  # Function to fetch detailed information for a specific place using its place_id
+
  def get_place_details(place_id, api_key):
+
+     details_url = "https://maps.googleapis.com/maps/api/place/details/json"
+
+     params = {
+
+         "place_id": place_id,
+
+         "key": api_key
+
+     }
+
+     response = requests.get(details_url, params=params)
+
+     if response.status_code == 200:
+
+         details_data = response.json().get("result", {})
+
+         return {
+
+             "opening_hours": details_data.get("opening_hours", {}).get("weekday_text", "Not available"),
+
+             "reviews": details_data.get("reviews", "Not available"),
+
+             "phone_number": details_data.get("formatted_phone_number", "Not available"),
+
+             "website": details_data.get("website", "Not available")
+
+         }
+
+     else:
+
+         return {}

  # Function to get all places data including pagination
+
  def get_all_places(query, location, radius, api_key):
+
+     all_results = []
+
+     next_page_token = None
+
+     while True:
+
+         data = get_places_data(query, location, radius, api_key, next_page_token)
+
+         if data:
+
+             results = data.get('results', [])
+
+             for place in results:
+
+                 place_id = place.get("place_id")
+
+                 name = place.get("name")
+
+                 address = place.get("formatted_address")
+
+                 rating = place.get("rating", "Not available")
+
+                 business_status = place.get("business_status", "Not available")
+
+                 user_ratings_total = place.get("user_ratings_total", "Not available")
+
+                 website = place.get("website", "Not available")
+
+                 types = ", ".join(place.get("types", []))
+
+                 location = place.get("geometry", {}).get("location", {})
+
+                 latitude = location.get("lat", "Not available")
+
+                 longitude = location.get("lng", "Not available")
+
+                 details = get_place_details(place_id, api_key)
+
+                 phone_number = details.get("phone_number", "Not available")
+
+                 if phone_number == "Not available" and website != "Not available":
+
+                     phone_number, email = scrape_website_for_contact_info(website)
+
+                 else:
+
+                     email = "Not available"
+
+                 if website == "Not available":
+
+                     website = scrape_website_from_google_maps(name)
+
+                 all_results.append([name, address, phone_number, rating, business_status,
+
+                                     user_ratings_total, website, types, latitude, longitude,
+
+                                     details.get("opening_hours", "Not available"),
+
+                                     details.get("reviews", "Not available"), email
+
+                                     ])
+
+             next_page_token = data.get('next_page_token')
+
+             if not next_page_token:
+
+                 break
+
+             time.sleep(2)
+
+         else:
+
+             break
+
+     return all_results

  # Function to save results to CSV file
+
  def save_to_csv(data, filename):
+
+     with open(filename, mode='w', newline='', encoding='utf-8') as file:
+
+         writer = csv.writer(file)
+
+         writer.writerow([
+
+             "Name", "Address", "Phone", "Rating", "Business Status",
+
+             "User Ratings Total", "Website", "Types", "Latitude", "Longitude",
+
+             "Opening Hours", "Reviews", "Email"
+
+         ])
+
+         writer.writerows(data)
+
+         print(f"Data saved to {filename}")

  # Function to get places data from Google Places API
+
  def get_places_data(query, location, radius, api_key, next_page_token=None):
+
+     url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
+
+     params = {
+
+         "query": query,
+
+         "location": location,
+
+         "radius": radius,
+
+         "key": api_key
+
+     }
+
+     if next_page_token:
+
+         params["pagetoken"] = next_page_token
+
+     response = requests.get(url, params=params)
+
+     if response.status_code == 200:
+
+         data = response.json()
+
+         return data
+
+     else:
+
+         print(f"Error: {response.status_code} - {response.text}")
+
+         return None
+
+ # Function to find local wellness professionals
+
+ def find_wellness_professionals(location):
+
+     query = "therapist OR counselor OR mental health professional OR marriage and family therapist OR psychotherapist OR psychiatrist OR psychologist OR nutritionist OR wellness doctor OR holistic practitioner OR integrative medicine OR chiropractor OR naturopath in " + location
+
+     api_key = "AIzaSyCcfJzMFfuv_1LN7JPTJJYw_aS0A_SLeW0" # Replace with your own Google API key
+
+     location_coords = "21.3,-157.8" # Default to Oahu, Hawaii
+
+     radius = 50000 # 50 km radius
+
+     # Install Chrome and Chromedriver
+
+     def install_chrome_and_driver():
+
+         os.system("apt-get update")
+
+         os.system("apt-get install -y wget curl")
+
+         os.system("wget -q https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb")
+
+         os.system("dpkg -i google-chrome-stable_current_amd64.deb")
+
+         os.system("apt-get install -y -f")
+
+         os.system("google-chrome-stable --version")
+
+         chromedriver_autoinstaller.install()
+
+     install_chrome_and_driver()
+
+     # Get all places data
+
+     google_places_data = get_all_places(query, location_coords, radius, api_key)
+
+     if google_places_data:
+
+         df = pd.DataFrame(google_places_data, columns=[
+
+             "Name", "Address", "Phone", "Rating", "Business Status",
+
+             "User Ratings Total", "Website", "Types", "Latitude", "Longitude",
+
+             "Opening Hours", "Reviews", "Email"
+
+         ])
+
+         return df
+
+     else:
+
+         return pd.DataFrame()
+
+ with gr.Blocks() as demo:
+
+     gr.Markdown("# Wellbeing Support System")
+
+     with gr.Tab("Chatbot"):
+
+         chatbot = gr.Chatbot()
+
+         msg = gr.Textbox()
+
+         clear = gr.Button("Clear")
+
+         msg.submit(chat, inputs=[msg, chatbot], outputs=chatbot)
+
+         clear.click(lambda: None, None, chatbot)
+
+     with gr.Tab("Sentiment Analysis"):
+
+         text_input = gr.Textbox(label="Enter text to analyze sentiment:")
+
+         analyze_button = gr.Button("Analyze Sentiment")
+
+         sentiment_output = gr.Textbox(label="Sentiment:")
+
+         analyze_button.click(analyze_sentiment, inputs=text_input, outputs=sentiment_output)
+
+     with gr.Tab("Emotion Detection & Suggestions"):
+
+         emotion_input = gr.Textbox(label="How are you feeling today?", value="Enter your thoughts here...")
+
+         detect_button = gr.Button("Detect Emotion")
+
+         emotion_output = gr.Textbox(label="Detected Emotion:")
+
+         suggestions_output = gr.Textbox(label="Suggestions:")
+
+         detect_button.click(detect_emotion_and_suggest, inputs=emotion_input, outputs=[emotion_output, suggestions_output])
+
+     with gr.Tab("Find Local Wellness Professionals"):
+
+         location_input = gr.Textbox(label="Enter your location:", value="Hawaii")
+
+         search_button = gr.Button("Search")
+
+         results_output = gr.Dataframe(label="Search Results")
+
+         search_button.click(find_wellness_professionals, inputs=location_input, outputs=results_output)
+
+ demo.launch()
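
Two details in the new version are worth a closer look.

First, the Google Places API key is still hard-coded in app.py (the line marked "# Replace with your own Google API key"), so the secret ships with the repository. On a Hugging Face Space, the usual alternative is to store the key as a repository secret and read it from the environment at startup. A minimal sketch, assuming a secret named GOOGLE_MAPS_API_KEY has been configured in the Space settings (that name is an assumption, not something this commit defines):

import os

# Read the Places API key from an environment variable (Space secret)
# instead of embedding it in the source. "GOOGLE_MAPS_API_KEY" is an assumed name.
api_key = os.environ.get("GOOGLE_MAPS_API_KEY")
if not api_key:
    raise RuntimeError("GOOGLE_MAPS_API_KEY is not set; add it as a Space secret.")

Second, in the Chatbot tab, chat() returns two values (history, history) while msg.submit(...) declares a single output component, so the second return value has nowhere to land. A sketch of event wiring that matches the two return values, using a hypothetical chat_and_clear helper around the existing chat() and spending the second output slot on clearing the textbox:

def chat_and_clear(message, history):
    # chat() appends (message, response) to history and returns it twice;
    # keep one copy for the Chatbot and return "" to empty the input box.
    history, _ = chat(message, history)
    return history, ""

msg.submit(chat_and_clear, inputs=[msg, chatbot], outputs=[chatbot, msg])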