DreamStream-1 committed
Commit 48a4b0f · verified · 1 Parent(s): d3aead7

Update app.py

Files changed (1)
  1. app.py +163 -516
app.py CHANGED
@@ -1,577 +1,224 @@
  import gradio as gr
  import nltk
  import numpy as np
  import tflearn
- import tensorflow
  import random
  import json
  import pickle
- import nltk
  from nltk.tokenize import word_tokenize
  from nltk.stem.lancaster import LancasterStemmer
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
- from transformers import pipeline
  import requests
- import csv
- import time
  import re
  from bs4 import BeautifulSoup
  import pandas as pd
  from selenium import webdriver
  from selenium.webdriver.chrome.options import Options
  import chromedriver_autoinstaller
  import os

- import torch

  # Ensure necessary NLTK resources are downloaded
  nltk.download('punkt')

  # Initialize the stemmer
  stemmer = LancasterStemmer()

  # Load intents.json
  try:
-     with open("intents.json") as file:
-         data = json.load(file)
  except FileNotFoundError:
-     raise FileNotFoundError("Error: 'intents.json' file not found. Ensure it exists in the current directory.")

  # Load preprocessed data from pickle
  try:
-     with open("data.pickle", "rb") as f:
-         words, labels, training, output = pickle.load(f)
  except FileNotFoundError:
-     raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")

  # Build the model structure
  net = tflearn.input_data(shape=[None, len(training[0])])
  net = tflearn.fully_connected(net, 8)
  net = tflearn.fully_connected(net, 8)
  net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
  net = tflearn.regression(net)

  # Load the trained model
  model = tflearn.DNN(net)
  try:
-     model.load("MentalHealthChatBotmodel.tflearn")
  except FileNotFoundError:
-     raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found.")

  # Function to process user input into a bag-of-words format
  def bag_of_words(s, words):
-     bag = [0 for _ in range(len(words))]
-     s_words = word_tokenize(s)
-     s_words = [stemmer.stem(word.lower()) for word in s_words if word.lower() in words]
-     for se in s_words:
-         for i, w in enumerate(words):
-             if w == se:
-                 bag[i] = 1
-     return np.array(bag)

  # Chat function
- def chat(message, history):
-     history = history or []
-     message = message.lower()
-     try:
-         # Predict the tag
-         results = model.predict([bag_of_words(message, words)])
-         results_index = np.argmax(results)
-         tag = labels[results_index]
-         # Match tag with intent and choose a random response
-         for tg in data["intents"]:
-             if tg['tag'] == tag:
-                 responses = tg['responses']
-                 response = random.choice(responses)
-                 break
-         else:
-             response = "I'm sorry, I didn't understand that. Could you please rephrase?"
-     except Exception as e:
-         response = f"An error occurred: {str(e)}"
-     history.append((message, response))
-     return history, history
 
  # Load pre-trained model and tokenizer for sentiment analysis
  tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
  sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

- # Load pre-trained model and tokenizer for emotion detection
  emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
  emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

- # Function for sentiment analysis
- def analyze_sentiment(text):
-     inputs = tokenizer(text, return_tensors="pt")
-     with torch.no_grad():
-         outputs = sentiment_model(**inputs)
-     predicted_class = torch.argmax(outputs.logits, dim=1).item()
-     sentiment = ["Negative", "Neutral", "Positive"][predicted_class]
-     return sentiment

  # Function for emotion detection
- def detect_emotion(text):
-     pipe = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)
-     result = pipe(text)
-     emotion = result[0]['label']
-     return emotion

- # Function to provide suggestions based on emotion
  def provide_suggestions(emotion):
-     if emotion == 'joy':
-         return "You're feeling happy! Keep up the great mood!"
-     elif emotion == 'anger':
-         return "You're feeling angry. It's okay to feel this way. Let's try to calm down."
-     # Add more conditions for other emotions...
-     else:
-         return "Sorry, no suggestions available for this emotion."

- # Combined function for emotion detection and suggestions
- def detect_emotion_and_suggest(text):
-     emotion = detect_emotion(text)
-     suggestions = provide_suggestions(emotion)
-     return emotion, suggestions

- # Function to scrape website URL from Google Maps using Selenium
- def scrape_website_from_google_maps(place_name):
-     chrome_options = Options()
-     chrome_options.add_argument("--headless")
-     chrome_options.add_argument("--no-sandbox")
-     chrome_options.add_argument("--disable-dev-shm-usage")
-     driver = webdriver.Chrome(options=chrome_options)
-     search_url = f"https://www.google.com/maps/search/{place_name.replace(' ', '+')}"
-     driver.get(search_url)
-     time.sleep(5)
-     try:
-         website_element = driver.find_element_by_xpath('//a[contains(@aria-label, "Visit") and contains(@aria-label, "website")]')
-         website_url = website_element.get_attribute('href')
-     except:
-         website_url = "Not available"
-     driver.quit()
-     return website_url

- # Function to scrape website for contact information
- def scrape_website_for_contact_info(website):
-     phone_number = "Not available"
-     email = "Not available"
-     try:
-         response = requests.get(website, timeout=5)
-         soup = BeautifulSoup(response.content, 'html.parser')
-         phone_match = re.search(r'\(?\+?[0-9]*\)?[0-9_\- \(\)]*', soup.get_text())
-         if phone_match:
-             phone_number = phone_match.group()
-         email_match = re.search(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}', soup.get_text())
-         if email_match:
-             email = email_match.group()
-     except Exception as e:
-         print(f"Error scraping website {website}: {e}")
-     return phone_number, email

- # Function to fetch detailed information for a specific place using its place_id
- def get_place_details(place_id, api_key):
-     details_url = "https://maps.googleapis.com/maps/api/place/details/json"
-     params = {
-         "place_id": place_id,
-         "key": api_key
-     }
-     response = requests.get(details_url, params=params)
-     if response.status_code == 200:
-         details_data = response.json().get("result", {})
-         return {
-             "opening_hours": details_data.get("opening_hours", {}).get("weekday_text", "Not available"),
-             "reviews": details_data.get("reviews", "Not available"),
-             "phone_number": details_data.get("formatted_phone_number", "Not available"),
-             "website": details_data.get("website", "Not available")
-         }
-     else:
-         return {}

- # Function to get all places data including pagination
  def get_all_places(query, location, radius, api_key):
-     all_results = []
-     next_page_token = None
-     while True:
-         data = get_places_data(query, location, radius, api_key, next_page_token)
-         if data:
-             results = data.get('results', [])
-             for place in results:
-                 place_id = place.get("place_id")
-                 name = place.get("name")
-                 address = place.get("formatted_address")
-                 rating = place.get("rating", "Not available")
-                 business_status = place.get("business_status", "Not available")
-                 user_ratings_total = place.get("user_ratings_total", "Not available")
-                 website = place.get("website", "Not available")
-                 types = ", ".join(place.get("types", []))
-                 location = place.get("geometry", {}).get("location", {})
-                 latitude = location.get("lat", "Not available")
-                 longitude = location.get("lng", "Not available")
-                 details = get_place_details(place_id, api_key)
-                 phone_number = details.get("phone_number", "Not available")
-                 if phone_number == "Not available" and website != "Not available":
-                     phone_number, email = scrape_website_for_contact_info(website)
-                 else:
-                     email = "Not available"
-                 if website == "Not available":
-                     website = scrape_website_from_google_maps(name)
-                 all_results.append([name, address, phone_number, rating, business_status,
-                                     user_ratings_total, website, types, latitude, longitude,
-                                     details.get("opening_hours", "Not available"),
-                                     details.get("reviews", "Not available"), email
-                                     ])
-             next_page_token = data.get('next_page_token')
-             if not next_page_token:
-                 break
-             time.sleep(2)
-         else:
-             break
-     return all_results

- # Function to save results to CSV file
- def save_to_csv(data, filename):
-     with open(filename, mode='w', newline='', encoding='utf-8') as file:
-         writer = csv.writer(file)
-         writer.writerow([
-             "Name", "Address", "Phone", "Rating", "Business Status",
-             "User Ratings Total", "Website", "Types", "Latitude", "Longitude",
-             "Opening Hours", "Reviews", "Email"
-         ])
-         writer.writerows(data)
-     print(f"Data saved to {filename}")

- # Function to get places data from Google Places API
- def get_places_data(query, location, radius, api_key, next_page_token=None):
-     url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
-     params = {
-         "query": query,
-         "location": location,
-         "radius": radius,
-         "key": api_key
-     }
-     if next_page_token:
-         params["pagetoken"] = next_page_token
-     response = requests.get(url, params=params)
-     if response.status_code == 200:
-         data = response.json()
-         return data
-     else:
-         print(f"Error: {response.status_code} - {response.text}")
-         return None

- # Function to find local wellness professionals
- def find_wellness_professionals(location):
-     query = "therapist OR counselor OR mental health professional OR marriage and family therapist OR psychotherapist OR psychiatrist OR psychologist OR nutritionist OR wellness doctor OR holistic practitioner OR integrative medicine OR chiropractor OR naturopath in " + location
-     api_key = "AIzaSyCcfJzMFfuv_1LN7JPTJJYw_aS0A_SLeW0"  # Replace with your own Google API key
-     location_coords = "21.3,-157.8"  # Default to Oahu, Hawaii
-     radius = 50000  # 50 km radius

-     # Install Chrome and Chromedriver
-     def install_chrome_and_driver():
-         os.system("apt-get update")
-         os.system("apt-get install -y wget curl")
-         os.system("wget -q https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb")
-         os.system("dpkg -i google-chrome-stable_current_amd64.deb")
-         os.system("apt-get install -y -f")
-         os.system("google-chrome-stable --version")
-         chromedriver_autoinstaller.install()

-     install_chrome_and_driver()

-     # Get all places data
-     google_places_data = get_all_places(query, location_coords, radius, api_key)
-     if google_places_data:
-         df = pd.DataFrame(google_places_data, columns=[
-             "Name", "Address", "Phone", "Rating", "Business Status",
-             "User Ratings Total", "Website", "Types", "Latitude", "Longitude",
-             "Opening Hours", "Reviews", "Email"
-         ])
-         return df
-     else:
-         return pd.DataFrame()

  with gr.Blocks() as demo:
-     gr.Markdown("# Wellbeing Support System")
-     with gr.Tab("Chatbot"):
-         chatbot = gr.Chatbot()
-         msg = gr.Textbox()
-         clear = gr.Button("Clear")
-         msg.submit(chat, inputs=[msg, chatbot], outputs=chatbot)
-         clear.click(lambda: None, None, chatbot)
-     with gr.Tab("Sentiment Analysis"):
-         text_input = gr.Textbox(label="Enter text to analyze sentiment:")
-         analyze_button = gr.Button("Analyze Sentiment")
-         sentiment_output = gr.Textbox(label="Sentiment:")
-         analyze_button.click(analyze_sentiment, inputs=text_input, outputs=sentiment_output)
-     with gr.Tab("Emotion Detection & Suggestions"):
-         emotion_input = gr.Textbox(label="How are you feeling today?", value="Enter your thoughts here...")
-         detect_button = gr.Button("Detect Emotion")
-         emotion_output = gr.Textbox(label="Detected Emotion:")
-         suggestions_output = gr.Textbox(label="Suggestions:")
-         detect_button.click(detect_emotion_and_suggest, inputs=emotion_input, outputs=[emotion_output, suggestions_output])
-     with gr.Tab("Find Local Wellness Professionals"):
-         location_input = gr.Textbox(label="Enter your location:", value="Hawaii")
-         search_button = gr.Button("Search")
-         results_output = gr.Dataframe(label="Search Results")
-         search_button.click(find_wellness_professionals, inputs=location_input, outputs=results_output)

- demo.launch()
 
  import gradio as gr
  import nltk
  import numpy as np
  import tflearn
  import random
  import json
  import pickle
+ import torch
  from nltk.tokenize import word_tokenize
  from nltk.stem.lancaster import LancasterStemmer
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
  import requests
  import re
  from bs4 import BeautifulSoup
+ import time
  import pandas as pd
  from selenium import webdriver
  from selenium.webdriver.chrome.options import Options
  import chromedriver_autoinstaller
  import os

  # Ensure necessary NLTK resources are downloaded
  nltk.download('punkt')

  # Initialize the stemmer
  stemmer = LancasterStemmer()

  # Load intents.json
  try:
+     with open("intents.json") as file:
+         data = json.load(file)
  except FileNotFoundError:
+     raise FileNotFoundError("Error: 'intents.json' file not found. Ensure it exists in the current directory.")

  # Load preprocessed data from pickle
  try:
+     with open("data.pickle", "rb") as f:
+         words, labels, training, output = pickle.load(f)
  except FileNotFoundError:
+     raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")

  # Build the model structure
  net = tflearn.input_data(shape=[None, len(training[0])])
  net = tflearn.fully_connected(net, 8)
  net = tflearn.fully_connected(net, 8)
  net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
  net = tflearn.regression(net)

  # Load the trained model
  model = tflearn.DNN(net)
  try:
+     model.load("MentalHealthChatBotmodel.tflearn")
  except FileNotFoundError:
+     raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found.")

  # Function to process user input into a bag-of-words format
  def bag_of_words(s, words):
+     bag = [0 for _ in range(len(words))]
+     s_words = word_tokenize(s)
+     s_words = [stemmer.stem(word.lower()) for word in s_words if word.lower() in words]
+     for se in s_words:
+         for i, w in enumerate(words):
+             if w == se:
+                 bag[i] = 1
+     return np.array(bag)

  # Chat function
+ def chat(message, history, state):
+     history = history or []
+     message = message.lower()
+     try:
+         # Predict the tag
+         results = model.predict([bag_of_words(message, words)])
+         results_index = np.argmax(results)
+         tag = labels[results_index]
+
+         # Match tag with intent and choose a random response
+         for tg in data["intents"]:
+             if tg['tag'] == tag:
+                 responses = tg['responses']
+                 response = random.choice(responses)
+                 break
+         else:
+             response = "I'm sorry, I didn't understand that. Could you please rephrase?"
+
+         history.append((message, response))
+
+         # Update state to move to the next feature
+         state['step'] = 2  # Move to sentiment analysis
+     except Exception as e:
+         response = f"An error occurred: {str(e)}"
+
+     return history, history, state

  # Load pre-trained model and tokenizer for sentiment analysis
  tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
  sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

+ # Function for sentiment analysis
+ def analyze_sentiment(text, state):
+     inputs = tokenizer(text, return_tensors="pt")
+     with torch.no_grad():
+         outputs = sentiment_model(**inputs)
+     predicted_class = torch.argmax(outputs.logits, dim=1).item()
+     sentiment = ["Negative", "Neutral", "Positive"][predicted_class]
+
+     # Update state to move to the next feature
+     state['step'] = 3  # Move to emotion detection and suggestions
+     return sentiment, state

+ # Load pre-trained model and tokenizer for emotion detection
  emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
  emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

  # Function for emotion detection
+ def detect_emotion(text, state):
+     pipe = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)
+     result = pipe(text)
+     emotion = result[0]['label']
+
+     # Provide suggestions based on emotion
+     suggestions = provide_suggestions(emotion)
+
+     # Update state to move to the next feature
+     state['step'] = 4  # Move to wellness professional search
+     return emotion, suggestions, state
+
+ # Suggestions based on detected emotion
  def provide_suggestions(emotion):
+     if emotion == 'joy':
+         return "You're feeling happy! Keep up the great mood!"
+     elif emotion == 'anger':
+         return "You're feeling angry. It's okay to feel this way. Let's try to calm down."
+     elif emotion == 'fear':
+         return "You're feeling fearful. Take deep breaths, everything will be okay."
+     elif emotion == 'sadness':
+         return "You're feeling sad. It's okay, things will get better. You're not alone."
+     else:
+         return "Sorry, no suggestions available for this emotion."
+
+ # Function to find wellness professionals
+ def find_wellness_professionals(location, state):
+     query = "therapist OR counselor OR mental health professional OR marriage and family therapist OR psychotherapist OR psychiatrist OR psychologist in " + location
+     api_key = "YOUR_GOOGLE_API_KEY"  # Replace with your own API key
+     location_coords = "21.3,-157.8"  # Default to Oahu, Hawaii
+     radius = 50000  # 50 km radius
+
+     google_places_data = get_all_places(query, location_coords, radius, api_key)
+     if google_places_data:
+         df = pd.DataFrame(google_places_data, columns=[
+             "Name", "Address", "Phone", "Rating", "Business Status",
+             "User Ratings Total", "Website", "Types", "Latitude", "Longitude",
+             "Opening Hours", "Reviews", "Email"
+         ])
+         return df, state
+     else:
+         return pd.DataFrame(), state
+
+ # The functions for scraping websites and fetching details
  def get_all_places(query, location, radius, api_key):
+     all_results = []
+     next_page_token = None
+     while True:
+         data = get_places_data(query, location, radius, api_key, next_page_token)
+         if data:
+             results = data.get('results', [])
+             for place in results:
+                 place_id = place.get("place_id")
+                 name = place.get("name")
+                 address = place.get("formatted_address")
+                 rating = place.get("rating", "Not available")
+                 business_status = place.get("business_status", "Not available")
+                 user_ratings_total = place.get("user_ratings_total", "Not available")
+                 website = place.get("website", "Not available")
+                 types = ", ".join(place.get("types", []))
+                 location = place.get("geometry", {}).get("location", {})
+                 latitude = location.get("lat", "Not available")
+                 longitude = location.get("lng", "Not available")
+                 details = get_place_details(place_id, api_key)
+                 phone_number = details.get("phone_number", "Not available")
+                 email = details.get("email", "Not available")
+                 all_results.append([name, address, phone_number, rating, business_status,
+                                     user_ratings_total, website, types, latitude, longitude,
+                                     details.get("opening_hours", "Not available"),
+                                     details.get("reviews", "Not available"), email])
+             next_page_token = data.get('next_page_token')
+             if not next_page_token:
+                 break
+             time.sleep(2)
+     return all_results
+
+ # Gradio interface
  with gr.Blocks() as demo:
+     gr.Markdown("# Wellbeing Support System")
+
+     state = gr.State({"step": 1})  # Track the flow step
+
+     with gr.Tab("Chatbot"):
+         chatbot = gr.Chatbot()
+         msg = gr.Textbox()
+         clear = gr.Button("Clear")
+         msg.submit(chat, inputs=[msg, chatbot, state], outputs=[chatbot, chatbot, state])
+         clear.click(lambda: None, None, chatbot)
+
+     with gr.Tab("Sentiment Analysis"):
+         sentiment_output = gr.Textbox(label="Sentiment:")
+         text_input = gr.Textbox(label="Enter text to analyze sentiment:")
+         analyze_button = gr.Button("Analyze Sentiment")
+         analyze_button.click(analyze_sentiment, inputs=[text_input, state], outputs=[sentiment_output, state])
+
+     with gr.Tab("Emotion Detection & Suggestions"):
+         emotion_input = gr.Textbox(label="How are you feeling today?", value="Enter your thoughts here...")
+         detect_button = gr.Button("Detect Emotion")
+         emotion_output = gr.Textbox(label="Detected Emotion:")
+         suggestions_output = gr.Textbox(label="Suggestions:")
+         detect_button.click(detect_emotion, inputs=[emotion_input, state], outputs=[emotion_output, suggestions_output, state])
+
+     with gr.Tab("Find Local Wellness Professionals"):
+         location_input = gr.Textbox(label="Enter your location:", value="Hawaii")
+         search_button = gr.Button("Search")
+         results_output = gr.Dataframe(label="Search Results")
+         search_button.click(find_wellness_professionals, inputs=[location_input, state], outputs=[results_output, state])
+
+ demo.launch()
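
Note: the new app.py still calls get_places_data and get_place_details inside get_all_places, but their definitions are not visible in this hunk (only the placeholder comment above get_all_places). A minimal sketch of what those helpers would need to look like, reconstructed from the versions this commit removes; the endpoints and parameters match the deleted code, while the "email" key read by the new get_all_places is an assumption, since the Places API does not return email addresses:

import requests

def get_places_data(query, location, radius, api_key, next_page_token=None):
    # Text Search request, as in the removed version of this function
    params = {"query": query, "location": location, "radius": radius, "key": api_key}
    if next_page_token:
        params["pagetoken"] = next_page_token
    response = requests.get("https://maps.googleapis.com/maps/api/place/textsearch/json", params=params)
    return response.json() if response.status_code == 200 else None

def get_place_details(place_id, api_key):
    # Place Details request; "email" is not a field the Places API returns,
    # so details.get("email", "Not available") will always yield the default
    response = requests.get(
        "https://maps.googleapis.com/maps/api/place/details/json",
        params={"place_id": place_id, "key": api_key},
    )
    if response.status_code != 200:
        return {}
    result = response.json().get("result", {})
    return {
        "opening_hours": result.get("opening_hours", {}).get("weekday_text", "Not available"),
        "reviews": result.get("reviews", "Not available"),
        "phone_number": result.get("formatted_phone_number", "Not available"),
        "website": result.get("website", "Not available"),
    }

Without these definitions the new get_all_places raises a NameError on its first call, and because the commit also drops the old else: break branch, a request that returns None would otherwise loop indefinitely.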