Update app.py
app.py
CHANGED
@@ -1,32 +1,33 @@
- import gradio as gr
- from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
- import json
- import pickle
- import random
- import pandas as pd
- import requests
  import nltk
- from nltk.stem import LancasterStemmer
  import numpy as np
- import tensorflow as tf
- from bs4 import BeautifulSoup
  import tflearn

- #
  nltk.download('punkt')

  # Initialize the stemmer
  stemmer = LancasterStemmer()

- # Load intents.json
  with open("intents.json") as file:
      data = json.load(file)

- # Load preprocessed data
  with open("data.pickle", "rb") as f:
      words, labels, training, output = pickle.load(f)

- # Build the model structure
  net = tflearn.input_data(shape=[None, len(training[0])])
  net = tflearn.fully_connected(net, 8)
  net = tflearn.fully_connected(net, 8)
@@ -37,20 +38,10 @@ net = tflearn.regression(net)
  model = tflearn.DNN(net)
  model.load("MentalHealthChatBotmodel.tflearn")

- #
- tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
- model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
- sentiment_pipeline = pipeline("sentiment-analysis")
-
- # Emotion detection
- tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
- model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
- emotion_pipeline = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
-
- # Function to process user input into a bag-of-words format
  def bag_of_words(s, words):
      bag = [0 for _ in range(len(words))]
-     s_words =
      s_words = [stemmer.stem(word.lower()) for word in s_words if word.lower() in words]
      for se in s_words:
          for i, w in enumerate(words):
@@ -58,108 +49,135 @@ def bag_of_words(s, words):
                  bag[i] = 1
      return np.array(bag)

- #
- def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- return
-
-
-
-
-
- def detect_emotion_and_suggest(text):
-     pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
-     result = pipe(text)
-     emotion = result[0]['label']
-
-     # Prepare suggestions based on the detected emotion
-     suggestions = ""
-     relaxation_videos = ""
-     if emotion == 'joy':
-         suggestions = "You're feeling happy! Keep up the great mood!\n\nUseful Resources:\n- Relaxation Techniques: [Link](https://www.example.com/joy)\n- Dealing with Stress: [Link](https://www.example.com/stress)\n- Emotional Wellness Toolkit: [Link](https://www.example.com/wellness)"
-         relaxation_videos = "Relaxation Videos:\n- Watch on YouTube: [Link](https://youtu.be/m1vaUGtyo-A)"
-
-     elif emotion == 'anger':
-         suggestions = "You're feeling angry. It's okay to feel this way. Let's try to calm down.\n\nUseful Resources:\n- Emotional Wellness Toolkit: [Link](https://www.example.com/anger)\n- Stress Management Tips: [Link](https://www.example.com/stress)\n- Dealing with Anger: [Link](https://www.example.com/dealing_with_anger)"
-         relaxation_videos = "Relaxation Videos:\n- Watch on YouTube: [Link](https://youtu.be/MIc299Flibs)"
-
-     elif emotion == 'fear':
-         suggestions = "You're feeling fearful. Take a moment to breathe and relax.\n\nUseful Resources:\n- Mindfulness Practices: [Link](https://www.example.com/fear)\n- Coping with Anxiety: [Link](https://www.example.com/anxiety)\n- Emotional Wellness Toolkit: [Link](https://www.example.com/wellness)"
-         relaxation_videos = "Relaxation Videos:\n- Watch on YouTube: [Link](https://youtu.be/yGKKz185M5o)"
-
-     elif emotion == 'sadness':
-         suggestions = "You're feeling sad. It's okay to take a break.\n\nUseful Resources:\n- Emotional Wellness Toolkit: [Link](https://www.example.com/sadness)\n- Dealing with Anxiety: [Link](https://www.example.com/anxiety)"
-         relaxation_videos = "Relaxation Videos:\n- Watch on YouTube: [Link](https://youtu.be/-e-4Kx5px_I)"

-
-
-

-

-
  iface = gr.Interface(
-     fn=
-     inputs="
-     outputs=[
-
-
-
-     ],
-     title="Emotion Detection and Well-Being Suggestions",
-     description="Enter your thoughts below to detect your current emotion and receive personalized well-being suggestions.",
- )
-
- # Function to show a summary of the detected emotion and suggestions
- def show_summary(emotion, suggestions):
-     return f"**Emotion Detected:** {emotion}\n{suggestions}"
-
- # Gradio interface for showing summary
- summary_iface = gr.Interface(
-     fn=show_summary,
-     inputs=[
-         "text",  # For detected emotion
-         "text",  # For suggestions
-     ],
-     outputs="markdown",
-     title="Summary of Emotion and Suggestions",
-     description="Click the button to see a summary of your detected emotion and the suggested well-being resources.",
- )
-
- # Function to fetch and display nearby health professionals
- def fetch_and_display_health_professionals(location):
-     df = fetch_nearby_health_professionals(location)
-     return df
-
- # Gradio interface for fetching nearby health professionals
- health_professionals_iface = gr.Interface(
-     fn=fetch_and_display_health_professionals,
-     inputs="text",
-     outputs="dataframe",
-     title="Find Nearby Health Professionals",
-     description="Enter your location to find nearby health professionals.",
  )

-
- iface.launch()
- summary_iface.launch()
- health_professionals_iface.launch()
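Both the old and the updated app.py load intents.json and look up the "tag" and "responses" fields of each entry in data["intents"]. The file itself is not part of this diff, so the snippet below is only a hypothetical, minimal example of the structure the chatbot code assumes (the "patterns" field is the one such files typically carry for training data; the app only reads "tag" and "responses" at runtime):

import json

# Hypothetical minimal intents file for local testing; tags, patterns and responses are illustrative only.
sample_intents = {
    "intents": [
        {
            "tag": "greeting",
            "patterns": ["Hi", "Hello", "How are you?"],
            "responses": ["Hello! How are you feeling today?"],
        },
        {
            "tag": "stress",
            "patterns": ["I feel stressed", "I'm overwhelmed"],
            "responses": ["That sounds hard. Would you like to try a short breathing exercise?"],
        },
    ]
}

with open("intents.json", "w") as f:
    json.dump(sample_intents, f, indent=2)

In the updated file below, chatbot() picks a random entry from "responses" for the predicted tag and falls back to a generic reply when no tag matches.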
  import nltk
  import numpy as np
  import tflearn
+ import tensorflow
+ import random
+ import json
+ import pickle
+ import gradio as gr
+ import torch  # required by analyze_sentiment below (torch.no_grad, torch.argmax)
+ from nltk.tokenize import word_tokenize
+ from nltk.stem.lancaster import LancasterStemmer
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
+ import googlemaps
+ import folium
+ import os

+ # Ensure necessary NLTK resources are downloaded
  nltk.download('punkt')

  # Initialize the stemmer
  stemmer = LancasterStemmer()

+ # Load intents.json for Mental Health Chatbot
  with open("intents.json") as file:
      data = json.load(file)

+ # Load preprocessed data for Mental Health Chatbot
  with open("data.pickle", "rb") as f:
      words, labels, training, output = pickle.load(f)

+ # Build the model structure for Mental Health Chatbot
  net = tflearn.input_data(shape=[None, len(training[0])])
  net = tflearn.fully_connected(net, 8)
  net = tflearn.fully_connected(net, 8)

  model = tflearn.DNN(net)
  model.load("MentalHealthChatBotmodel.tflearn")

+ # Function to process user input into a bag-of-words format for Chatbot
  def bag_of_words(s, words):
      bag = [0 for _ in range(len(words))]
+     s_words = word_tokenize(s)
      s_words = [stemmer.stem(word.lower()) for word in s_words if word.lower() in words]
      for se in s_words:
          for i, w in enumerate(words):

                  bag[i] = 1
      return np.array(bag)

+ # Chat function for Mental Health Chatbot
+ def chatbot(message, history):
+     history = history or []
+     message = message.lower()
+     try:
+         # Predict the tag
+         results = model.predict([bag_of_words(message, words)])
+         results_index = np.argmax(results)
+         tag = labels[results_index]
+
+         # Match tag with intent and choose a random response
+         for tg in data["intents"]:
+             if tg['tag'] == tag:
+                 responses = tg['responses']
+                 response = random.choice(responses)
+                 break
+         else:
+             response = "I'm sorry, I didn't understand that. Could you please rephrase?"
+     except Exception as e:
+         response = f"An error occurred: {str(e)}"
+
+     history.append((message, response))
+     return history, history
+
+
+ # Sentiment Analysis using Hugging Face model
+ tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
+ model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

+ def analyze_sentiment(user_input):
+     inputs = tokenizer(user_input, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model_sentiment(**inputs)
+     predicted_class = torch.argmax(outputs.logits, dim=1).item()
+     sentiment = ["Negative", "Neutral", "Positive"][predicted_class]  # Assuming 3 classes
+     return f"Predicted Sentiment: {sentiment}"

+ # Emotion Detection using Hugging Face model
+ tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
+ model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

+ def detect_emotion(user_input):
+     pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
+     result = pipe(user_input)
+     emotion = result[0]['label']
+     return f"Emotion Detected: {emotion}"
+
+ # Initialize Google Maps API client securely
+ gmaps = googlemaps.Client(key=os.getenv('GOOGLE_API_KEY'))
+
+ # Function to search for health professionals
+ def search_health_professionals(query, location, radius=10000):
+     places_result = gmaps.places_nearby(location, radius=radius, type='doctor', keyword=query)
+     return places_result.get('results', [])
+
+ # Function to get directions and display on Gradio UI
+ def get_health_professionals_and_map(current_location, health_professional_query):
+     route_info = ""
+     m = None  # Default to None
+     try:
+         # Geocode the current location (i.e., convert it to latitude and longitude)
+         geocode_result = gmaps.geocode(current_location)
+         if not geocode_result:
+             route_info = "Could not retrieve location coordinates. Please enter a valid location."
+             return route_info, m
+
+         location_coords = geocode_result[0]['geometry']['location']
+         lat, lon = location_coords['lat'], location_coords['lng']
+
+         # Search for health professionals
+         health_professionals = search_health_professionals(health_professional_query, (lat, lon))
+
+         if health_professionals:
+             route_info = "Health professionals found:\n"
+             m = folium.Map(location=[lat, lon], zoom_start=12)
+             for professional in health_professionals:
+                 name = professional['name']
+                 vicinity = professional.get('vicinity', 'N/A')
+                 rating = professional.get('rating', 'N/A')
+                 folium.Marker([professional['geometry']['location']['lat'], professional['geometry']['location']['lng']],
+                               popup=f"{name}\n{vicinity}\nRating: {rating}").add_to(m)
+                 route_info += f"- {name} ({rating} stars): {vicinity}\n"
+         else:
+             route_info = "No health professionals found matching your query."
+             m = folium.Map(location=[lat, lon], zoom_start=12)  # Default map if no professionals are found
+
+     except Exception as e:
+         route_info = f"Error: {str(e)}"
+         m = folium.Map(location=[20, 0], zoom_start=2)  # Default map if any error occurs
+
+     return route_info, m._repr_html_()
+
+ # Gradio interface
+ def gradio_app(message, location, health_query, history):
+     # Chatbot interaction
+     history, _ = chatbot(message, history)
+
+     # Sentiment analysis
+     sentiment_response = analyze_sentiment(message)
+
+     # Emotion detection
+     emotion_response = detect_emotion(message)
+
+     # Health professional search and map display
+     route_info, map_html = get_health_professionals_and_map(location, health_query)
+
+     return history, sentiment_response, emotion_response, route_info, map_html
+
+ # Gradio UI components
+ message_input = gr.Textbox(lines=1, label="Message")
+ location_input = gr.Textbox(value="Honolulu, HI", label="Current Location")
+ health_query_input = gr.Textbox(value="doctor", label="Health Professional Query (e.g., doctor, psychiatrist, psychologist)")
+
+ chat_history = gr.Chatbot(label="Chat History")
+
+ # Outputs
+ sentiment_output = gr.Textbox(label="Sentiment Analysis Result")
+ emotion_output = gr.Textbox(label="Emotion Detection Result")
+ route_info_output = gr.Textbox(label="Health Professionals Information")
+ map_output = gr.HTML(label="Map with Health Professionals")
+
+ # Create Gradio interface
  iface = gr.Interface(
+     fn=gradio_app,
+     inputs=[message_input, location_input, health_query_input, "state"],
+     outputs=[chat_history, sentiment_output, emotion_output, route_info_output, map_output],
+     allow_flagging="never",
+     live=True,
+     title="Wellbeing App: Mental Health, Sentiment, Emotion Detection & Health Professional Search"
  )

+ iface.launch()
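A quick way to sanity-check the updated pipeline without the web UI is to call the functions directly, for example in a REPL after running everything above iface.launch(). This sketch assumes intents.json, data.pickle and the MentalHealthChatBotmodel.tflearn checkpoint sit next to app.py, and that GOOGLE_API_KEY is set if you want the map step; the example inputs and printed labels are illustrative, not guaranteed outputs:

# Assumes the functions defined above (chatbot, analyze_sentiment, detect_emotion,
# get_health_professionals_and_map) are already in scope.
history, _ = chatbot("I feel a bit anxious today", [])
print(history[-1][1])                                    # the bot's reply to the last message

print(analyze_sentiment("I feel a bit anxious today"))   # e.g. "Predicted Sentiment: Negative"
print(detect_emotion("I feel a bit anxious today"))      # e.g. "Emotion Detected: fear"

import os
if os.getenv("GOOGLE_API_KEY"):                          # the geocoding/Places calls need a valid key
    info, map_html = get_health_professionals_and_map("Honolulu, HI", "psychologist")
    print(info)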
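The app loads data.pickle and MentalHealthChatBotmodel.tflearn, but the commit does not show how they are produced. A typical preprocessing and training script for this style of tflearn chatbot, consistent with the objects app.py unpickles (a stemmed vocabulary, a label list, a bag-of-words matrix and one-hot intent labels), might look like the sketch below. Treat it as an assumption-laden reconstruction rather than the project's actual training code; in particular, the softmax output layer and the "patterns" field are assumptions:

import json
import pickle

import nltk
import numpy as np
import tflearn
from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import word_tokenize

nltk.download('punkt')
stemmer = LancasterStemmer()

with open("intents.json") as f:
    data = json.load(f)

words, labels, docs_x, docs_y = [], [], [], []
for intent in data["intents"]:
    for pattern in intent.get("patterns", []):   # "patterns" assumed, as in the example above
        tokens = word_tokenize(pattern)
        words.extend(tokens)
        docs_x.append(tokens)
        docs_y.append(intent["tag"])
    if intent["tag"] not in labels:
        labels.append(intent["tag"])

words = sorted(set(stemmer.stem(w.lower()) for w in words if w != "?"))
labels = sorted(labels)

training, output = [], []
for tokens, tag in zip(docs_x, docs_y):
    stems = [stemmer.stem(w.lower()) for w in tokens]
    training.append([1 if w in stems else 0 for w in words])   # bag-of-words row
    row = [0] * len(labels)
    row[labels.index(tag)] = 1                                  # one-hot intent label
    output.append(row)

training = np.array(training)
output = np.array(output)

# Persist exactly the four objects app.py unpickles.
with open("data.pickle", "wb") as f:
    pickle.dump((words, labels, training, output), f)

# Mirror the layers visible in the diff; the softmax output layer is an assumption,
# since those lines of app.py fall between the diff hunks.
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

model = tflearn.DNN(net)
model.fit(training, output, n_epoch=500, batch_size=8, show_metric=True)
model.save("MentalHealthChatBotmodel.tflearn")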