Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
+import os
 import gradio as gr
-import pandas as pd
 import nltk
 import numpy as np
 import tflearn
@@ -11,216 +11,179 @@ from nltk.stem.lancaster import LancasterStemmer
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
 import googlemaps
 import folium
-import
-import
-import torch # Added missing import for torch
-from PIL import Image
+import pandas as pd
+import torch
 
-# Disable GPU usage for TensorFlow
+# Disable GPU usage for TensorFlow for compatibility
 os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
 
-#
+# Download necessary NLTK resources
 nltk.download('punkt')
 
-# Initialize
+# Initialize Lancaster Stemmer
 stemmer = LancasterStemmer()
 
-# Load intents.json for
+# Load intents.json for Chatbot
 with open("intents.json") as file:
-
+    intents_data = json.load(file)
 
-# Load
+# Load tokenized data for Chatbot
 with open("data.pickle", "rb") as f:
     words, labels, training, output = pickle.load(f)
 
-# Build
-
-net = tflearn.
-net = tflearn.fully_connected(net, 8)
-net = tflearn.fully_connected(net,
-net = tflearn.
+# Build Chatbot Model
+def build_chatbot_model():
+    net = tflearn.input_data(shape=[None, len(training[0])])
+    net = tflearn.fully_connected(net, 8)
+    net = tflearn.fully_connected(net, 8)
+    net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
+    net = tflearn.regression(net)
+    model = tflearn.DNN(net)
+    model.load("MentalHealthChatBotmodel.tflearn")
+    return model
 
-
-model = tflearn.DNN(net)
-model.load("MentalHealthChatBotmodel.tflearn")
+chatbot_model = build_chatbot_model()
 
-#
+# Bag of Words Function for Chatbot
 def bag_of_words(s, words):
     bag = [0 for _ in range(len(words))]
     s_words = word_tokenize(s)
-    s_words = [stemmer.stem(word.lower()) for word in s_words if word.
+    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
     for se in s_words:
         for i, w in enumerate(words):
             if w == se:
                 bag[i] = 1
     return np.array(bag)
 
-#
-def
+# Chatbot Response Function
+def chatbot_response(message, history):
+    """Respond to user input and update chat history."""
     history = history or []
-    message = message.lower()
     try:
-
-
-
-
-
-        for
-            if
-
-                response = random.choice(responses)
+        result = chatbot_model.predict([bag_of_words(message, words)])
+        result_index = np.argmax(result)
+        tag = labels[result_index]
+
+        response = "I didn't understand that. 🤔 Try rephrasing your question."
+        for intent in intents_data["intents"]:
+            if intent["tag"] == tag:
+                response = f"🤖 {random.choice(intent['responses'])}"
                 break
-        else:
-            response = "I'm sorry, I didn't understand that. Could you please rephrase?"
     except Exception as e:
-
-
-
-        history.append({"role": "user", "content": message})
+        response = f"Error generating response: {str(e)} 💥"
+
+    history.append({"role": "user", "content": f"💬 {message}"})
     history.append({"role": "assistant", "content": response})
-    return history,
+    return history, response
 
-#
-
-
+# Emotion Detection with Transformers
+emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
+emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
 
-def
-
-
-
-
-
-
+def detect_emotion(user_input):
+    """Detect emotion using a pre-trained model and return label with an emoji."""
+    pipe = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)
+    try:
+        result = pipe(user_input)
+        emotion = result[0]["label"]
+        emotion_map = {
+            "joy": "😊 Joy",
+            "anger": "😠 Anger",
+            "sadness": "😢 Sadness",
+            "fear": "😨 Fear",
+            "surprise": "😲 Surprise",
+            "neutral": "😐 Neutral",
+        }
+        return emotion_map.get(emotion, "Unknown Emotion 🤔")
+    except Exception as e:
+        return f"Error detecting emotion: {str(e)} 💥"
 
-#
-
-
+# Sentiment Analysis
+sentiment_tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
+sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
 
-def
-
-
-
-
+def analyze_sentiment(user_input):
+    """Analyze sentiment of user input."""
+    inputs = sentiment_tokenizer(user_input, return_tensors="pt")
+    try:
+        with torch.no_grad():
+            outputs = sentiment_model(**inputs)
+        sentiment_class = torch.argmax(outputs.logits, dim=1).item()
+        sentiment_map = ["Negative 😔", "Neutral 😐", "Positive 😊"]
+        return f"Sentiment: {sentiment_map[sentiment_class]}"
+    except Exception as e:
+        return f"Error in sentiment analysis: {str(e)} 💥"
 
-#
+# Generate Suggestions Based on Emotion
 def generate_suggestions(emotion):
     suggestions = {
-
-
-
-
-
-
+        "😊 Joy": [
+            {"Title": "Meditation Techniques", "Link": "https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation"},
+            {"Title": "Learn Something New", "Link": "https://www.edx.org/"},
+        ],
+        "😢 Sadness": [
+            {"Title": "Emotional Wellness Toolkit", "Link": "https://www.nih.gov/health-information/emotional-wellness-toolkit"},
+            {"Title": "Relaxation Videos", "Link": "https://youtu.be/-e-4Kx5px_I"},
+        ],
+        "😠 Anger": [
+            {"Title": "Dealing with Anger", "Link": "https://www.helpguide.org/articles/anger/anger-management.htm"},
+            {"Title": "Stress Reducing Tips", "Link": "https://www.webmd.com/stress-management"},
+        ],
     }
-    return
-
-#
-def
-
-
-
+    return suggestions.get(emotion, [{"Title": "General Tips", "Link": "https://www.psychologytoday.com/"}])
+
+# Gradio Interface Main Function
+def well_being_app(user_input, location, query, history):
+    """Main app combining chatbot, emotion detection, sentiment, suggestions, and map."""
+    # Chatbot Interaction
+    history, chatbot_reply = chatbot_response(user_input, history)
+
+    # Emotion Detection
+    emotion = detect_emotion(user_input)
+
+    # Sentiment Analysis
+    sentiment = analyze_sentiment(user_input)
+
+    # Suggestions Based on Emotion
+    emotion_label = emotion.split(": ")[-1]
+    suggestions = generate_suggestions(emotion_label)
+    suggestions_df = pd.DataFrame(suggestions)
+
+    # Return Outputs
+    return (
+        history,
+        sentiment,
+        emotion,
+        suggestions_df
+    )
+
+# Gradio Interface UI
+with gr.Blocks() as app:
+    with gr.Row():
+        gr.Markdown("# 🌼 Well-Being Support Application")
+
+    with gr.Row():
+        user_input = gr.Textbox(lines=2, placeholder="Type your message here...", label="Your Message")
+        location = gr.Textbox(value="Honolulu, HI", label="Your Location")
+        query = gr.Textbox(value="Counselor", label="Health Professional (Doctor, Therapist, etc.)")
 
-
-
-        route_info = "\n".join([place['name'] for place in results])
-        map_html = create_map(results)
-        return route_info, map_html
-    return "No professionals found.", None
-
-def create_map(places):
-    m = folium.Map(location=[places[0]['geometry']['location']['lat'], places[0]['geometry']['location']['lng']], zoom_start=13)
-    for place in places:
-        folium.Marker([place['geometry']['location']['lat'], place['geometry']['location']['lng']],
-                      popup=place['name']).add_to(m)
-    map_html = m._repr_html_()
-    return map_html
-
-# Custom CSS styling for Gradio interface
-css = """
-body {
-    font-family: 'Roboto', sans-serif;
-}
-.gradio-container {
-    background-color: #f0f0f0;
-    font-size: 16px;
-}
-.gradio-input, .gradio-output {
-    padding: 15px;
-    border-radius: 10px;
-    background-color: #ffffff;
-    border: 2px solid #ccc;
-}
-.gradio-container .gradio-button {
-    background-color: #007BFF;
-    color: white;
-    border-radius: 5px;
-    padding: 10px 15px;
-}
-.gradio-container .gradio-button:hover {
-    background-color: #0056b3;
-}
-.gradio-container h3 {
-    color: #333;
-}
-.gradio-output .output {
-    border-top: 3px solid #ddd;
-    padding-top: 10px;
-}
-.gradio-input input {
-    color: #333;
-}
-.gradio-input textarea {
-    color: #333;
-}
-"""
-
-# Gradio interface components
-def gradio_app(message, current_location, health_professional_query, history):
-    # Detect sentiment and emotion
-    sentiment = analyze_sentiment(message)
-    emotion = detect_emotion(message)
+    with gr.Row():
+        submit_button = gr.Button(value="Submit", label="Submit")
 
-
-
-
-
-    route_info, map_html = get_health_professionals_and_map(current_location, health_professional_query)
-
-    # Add emoticon for emotion
-    emotion_emoticons = {
-        'joy': '😊',
-        'anger': '😡',
-        'fear': '😨',
-        'sadness': '😢',
-        'surprise': '😲',
-        'disgust': '🤢'
-    }
-    emotion_icon = emotion_emoticons.get(emotion, '🙂')
+    with gr.Row():
+        chatbot = gr.Chatbot(label="Chat History")
+        sentiment_output = gr.Textbox(label="Sentiment Analysis")
+        emotion_output = gr.Textbox(label="Emotion Detected")
 
-
-
-
-
-
-
-
-
-
-
-
-
-        gr.Textbox(label="Sentiment Analysis"),
-        gr.Textbox(label="Detected Emotion"),
-        gr.Dataframe(label="Suggestions"),
-        gr.Textbox(label="Nearby Health Professionals"),
-        gr.HTML(label="Map of Health Professionals"),
-        gr.State(value=[])
-    ],
-    live=True,
-    allow_flagging="never",
-    theme="huggingface",
-    css=css # Apply custom CSS styling
-)
-
-# Launch Gradio interface
-iface.launch(share=True)
+    with gr.Row():
+        suggestions_output = gr.DataFrame(label="Suggestions Based on Mood")
+
+    # Connect inputs and outputs
+    submit_button.click(
+        well_being_app,
+        inputs=[user_input, location, query, chatbot],
+        outputs=[chatbot, sentiment_output, emotion_output, suggestions_output],
+    )
+
+# Launch the app
+app.launch()
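Reviewer note, not part of the commit: app.launch() runs at import time, so importing this module (for example to test the pipeline functions) also starts the Gradio server, and on recent Gradio releases gr.Chatbot may need type="messages" to render the {"role", "content"} history this code builds. Below is a minimal, hypothetical sketch of a __main__ guard plus smoke test; it reuses the names defined in the new app.py, and nothing in it appears in either version of the file.

# Hypothetical addition for local debugging -- not part of the commit.
def smoke_test(text="I have been feeling a bit low lately"):
    """Exercise the text pipeline once without opening the UI."""
    history, reply = chatbot_response(text, history=[])
    print("chatbot  :", reply)
    print("sentiment:", analyze_sentiment(text))
    print("emotion  :", detect_emotion(text))
    print("suggested:", generate_suggestions(detect_emotion(text)))

if __name__ == "__main__":
    # smoke_test()  # uncomment to check that the models load and respond
    app.launch()    # would replace the unconditional app.launch() above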
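Reviewer note, not part of the commit: the update deletes get_health_professionals_and_map, create_map, and the map/professionals outputs of the old iface, yet it keeps the googlemaps and folium imports and still collects location and query in the UI; well_being_app currently ignores both inputs. The sketch below shows one way the removed lookup could be reinstated, following the deleted create_map above. The Google Maps client setup, the GOOGLE_MAPS_API_KEY environment variable, and the 10 km search radius are assumptions, not code from either version.

# Hypothetical reinstatement sketch -- based on the removed helpers above.
import os
import folium
import googlemaps

def get_health_professionals_and_map(current_location, query):
    """Text-search for nearby professionals; return (summary text, folium map HTML)."""
    gmaps = googlemaps.Client(key=os.environ.get("GOOGLE_MAPS_API_KEY", ""))  # assumed env var
    geocode = gmaps.geocode(current_location)
    if not geocode:
        return "Location not found.", None
    latlng = geocode[0]["geometry"]["location"]
    results = gmaps.places(query=query,
                           location=(latlng["lat"], latlng["lng"]),
                           radius=10000).get("results", [])  # assumed radius
    if not results:
        return "No professionals found.", None
    route_info = "\n".join(place["name"] for place in results)
    return route_info, create_map(results)

def create_map(places):
    # Same logic as the removed create_map: one marker per place, HTML for gr.HTML.
    m = folium.Map(location=[places[0]["geometry"]["location"]["lat"],
                             places[0]["geometry"]["location"]["lng"]], zoom_start=13)
    for place in places:
        folium.Marker([place["geometry"]["location"]["lat"],
                       place["geometry"]["location"]["lng"]],
                      popup=place["name"]).add_to(m)
    return m._repr_html_()

Wiring this back in would also mean restoring output components along the lines of the removed gr.Textbox(label="Nearby Health Professionals") and gr.HTML(label="Map of Health Professionals"), and returning route_info and map_html from well_being_app.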