DreamStream-1 committed on
Commit f0734be · verified · 1 Parent(s): fabcaa4

Update app.py

Files changed (1)
  1. app.py +138 -175
app.py CHANGED
@@ -1,5 +1,5 @@
  import gradio as gr
- import pandas as pd
  import nltk
  import numpy as np
  import tflearn
@@ -11,216 +11,179 @@ from nltk.stem.lancaster import LancasterStemmer
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
  import googlemaps
  import folium
- import os
- import base64
- import torch # Added missing import for torch
- from PIL import Image

- # Disable GPU usage for TensorFlow
  os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

- # Ensure necessary NLTK resources are downloaded
  nltk.download('punkt')

- # Initialize the stemmer
  stemmer = LancasterStemmer()

- # Load intents.json for Well-Being Chatbot
  with open("intents.json") as file:
-     data = json.load(file)

- # Load preprocessed data for Well-Being Chatbot
  with open("data.pickle", "rb") as f:
      words, labels, training, output = pickle.load(f)

- # Build the model structure for Well-Being Chatbot
- net = tflearn.input_data(shape=[None, len(training[0])])
- net = tflearn.fully_connected(net, 8)
- net = tflearn.fully_connected(net, 8)
- net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
- net = tflearn.regression(net)

- # Load the trained model
- model = tflearn.DNN(net)
- model.load("MentalHealthChatBotmodel.tflearn")

- # Function to process user input into a bag-of-words format for Chatbot
  def bag_of_words(s, words):
      bag = [0 for _ in range(len(words))]
      s_words = word_tokenize(s)
-     s_words = [stemmer.stem(word.lower()) for word in s_words if word.lower() in words]
      for se in s_words:
          for i, w in enumerate(words):
              if w == se:
                  bag[i] = 1
      return np.array(bag)

- # Chat function for Well-Being Chatbot
- def chatbot(message, history):
      history = history or []
-     message = message.lower()
      try:
-         # Predict the tag
-         results = model.predict([bag_of_words(message, words)])
-         results_index = np.argmax(results)
-         tag = labels[results_index]
-         # Match tag with intent and choose a random response
-         for tg in data["intents"]:
-             if tg['tag'] == tag:
-                 responses = tg['responses']
-                 response = random.choice(responses)
                  break
-         else:
-             response = "I'm sorry, I didn't understand that. Could you please rephrase?"
      except Exception as e:
-         print(f"Error in chatbot: {e}") # For debugging
-         response = f"An error occurred: {str(e)}"
-     # Convert the new message and response to the 'messages' format
-     history.append({"role": "user", "content": message})
      history.append({"role": "assistant", "content": response})
-     return history, history

- # Sentiment Analysis using Hugging Face model
- tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
- model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

- def analyze_sentiment(user_input):
-     inputs = tokenizer_sentiment(user_input, return_tensors="pt")
-     with torch.no_grad():
-         outputs = model_sentiment(**inputs)
-     predicted_class = torch.argmax(outputs.logits, dim=1).item()
-     sentiment = ["Negative", "Neutral", "Positive"][predicted_class]
-     return f"Predicted Sentiment: {sentiment}"

- # Emotion Detection using Hugging Face model
- tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
- model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

- def detect_emotion(user_input):
-     pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
-     result = pipe(user_input)
-     emotion = result[0]['label']
-     return f"Emotion Detected: {emotion}"

- # Function to generate suggestions based on detected emotion
  def generate_suggestions(emotion):
      suggestions = {
-         'joy': ["Stay positive! Keep up the good mood.", "Try some relaxing activities like meditation."],
-         'anger': ["It's okay to be angry, try to breathe and relax.", "Exercise can help release tension."],
-         'fear': ["Take deep breaths, you are in control.", "Try mindfulness exercises to calm your mind."],
-         'sadness': ["Take a break, it's okay to feel down sometimes.", "Consider reaching out to a friend or loved one."],
-         'surprise': ["Take a moment to reflect, things might seem overwhelming.", "Practice mindfulness to regain balance."],
-         'disgust': ["It's okay to feel disgust, try to identify the cause.", "Taking a short walk might help clear your mind."]
      }
-     return pd.DataFrame(suggestions.get(emotion, ["Stay positive!"]))
-
- # Function to get nearby health professionals and create a map
- def get_health_professionals_and_map(location, health_professional_query):
-     # Use Google Maps API to get health professionals (example setup)
-     gmaps = googlemaps.Client(key="YOUR_GOOGLE_API_KEY")
-     places = gmaps.places(query=health_professional_query, location=location)

-     if places['status'] == 'OK':
-         results = places['results']
-         route_info = "\n".join([place['name'] for place in results])
-         map_html = create_map(results)
-         return route_info, map_html
-     return "No professionals found.", None
-
- def create_map(places):
-     m = folium.Map(location=[places[0]['geometry']['location']['lat'], places[0]['geometry']['location']['lng']], zoom_start=13)
-     for place in places:
-         folium.Marker([place['geometry']['location']['lat'], place['geometry']['location']['lng']],
-                       popup=place['name']).add_to(m)
-     map_html = m._repr_html_()
-     return map_html
-
- # Custom CSS styling for Gradio interface
- css = """
- body {
-     font-family: 'Roboto', sans-serif;
- }
- .gradio-container {
-     background-color: #f0f0f0;
-     font-size: 16px;
- }
- .gradio-input, .gradio-output {
-     padding: 15px;
-     border-radius: 10px;
-     background-color: #ffffff;
-     border: 2px solid #ccc;
- }
- .gradio-container .gradio-button {
-     background-color: #007BFF;
-     color: white;
-     border-radius: 5px;
-     padding: 10px 15px;
- }
- .gradio-container .gradio-button:hover {
-     background-color: #0056b3;
- }
- .gradio-container h3 {
-     color: #333;
- }
- .gradio-output .output {
-     border-top: 3px solid #ddd;
-     padding-top: 10px;
- }
- .gradio-input input {
-     color: #333;
- }
- .gradio-input textarea {
-     color: #333;
- }
- """
-
- # Gradio interface components
- def gradio_app(message, current_location, health_professional_query, history):
-     # Detect sentiment and emotion
-     sentiment = analyze_sentiment(message)
-     emotion = detect_emotion(message)

-     # Generate suggestions based on emotion
-     suggestions_df = generate_suggestions(emotion)
-
-     # Get health professionals details and map
-     route_info, map_html = get_health_professionals_and_map(current_location, health_professional_query)
-
-     # Add emoticon for emotion
-     emotion_emoticons = {
-         'joy': '😊',
-         'anger': '😑',
-         'fear': '😨',
-         'sadness': '😒',
-         'surprise': '😲',
-         'disgust': '🤢'
-     }
-     emotion_icon = emotion_emoticons.get(emotion, '🙂')

-     return sentiment, f"{emotion_icon} {emotion}", suggestions_df, route_info, map_html, history
-
- # Gradio interface setup
- iface = gr.Interface(
-     fn=gradio_app,
-     inputs=[
-         gr.Textbox(lines=2, placeholder="Enter your message..."),
-         gr.Textbox(lines=2, placeholder="Enter your current location..."),
-         gr.Textbox(lines=2, placeholder="Enter health professional query..."),
-         gr.State(value=[])
-     ],
-     outputs=[
-         gr.Textbox(label="Sentiment Analysis"),
-         gr.Textbox(label="Detected Emotion"),
-         gr.Dataframe(label="Suggestions"),
-         gr.Textbox(label="Nearby Health Professionals"),
-         gr.HTML(label="Map of Health Professionals"),
-         gr.State(value=[])
-     ],
-     live=True,
-     allow_flagging="never",
-     theme="huggingface",
-     css=css # Apply custom CSS styling
- )
-
- # Launch Gradio interface
- iface.launch(share=True)
 
+ import os
  import gradio as gr
  import nltk
  import numpy as np
  import tflearn
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
  import googlemaps
  import folium
+ import pandas as pd
+ import torch

+ # Disable GPU usage for TensorFlow for compatibility
  os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

+ # Download necessary NLTK resources
  nltk.download('punkt')

+ # Initialize Lancaster Stemmer
  stemmer = LancasterStemmer()

+ # Load intents.json for Chatbot
  with open("intents.json") as file:
+     intents_data = json.load(file)

+ # Load tokenized data for Chatbot
  with open("data.pickle", "rb") as f:
      words, labels, training, output = pickle.load(f)

+ # Build Chatbot Model
+ def build_chatbot_model():
+     net = tflearn.input_data(shape=[None, len(training[0])])
+     net = tflearn.fully_connected(net, 8)
+     net = tflearn.fully_connected(net, 8)
+     net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
+     net = tflearn.regression(net)
+     model = tflearn.DNN(net)
+     model.load("MentalHealthChatBotmodel.tflearn")
+     return model

+ chatbot_model = build_chatbot_model()

+ # Bag of Words Function for Chatbot
  def bag_of_words(s, words):
      bag = [0 for _ in range(len(words))]
      s_words = word_tokenize(s)
+     s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
      for se in s_words:
          for i, w in enumerate(words):
              if w == se:
                  bag[i] = 1
      return np.array(bag)

+ # Chatbot Response Function
+ def chatbot_response(message, history):
+     """Respond to user input and update chat history."""
      history = history or []
      try:
+         result = chatbot_model.predict([bag_of_words(message, words)])
+         result_index = np.argmax(result)
+         tag = labels[result_index]
+
+         response = "I didn't understand that. 🤔 Try rephrasing your question."
+         for intent in intents_data["intents"]:
+             if intent["tag"] == tag:
+                 response = f"🤖 {random.choice(intent['responses'])}"
                  break
      except Exception as e:
+         response = f"Error generating response: {str(e)} 💥"
+
+     history.append({"role": "user", "content": f"💬 {message}"})
      history.append({"role": "assistant", "content": response})
+     return history, response

+ # Emotion Detection with Transformers
+ emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
+ emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

+ def detect_emotion(user_input):
+     """Detect emotion using a pre-trained model and return label with an emoji."""
+     pipe = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)
+     try:
+         result = pipe(user_input)
+         emotion = result[0]["label"]
+         emotion_map = {
+             "joy": "😊 Joy",
+             "anger": "😠 Anger",
+             "sadness": "😒 Sadness",
+             "fear": "😨 Fear",
+             "surprise": "😲 Surprise",
+             "neutral": "😐 Neutral",
+         }
+         return emotion_map.get(emotion, "Unknown Emotion 🤔")
+     except Exception as e:
+         return f"Error detecting emotion: {str(e)} 💥"

+ # Sentiment Analysis
+ sentiment_tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
+ sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

+ def analyze_sentiment(user_input):
+     """Analyze sentiment of user input."""
+     inputs = sentiment_tokenizer(user_input, return_tensors="pt")
+     try:
+         with torch.no_grad():
+             outputs = sentiment_model(**inputs)
+         sentiment_class = torch.argmax(outputs.logits, dim=1).item()
+         sentiment_map = ["Negative 😔", "Neutral 😐", "Positive 😊"]
+         return f"Sentiment: {sentiment_map[sentiment_class]}"
+     except Exception as e:
+         return f"Error in sentiment analysis: {str(e)} 💥"

+ # Generate Suggestions Based on Emotion
  def generate_suggestions(emotion):
      suggestions = {
+         "😊 Joy": [
+             {"Title": "Meditation Techniques", "Link": "https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation"},
+             {"Title": "Learn Something New", "Link": "https://www.edx.org/"},
+         ],
+         "😒 Sadness": [
+             {"Title": "Emotional Wellness Toolkit", "Link": "https://www.nih.gov/health-information/emotional-wellness-toolkit"},
+             {"Title": "Relaxation Videos", "Link": "https://youtu.be/-e-4Kx5px_I"},
+         ],
+         "😠 Anger": [
+             {"Title": "Dealing with Anger", "Link": "https://www.helpguide.org/articles/anger/anger-management.htm"},
+             {"Title": "Stress Reducing Tips", "Link": "https://www.webmd.com/stress-management"},
+         ],
      }
+     return suggestions.get(emotion, [{"Title": "General Tips", "Link": "https://www.psychologytoday.com/"}])
+
+ # Gradio Interface Main Function
+ def well_being_app(user_input, location, query, history):
+     """Main app combining chatbot, emotion detection, sentiment, suggestions, and map."""
+     # Chatbot Interaction
+     history, chatbot_reply = chatbot_response(user_input, history)
+
+     # Emotion Detection
+     emotion = detect_emotion(user_input)
+
+     # Sentiment Analysis
+     sentiment = analyze_sentiment(user_input)
+
+     # Suggestions Based on Emotion
+     emotion_label = emotion.split(": ")[-1]
+     suggestions = generate_suggestions(emotion_label)
+     suggestions_df = pd.DataFrame(suggestions)
+
+     # Return Outputs
+     return (
+         history,
+         sentiment,
+         emotion,
+         suggestions_df
+     )
+
+ # Gradio Interface UI
+ with gr.Blocks() as app:
+     with gr.Row():
+         gr.Markdown("# 🌼 Well-Being Support Application")
+
+     with gr.Row():
+         user_input = gr.Textbox(lines=2, placeholder="Type your message here...", label="Your Message")
+         location = gr.Textbox(value="Honolulu, HI", label="Your Location")
+         query = gr.Textbox(value="Counselor", label="Health Professional (Doctor, Therapist, etc.)")

+     with gr.Row():
+         submit_button = gr.Button(value="Submit", label="Submit")

+     with gr.Row():
+         chatbot = gr.Chatbot(label="Chat History")
+         sentiment_output = gr.Textbox(label="Sentiment Analysis")
+         emotion_output = gr.Textbox(label="Emotion Detected")

+     with gr.Row():
+         suggestions_output = gr.DataFrame(label="Suggestions Based on Mood")
+
+     # Connect inputs and outputs
+     submit_button.click(
+         well_being_app,
+         inputs=[user_input, location, query, chatbot],
+         outputs=[chatbot, sentiment_output, emotion_output, suggestions_output],
+     )
+
+ # Launch the app
+ app.launch()
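
For a quick local check of the refactored pipeline without the tflearn artifacts (intents.json, data.pickle, MentalHealthChatBotmodel.tflearn), the sketch below exercises the emotion-to-suggestions path on its own. It is only an illustration, not part of app.py: it assumes transformers and pandas are installed, reuses the same j-hartmann/emotion-english-distilroberta-base model ID and emoji-keyed suggestion table, and the helper name quick_emotion_check is made up for this example.

# Minimal sketch (assumed environment: transformers + pandas installed).
# Mirrors the logic of detect_emotion() and generate_suggestions() from the new app.py;
# quick_emotion_check is a hypothetical helper, not a function in the commit.
import pandas as pd
from transformers import pipeline

emotion_pipe = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base")

emotion_map = {
    "joy": "😊 Joy", "anger": "😠 Anger", "sadness": "😒 Sadness",
    "fear": "😨 Fear", "surprise": "😲 Surprise", "neutral": "😐 Neutral",
}
suggestions = {
    "😊 Joy": [{"Title": "Learn Something New", "Link": "https://www.edx.org/"}],
    "😠 Anger": [{"Title": "Dealing with Anger", "Link": "https://www.helpguide.org/articles/anger/anger-management.htm"}],
}

def quick_emotion_check(text):
    label = emotion_pipe(text)[0]["label"]                 # raw model label, e.g. "joy"
    pretty = emotion_map.get(label, "Unknown Emotion 🤔")  # same display labels as app.py
    rows = suggestions.get(pretty, [{"Title": "General Tips", "Link": "https://www.psychologytoday.com/"}])
    return pretty, pd.DataFrame(rows)                      # the DataFrame is what feeds gr.DataFrame

print(quick_emotion_check("I feel great about today!"))

One wiring note: chatbot_response builds history as {"role": ..., "content": ...} dicts; whether gr.Chatbot renders that directly depends on the Gradio version pinned by the Space (recent releases expect gr.Chatbot(type="messages") for dict-style history, while older ones expect (user, bot) tuples).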