DreamStream-1 committed · verified
Commit 274d1f4 · 1 Parent(s): b377ce7

Update app.py

Files changed (1): app.py (+176 -132)
app.py CHANGED
@@ -9,173 +9,217 @@ import pickle
  from nltk.tokenize import word_tokenize
  from nltk.stem.lancaster import LancasterStemmer
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
  import pandas as pd
  import torch

- # Disable TensorFlow GPU warnings
- os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

- # Download necessary NLTK resources
- nltk.download("punkt")

- # Initialize Lancaster Stemmer
  stemmer = LancasterStemmer()

- # Load intents.json for chatbot
  with open("intents.json") as file:
-     intents_data = json.load(file)

- # Load tokenized training data
  with open("data.pickle", "rb") as f:
      words, labels, training, output = pickle.load(f)

- # Build TFlearn Chatbot Model
- def build_chatbot_model():
-     net = tflearn.input_data(shape=[None, len(training[0])])
-     net = tflearn.fully_connected(net, 8)
-     net = tflearn.fully_connected(net, 8)
-     net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
-     net = tflearn.regression(net)
-     model = tflearn.DNN(net)
-     model.load("MentalHealthChatBotmodel.tflearn")
-     return model

- chatbot_model = build_chatbot_model()

- # Bag of Words Function
  def bag_of_words(s, words):
      bag = [0 for _ in range(len(words))]
      s_words = word_tokenize(s)
-     s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
      for se in s_words:
          for i, w in enumerate(words):
              if w == se:
                  bag[i] = 1
      return np.array(bag)

- # Chatbot Response Function
- def chatbot_response(message, history):
      history = history or []
      try:
-         result = chatbot_model.predict([bag_of_words(message, words)])
-         idx = np.argmax(result)
-         tag = labels[idx]
-         response = "I'm not sure how to respond to that. 🤔"
-         for intent in intents_data["intents"]:
-             if intent["tag"] == tag:
-                 response = random.choice(intent["responses"])
                  break
      except Exception as e:
-         response = f"Error generating response: {str(e)} 💥"
-
      history.append({"role": "user", "content": message})
      history.append({"role": "assistant", "content": response})
-     return history, response
-
- # Emotion Detection
- emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
- emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
-
- def detect_emotion(user_input):
-     pipe = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)
-     try:
-         result = pipe(user_input)
-         emotion = result[0]["label"]
-         emotion_map = {
-             "joy": "😊 Joy",
-             "anger": "😠 Anger",
-             "sadness": "😒 Sadness",
-             "fear": "😨 Fear",
-             "surprise": "😲 Surprise",
-             "neutral": "😐 Neutral",
-         }
-         return emotion_map.get(emotion, "Unknown Emotion 🤔")
-     except Exception as e:
-         return f"Error detecting emotion: {str(e)} 💥"

- # Sentiment Analysis
- sentiment_tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
- sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

  def analyze_sentiment(user_input):
-     inputs = sentiment_tokenizer(user_input, return_tensors="pt")
-     try:
-         with torch.no_grad():
-             outputs = sentiment_model(**inputs)
-         sentiment = torch.argmax(outputs.logits, dim=1).item()
-         sentiment_map = ["Negative 😔", "Neutral 😐", "Positive 😊"]
-         return sentiment_map[sentiment]
-     except Exception as e:
-         return f"Error in sentiment analysis: {str(e)} 💥"

- # Suggestions Based on Emotion
  def generate_suggestions(emotion):
-     suggestions_map = {
-         "😊 Joy": [
-             {"Title": "Mindful Meditation 🧘", "Link": "https://www.helpguide.org/meditation"},
-             {"Title": "Learn a New Skill ✨", "Link": "https://www.skillshare.com/"},
-         ],
-         "😒 Sadness": [
-             {"Title": "Talk to a Professional 💬", "Link": "https://www.betterhelp.com/"},
-             {"Title": "Mental Health Toolkit 🛠️", "Link": "https://www.psychologytoday.com/"},
          ],
-         "😠 Anger": [
-             {"Title": "Anger Management Tips 🔥", "Link": "https://www.mentalhealth.org.uk"},
-             {"Title": "Stress Relieving Exercises 🌿", "Link": "https://www.calm.com/"},
          ],
      }
-     return suggestions_map.get(emotion, [{"Title": "General Wellness Resources 🌈", "Link": "https://www.helpguide.org/wellness"}])
-
- # Nearby Professionals Function
- def search_nearby_professionals(location, query):
-     """Returns a list of professionals as a list of lists for compatibility with DataFrame."""
-     if location and query:
-         results = [
-             {"Name": "Wellness Center", "Address": "123 Wellness Way"},
-             {"Name": "Mental Health Clinic", "Address": "456 Recovery Road"},
-             {"Name": "Therapy Hub", "Address": "789 Peace Avenue"},
-         ]
-         return [[item["Name"], item["Address"]] for item in results]
-     return []
-
- # Main App Logic
- def well_being_app(user_input, location, query, history):
-     history, _ = chatbot_response(user_input, history)
-     emotion = detect_emotion(user_input)
-     sentiment = analyze_sentiment(user_input)
-     emotion_name = emotion.split(": ")[-1]
-     suggestions = generate_suggestions(emotion_name)
-     suggestions_df = pd.DataFrame(suggestions)
-     professionals = search_nearby_professionals(location, query)
-     return history, sentiment, emotion, suggestions_df, professionals
-
- # Gradio Interface
- with gr.Blocks() as interface:
-     gr.Markdown("## 🌱 Well-being Companion")
-     gr.Markdown("> Empowering Your Mental Health! 💚")
-
-     with gr.Row():
-         user_input = gr.Textbox(label="Your Message")
-         location_input = gr.Textbox(label="Location")
-         query_input = gr.Textbox(label="Search Query")
-         submit_button = gr.Button("Submit")
-
-     chatbot_output = gr.Chatbot(label="Chatbot Interaction", type="messages", value=[])
-     sentiment_output = gr.Textbox(label="Sentiment Analysis")
-     emotion_output = gr.Textbox(label="Emotion Detected")
-     suggestions_output = gr.DataFrame(label="Suggestions", value=[], headers=["Title", "Link"])
-     nearby_professionals_output = gr.DataFrame(label="Nearby Professionals", headers=["Name", "Address"])
-
-     submit_button.click(
-         well_being_app,
-         inputs=[user_input, location_input, query_input, chatbot_output],
-         outputs=[
-             chatbot_output,
-             sentiment_output,
-             emotion_output,
-             suggestions_output,
-             nearby_professionals_output,
-         ],
-     )

- interface.launch()
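
Both the old version above and the new version below encode the user message with the same bag-of-words scheme before passing it to the tflearn classifier (the old version filters tokens with word.isalnum(), the new one keeps only tokens already in the vocabulary). For reference, a minimal self-contained sketch of that encoding; the vocabulary and sentence here are made up for illustration and are not the contents of data.pickle:

import nltk
import numpy as np
from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import word_tokenize

nltk.download("punkt", quiet=True)  # newer NLTK releases may also need "punkt_tab"

stemmer = LancasterStemmer()

# Hypothetical stemmed vocabulary standing in for the one unpickled from data.pickle.
vocab = ["feel", "sad", "help", "stress"]

def bag_of_words(sentence, vocab):
    # Put a 1 wherever a vocabulary entry matches a stemmed token of the input sentence.
    tokens = [stemmer.stem(tok.lower()) for tok in word_tokenize(sentence)]
    return np.array([1 if w in tokens else 0 for w in vocab])

print(bag_of_words("I feel sad today", vocab))  # 1s at positions whose entry matches a stemmed token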
  from nltk.tokenize import word_tokenize
  from nltk.stem.lancaster import LancasterStemmer
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
+ import googlemaps
+ import folium
  import pandas as pd
  import torch

+ # Disable GPU usage for TensorFlow
+ os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

+ # Ensure necessary NLTK resources are downloaded
+ nltk.download('punkt')

+ # Initialize the stemmer
  stemmer = LancasterStemmer()

+ # Load intents.json for Well-Being Chatbot
  with open("intents.json") as file:
+     data = json.load(file)

+ # Load preprocessed data for Well-Being Chatbot
  with open("data.pickle", "rb") as f:
      words, labels, training, output = pickle.load(f)

+ # Build the model structure for Well-Being Chatbot
+ net = tflearn.input_data(shape=[None, len(training[0])])
+ net = tflearn.fully_connected(net, 8)
+ net = tflearn.fully_connected(net, 8)
+ net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
+ net = tflearn.regression(net)

+ # Load the trained model
+ model = tflearn.DNN(net)
+ model.load("MentalHealthChatBotmodel.tflearn")

+ # Function to process user input into a bag-of-words format for Chatbot
  def bag_of_words(s, words):
      bag = [0 for _ in range(len(words))]
      s_words = word_tokenize(s)
+     s_words = [stemmer.stem(word.lower()) for word in s_words if word.lower() in words]
      for se in s_words:
          for i, w in enumerate(words):
              if w == se:
                  bag[i] = 1
      return np.array(bag)

+ # Chat function for Well-Being Chatbot
+ def chatbot(message, history):
      history = history or []
+     message = message.lower()
      try:
+         # Predict the tag
+         results = model.predict([bag_of_words(message, words)])
+         results_index = np.argmax(results)
+         tag = labels[results_index]
+
+         # Match tag with intent and choose a random response
+         for tg in data["intents"]:
+             if tg['tag'] == tag:
+                 responses = tg['responses']
+                 response = random.choice(responses)
                  break
+         else:
+             response = "I'm sorry, I didn't understand that. Could you please rephrase?"
      except Exception as e:
+         response = f"An error occurred: {str(e)}"
+
+     # Convert the new message and response to the 'messages' format
      history.append({"role": "user", "content": message})
      history.append({"role": "assistant", "content": response})
+
+     return history, history

+ # Sentiment Analysis using Hugging Face model
+ tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
+ model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

  def analyze_sentiment(user_input):
+     inputs = tokenizer_sentiment(user_input, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model_sentiment(**inputs)
+     predicted_class = torch.argmax(outputs.logits, dim=1).item()
+     sentiment = ["Negative", "Neutral", "Positive"][predicted_class]  # Assuming 3 classes
+     return f"Predicted Sentiment: {sentiment}"
+
+ # Emotion Detection using Hugging Face model
+ tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
+ model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

+ def detect_emotion(user_input):
+     pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
+     result = pipe(user_input)
+     emotion = result[0]['label']
+     return f"Emotion Detected: {emotion}"
+
+ # Initialize Google Maps API client securely
+ gmaps = googlemaps.Client(key=os.getenv('GOOGLE_API_KEY'))
+
+ # Function to search for health professionals
+ def search_health_professionals(query, location, radius=10000):
+     places_result = gmaps.places_nearby(location, radius=radius, type='doctor', keyword=query)
+     return places_result.get('results', [])
+
+ # Function to get directions and display on Gradio UI
+ def get_health_professionals_and_map(current_location, health_professional_query):
+     location = gmaps.geocode(current_location)
+     if location:
+         lat = location[0]["geometry"]["location"]["lat"]
+         lng = location[0]["geometry"]["location"]["lng"]
+         location = (lat, lng)
+
+         professionals = search_health_professionals(health_professional_query, location)
+
+         # Generate map
+         map_center = location
+         m = folium.Map(location=map_center, zoom_start=13)
+
+         # Add markers to the map
+         for place in professionals:
+             folium.Marker(
+                 location=[place['geometry']['location']['lat'], place['geometry']['location']['lng']],
+                 popup=place['name']
+             ).add_to(m)
+
+         # Convert map to HTML for Gradio display
+         map_html = m._repr_html_()
+
+         # Route information
+         route_info = "\n".join([f"{place['name']} - {place['vicinity']}" for place in professionals])
+
+         return route_info, map_html
+     else:
+         return "Unable to find location.", ""
+
+ # Function to generate suggestions based on the detected emotion
  def generate_suggestions(emotion):
+     suggestions = {
+         'joy': [
+             {"Title": "Relaxation Techniques 🌿", "Subject": "Relaxation", "Link": '<a href="https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation" target="_blank">Mindful Breathing Meditation</a>'},
+             {"Title": "Dealing with Stress 💆", "Subject": "Stress Management", "Link": '<a href="https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety" target="_blank">Tips for Dealing with Anxiety</a>'},
+             {"Title": "Emotional Wellness Toolkit 💪", "Subject": "Wellness", "Link": '<a href="https://www.nih.gov/health-information/emotional-wellness-toolkit" target="_blank">Emotional Wellness Toolkit</a>'},
+             {"Title": "Relaxation Video 🎥", "Subject": "Video", "Link": '<a href="https://youtu.be/m1vaUGtyo-A" target="_blank">Watch Video</a>'}
          ],
+         'anger': [
+             {"Title": "Emotional Wellness Toolkit 💡", "Subject": "Wellness", "Link": '<a href="https://www.nih.gov/health-information/emotional-wellness-toolkit" target="_blank">Emotional Wellness Toolkit</a>'},
+             {"Title": "Stress Management Tips 🧘", "Subject": "Stress Management", "Link": '<a href="https://www.health.harvard.edu/health-a-to-z" target="_blank">Harvard Health: Stress Management</a>'},
+             {"Title": "Dealing with Anger 💥", "Subject": "Anger Management", "Link": '<a href="https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety" target="_blank">Tips for Dealing with Anger</a>'},
+             {"Title": "Relaxation Video 🎬", "Subject": "Video", "Link": '<a href="https://youtu.be/MIc299Flibs" target="_blank">Watch Video</a>'}
          ],
+         # Add more suggestions for other emotions as required...
      }

+     return suggestions.get(emotion, [])
+
+ # Gradio interface
+ def gradio_app(message, location, health_query, submit_button, history, state):
+     if submit_button:
+         # Chatbot interaction
+         history, _ = chatbot(message, history)
+
+         # Sentiment analysis
+         sentiment_response = analyze_sentiment(message)
+
+         # Emotion detection
+         emotion_response = detect_emotion(message)
+
+         # Health professional search and map display
+         route_info, map_html = get_health_professionals_and_map(location, health_query)
+
+         # Generate suggestions based on the detected emotion
+         suggestions = generate_suggestions(emotion_response.split(': ')[1])
+
+         # Create a DataFrame for displaying suggestions
+         suggestions_df = pd.DataFrame(suggestions)
+
+         return history, sentiment_response, emotion_response, route_info, map_html, gr.DataFrame(suggestions_df, headers=["Title", "Subject", "Link"]), state
+     else:
+         return history, "", "", "", "", gr.DataFrame([], headers=["Title", "Subject", "Link"]), state
+
+ # Gradio UI components
+ message_input = gr.Textbox(lines=1, label="💬 Message")
+ location_input = gr.Textbox(value="Honolulu, HI", label="📍 Current Location")
+ health_query_input = gr.Textbox(value="doctor", label="🩺 Health Professional Query (e.g., doctor, psychiatrist, psychologist)")
+ submit_button = gr.Button("🚀 Submit")
+
+ # Updated chat history component with 'messages' type
+ chat_history = gr.Chatbot(label="Well-Being Chat History", type='messages')
+
+ # Outputs
+ sentiment_output = gr.Textbox(label="💬 Sentiment Analysis Result")
+ emotion_output = gr.Textbox(label="😊 Emotion Detection Result")
+ route_info_output = gr.Textbox(label="🩺 Health Professionals Information")
+ map_output = gr.HTML(label="🗺️ Map with Health Professionals")
+ suggestions_output = gr.DataFrame(label="📝 Well-Being Suggestions", headers=["Title", "Subject", "Link"])
+
+ # Create Gradio interface with custom CSS for gradient background
+ css = """
+ body {
+     background: linear-gradient(to right, #6ab04c, #34e89e);
+     font-family: Arial, sans-serif;
+ }
+ """
+
+ # Create Gradio interface
+ iface = gr.Interface(
+     fn=gradio_app,
+     inputs=[message_input, location_input, health_query_input, submit_button, gr.State()],
+     outputs=[chat_history, sentiment_output, emotion_output, route_info_output, map_output, suggestions_output, gr.State()],
+     allow_flagging="never",
+     live=False,
+     title="Well-Being App: Support, Sentiment, Emotion Detection & Health Professional Search",
+     css=css
+ )
+
+ # Launch the Gradio interface
+ iface.launch()
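
For readers trying the two Hugging Face classifiers outside the app: a minimal standalone sketch using the same model checkpoints as the commit, with each classifier built once via pipeline() and reused. The LABEL_* to Negative/Neutral/Positive mapping is an assumption mirroring the list used in analyze_sentiment above.

from transformers import pipeline

# Same checkpoints as in the commit, built once and reused instead of per call.
sentiment_clf = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-sentiment")
emotion_clf = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base")

# The sentiment checkpoint exposes generic labels; the commit maps them, in order,
# to Negative / Neutral / Positive (assumed mapping, mirroring the code above).
label_names = {"LABEL_0": "Negative", "LABEL_1": "Neutral", "LABEL_2": "Positive"}

text = "I had a rough day, but talking about it helped."
sentiment = sentiment_clf(text)[0]   # e.g. {'label': 'LABEL_2', 'score': ...}
emotion = emotion_clf(text)[0]       # e.g. {'label': 'joy', 'score': ...}
print("Sentiment:", label_names.get(sentiment["label"], sentiment["label"]))
print("Emotion:", emotion["label"])

Building the pipelines once at module load, as the commit already does for the sentiment tokenizer and model, avoids re-instantiating the emotion pipeline on every call to detect_emotion.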