DreamStream-1 committed · verified
Commit b377ce7 · 1 Parent(s): 9508310

Update app.py

Files changed (1): app.py +18 -39
app.py CHANGED
@@ -12,20 +12,20 @@ from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipe
 import pandas as pd
 import torch
 
-# Disable TensorFlow GPU warnings (safe since we are using CPU)
+# Disable TensorFlow GPU warnings
 os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
 
 # Download necessary NLTK resources
 nltk.download("punkt")
 
-# Initialize Lancaster Stemmer for text preprocessing
+# Initialize Lancaster Stemmer
 stemmer = LancasterStemmer()
 
-# Load intents.json for the chatbot
+# Load intents.json for chatbot
 with open("intents.json") as file:
     intents_data = json.load(file)
 
-# Load tokenized training data for chatbot
+# Load tokenized training data
 with open("data.pickle", "rb") as f:
     words, labels, training, output = pickle.load(f)
 
@@ -42,7 +42,7 @@ def build_chatbot_model():
 
 chatbot_model = build_chatbot_model()
 
-# Function: Bag of Words
+# Bag of Words Function
 def bag_of_words(s, words):
     bag = [0 for _ in range(len(words))]
     s_words = word_tokenize(s)
@@ -55,13 +55,12 @@ def bag_of_words(s, words):
 
 # Chatbot Response Function
 def chatbot_response(message, history):
-    """Generates a chatbot response."""
     history = history or []
     try:
         result = chatbot_model.predict([bag_of_words(message, words)])
         idx = np.argmax(result)
         tag = labels[idx]
-        response = "I didn't understand that. 🤔"
+        response = "I'm not sure how to respond to that. 🤔"
         for intent in intents_data["intents"]:
             if intent["tag"] == tag:
                 response = random.choice(intent["responses"])
@@ -73,7 +72,7 @@ def chatbot_response(message, history):
     history.append({"role": "assistant", "content": response})
     return history, response
 
-# Emotion Detection Function
+# Emotion Detection
 emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
 emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
 
@@ -94,12 +93,11 @@ def detect_emotion(user_input):
     except Exception as e:
         return f"Error detecting emotion: {str(e)} 💥"
 
-# Sentiment Analysis Function
+# Sentiment Analysis
 sentiment_tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
 sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
 
 def analyze_sentiment(user_input):
-    """Analyze sentiment based on input."""
     inputs = sentiment_tokenizer(user_input, return_tensors="pt")
     try:
         with torch.no_grad():
@@ -128,64 +126,46 @@ def generate_suggestions(emotion):
     }
     return suggestions_map.get(emotion, [{"Title": "General Wellness Resources 🌈", "Link": "https://www.helpguide.org/wellness"}])
 
-# Dummy Nearby Professionals Function
+# Nearby Professionals Function
 def search_nearby_professionals(location, query):
-    """Simulates the search for nearby professionals."""
+    """Returns a list of professionals as a list of lists for compatibility with DataFrame."""
     if location and query:
-        return [
+        results = [
             {"Name": "Wellness Center", "Address": "123 Wellness Way"},
             {"Name": "Mental Health Clinic", "Address": "456 Recovery Road"},
             {"Name": "Therapy Hub", "Address": "789 Peace Avenue"},
         ]
+        return [[item["Name"], item["Address"]] for item in results]
     return []
 
 # Main App Logic
 def well_being_app(user_input, location, query, history):
-    """Handles chatbot interaction, emotion detection, sentiment analysis, and professional search results."""
-    # Chatbot Response
     history, _ = chatbot_response(user_input, history)
-
-    # Emotion Detection
     emotion = detect_emotion(user_input)
-
-    # Sentiment Analysis
     sentiment = analyze_sentiment(user_input)
-
-    # Emotion-based Suggestions
     emotion_name = emotion.split(": ")[-1]
     suggestions = generate_suggestions(emotion_name)
     suggestions_df = pd.DataFrame(suggestions)
-
-    # Nearby Professionals Lookup
     professionals = search_nearby_professionals(location, query)
-
     return history, sentiment, emotion, suggestions_df, professionals
 
 # Gradio Interface
 with gr.Blocks() as interface:
     gr.Markdown("## 🌱 Well-being Companion")
-    gr.Markdown("> Empowering Your Health! 💚")
+    gr.Markdown("> Empowering Your Mental Health! 💚")
 
     with gr.Row():
-        user_input = gr.Textbox(label="Your Message", placeholder="How are you feeling today? (e.g. I feel happy)")
-        location_input = gr.Textbox(label="Location", placeholder="Enter your city (e.g., New York)")
-        query_input = gr.Textbox(label="Search Query", placeholder="What are you searching for? (e.g., therapists)")
-        submit_button = gr.Button("Submit", variant="primary")
+        user_input = gr.Textbox(label="Your Message")
+        location_input = gr.Textbox(label="Location")
+        query_input = gr.Textbox(label="Search Query")
+        submit_button = gr.Button("Submit")
 
-    # Chatbot Section
     chatbot_output = gr.Chatbot(label="Chatbot Interaction", type="messages", value=[])
-
-    # Sentiment and Emotion Outputs
     sentiment_output = gr.Textbox(label="Sentiment Analysis")
     emotion_output = gr.Textbox(label="Emotion Detected")
-
-    # Suggestions Table
     suggestions_output = gr.DataFrame(label="Suggestions", value=[], headers=["Title", "Link"])
+    nearby_professionals_output = gr.DataFrame(label="Nearby Professionals", headers=["Name", "Address"])
 
-    # Professionals Table
-    nearby_professionals_output = gr.DataFrame(label="Nearby Professionals", value=[], headers=["Name", "Address"])
-
-    # Connect Inputs to Outputs
     submit_button.click(
         well_being_app,
         inputs=[user_input, location_input, query_input, chatbot_output],
@@ -198,5 +178,4 @@ with gr.Blocks() as interface:
         ],
     )
 
-# Run Gradio Application
 interface.launch()
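
For reference, the main functional change in this commit reshapes search_nearby_professionals so that each row is a plain list aligned with the headers=["Name", "Address"] of the gr.DataFrame component. A minimal, illustrative sketch of that conversion follows; the sample entries mirror the ones in the diff, and the snippet itself is not part of the commit:

# Illustrative sketch (not from the commit): dict records -> row lists
# matching gr.DataFrame(headers=["Name", "Address"])
results = [
    {"Name": "Wellness Center", "Address": "123 Wellness Way"},
    {"Name": "Mental Health Clinic", "Address": "456 Recovery Road"},
]
rows = [[item["Name"], item["Address"]] for item in results]
print(rows)
# [['Wellness Center', '123 Wellness Way'], ['Mental Health Clinic', '456 Recovery Road']]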