Update app.py
app.py
CHANGED
@@ -18,6 +18,7 @@ import tflearn
import tensorflow as tf
import json
import pickle

# Ensure necessary NLTK resources are downloaded
nltk.download('punkt')
@@ -42,195 +43,121 @@ try:
except FileNotFoundError:
    raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")

tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

-def get_places_data(query, location, radius, api_key, next_page_token=None):
    params = {
        "query": query,
        "location": location,
        "radius": radius,
        "key": api_key
    }
-    if next_page_token:
-        params["pagetoken"] = next_page_token
-
-    response = requests.get(url, params=params)
-
-    if response.status_code == 200:
-        return response.json()
-    else:
-        return None
-
-# Function to fetch detailed information for a specific place using its place_id
def get_place_details(place_id, api_key):
-    details_url =
-
-    data = get_places_data(query, location, radius, api_key, next_page_token)
-    if data:
-        results = data.get('results', [])
-        if not results:
-            break
-
-        for place in results:
-            place_id = place.get("place_id")
-            name = place.get("name")
-            address = place.get("formatted_address")
-            rating = place.get("rating", "Not available")
-            business_status = place.get("business_status", "Not available")
-            user_reviews_total = place.get("user_reviews_total", "Not available")
-            website = place.get("website", "Not available")
-            types = ", ".join(place.get("types", []))
-            location = place.get("geometry", {}).get("location", {})
-            latitude = location.get("lat", "Not available")
-            longitude = location.get("lng", "Not available")
-
-            details = get_place_details(place_id, api_key)
-            phone_number = details.get("phone_number", "Not available")
-            if phone_number == "Not available" and website != "Not available":
-                phone_number, email = scrape_div_for_contact_info(website)
-            else:
-                email = "Not available"
-
-            if website == "Not available":
-                website = scrape_div_from_google_maps(name)
-
-            all_results.append([name, address, phone_number, rating, business_status,
-                                user_reviews_total, website, types, latitude, longitude,
-                                details.get("opening_hours", "Not available"),
-                                details.get("reviews", "Not available"), email])
-
-        next_page_token = data.get('next_page_token')
-        if not next_page_token:
-            break
-
-        time.sleep(2)
-    else:
-        break
-
-    return all_results

def save_to_csv(data, filename):
-    writer = csv.writer(file)
-    writer.writerow([
-        "Name", "Address", "Phone", "Rating", "Business Status",
-        "User Reviews Total", "Website", "Types", "Latitude", "Longitude",
-        "Opening Hours", "Reviews", "Email"
-    ])
-    writer.writerows(data)
-    print(f"Data saved to {filename}")
-
-# Geocoding function to convert location text to coordinates
-def geocode_location(address):
-    params = {
-        "address": address,
-        "key": api_key
-    }
-    response = requests.get(geocoding_url, params=params)
-
-    data = response.json()
-    if data['status'] == 'OK':
-        location = data['results'][0]['geometry']['location']
-        return location['lat'], location['lng']
-    else:
-        raise ValueError("Geocoding failed.")
-    else:
-        raise ValueError("Failed to retrieve geocoding data.")
-
-# Main function to execute script
-def main():
-    google_places_data = get_all_places(query, location, radius, api_key)
-    if google_places_data:
-        save_to_csv(google_places_data, "wellness_professionals_hawaii.csv")
-    else:
-        print("No data found.")
-
-# Gradio UI setup
with gr.Blocks() as demo:
-    gr.Markdown("# Emotion Detection and Well-Being Suggestions")

    # User input for text (emotion detection)
    user_input_emotion = gr.Textbox(lines=1, label="How are you feeling today?")
@@ -238,32 +165,27 @@ with gr.Blocks() as demo:

    # Model prediction for emotion detection
    def predict_emotion(text):
-        with torch.no_grad():
-            outputs = pytorch_model(inputs['input_ids'])
-            _, predicted_class = torch.max(outputs, dim=1)
-            emotion = labels[predicted_class.item()]
-        return emotion

    # Show suggestions based on the detected emotion
    def show_suggestions(emotion):
        if emotion == 'joy':
-            return "You're feeling happy! Keep up the great mood
        elif emotion == 'anger':
-            return "You're feeling angry. It's okay to feel this way. Let's try to calm down
        elif emotion == 'fear':
-            return "You're feeling fearful. Take a moment to breathe and relax
        elif emotion == 'sadness':
-            return "You're feeling sad. It's okay to take a break
        elif emotion == 'surprise':
-            return "You're feeling surprised. It's okay to feel neutral

    emotion_output = gr.Textbox(label="Emotion Detected")
    submit_emotion.click(predict_emotion, inputs=user_input_emotion, outputs=emotion_output)

    # Button for summary
    def show_summary(emotion):
-        return f"Emotion Detected: {emotion}

    summary_button = gr.Button("Show Summary")
    summary_output = gr.Textbox(label="Summary")
@@ -307,21 +229,10 @@ with gr.Blocks() as demo:

    # Fetch and display nearby health professionals
    def fetch_nearby_health_professionals(location):
-        query = "therapist OR counselor OR mental health professional OR marriage and family therapist OR psychotherapist OR psychiatrist OR psychologist OR nutritionist OR wellness doctor OR holistic practitioner OR integrative medicine OR chiropractor OR naturopath"
-        radius = 50000  # 50 km radius
-
-        all_results = get_all_places(query, f"{lat},{lon}", radius, api_key)
-        if all_results:
-            df = pd.DataFrame(all_results, columns=["Name", "Address", "Phone", "Rating", "Business Status", "User Reviews Total", "Website", "Types", "Latitude", "Longitude", "Opening Hours", "Reviews", "Email"])
-            return df
-        else:
-            return "No data found."
-        except Exception as e:
-            return str(e)

-    nearby_health_professionals_table = gr.Dataframe(headers=["Name", "
    submit_location.click(fetch_nearby_health_professionals, inputs=location_input, outputs=nearby_health_professionals_table)

    # User input for text (sentiment analysis)
@@ -330,26 +241,18 @@ with gr.Blocks() as demo:

    # Prediction button for sentiment analysis
    def predict_sentiment(text):
-        with torch.no_grad():
-            outputs = model_sentiment(**inputs)
-            predicted_class = torch.argmax(outputs.logits, dim=1).item()
-            sentiment = ["Negative", "Neutral", "Positive"][predicted_class]
-        return sentiment

    sentiment_output = gr.Textbox(label="Predicted Sentiment")
    submit_sentiment.click(predict_sentiment, inputs=user_input_sentiment, outputs=sentiment_output)

    # Button to fetch wellness professionals data
-    fetch_button = gr.Button("Fetch Wellness
-    data_output = gr.Dataframe(headers=["Name", "

    def fetch_data():
-        return pd.DataFrame(all_results, columns=["Name", "Address", "Phone", "Rating", "Business Status", "User Reviews Total", "Website", "Types", "Latitude", "Longitude", "Opening Hours", "Reviews", "Email"])
-        else:
-            return "No data found."

    fetch_button.click(fetch_data, inputs=None, outputs=data_output)

import tensorflow as tf
import json
import pickle
+import random

# Ensure necessary NLTK resources are downloaded
nltk.download('punkt')
except FileNotFoundError:
    raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")

+# Build the model structure
+net = tflearn.input_data(shape=[None, len(training[0])])
+net = tflearn.fully_connected(net, 8)
+net = tflearn.fully_connected(net, 8)
+net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
+net = tflearn.regression(net)
+
+# Load the trained model
+model = tflearn.DNN(net)
+try:
+    model.load("MentalHealthChatBotmodel.tflearn")
+except FileNotFoundError:
+    raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found.")
+
+# Function to process user input into a bag-of-words format
+def bag_of_words(s, words):
+    bag = [0 for _ in range(len(words))]
+    s_words = nltk.word_tokenize(s)
+    s_words = [stemmer.stem(word.lower()) for word in s_words if word.lower() in words]
+    for se in s_words:
+        for i, w in enumerate(words):
+            if w == se:
+                bag[i] = 1
+    return np.array(bag)
+
+# Chat function
+def chat(message, history):
+    history = history or []
+    message = message.lower()
+
+    try:
+        # Predict the tag
+        results = model.predict([bag_of_words(message, words)])
+        results_index = np.argmax(results)
+        tag = labels[results_index]
+
+        # Match tag with intent and choose a random response
+        for tg in data["intents"]:
+            if tg['tag'] == tag:
+                responses = tg['responses']
+                response = random.choice(responses)
+                break
+        else:
+            response = "I'm sorry, I didn't understand that. Could you please rephrase?"
+
+    except Exception as e:
+        response = f"An error occurred: {str(e)}"
+
+    history.append((message, response))
+    return history, history
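
As a quick manual check of the flow above, the chat() callback can be exercised outside Gradio once data.pickle and the tflearn model have loaded; the sample message below is purely illustrative:

# Sketch: drive chat() directly (assumes words, labels, data and the tflearn
# model from the code above are already in memory).
history = []
history, _ = chat("I have been feeling stressed lately", history)  # illustrative message
for user_message, bot_response in history:
    print("user:", user_message)
    print("bot: ", bot_response)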

+# Sentiment analysis
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
+# Use the RoBERTa model and tokenizer loaded above (they were otherwise unused).
+sentiment_pipeline = pipeline("sentiment-analysis", model=model_sentiment, tokenizer=tokenizer_sentiment)

+def predict_sentiment(text):
+    result = sentiment_pipeline(text)[0]
+    return result['label']

+# Emotion detection
+tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
+model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
+emotion_pipeline = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)

+def predict_emotion(text):
+    result = emotion_pipeline(text)[0]
+    return result['label']
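
For reference, both helpers return a single label string; a small illustrative check (the sample text is made up, and the exact label names come from the respective model cards):

# Sketch: the two classification helpers defined above, applied to one sample text.
sample_text = "I finally slept well and feel much better today."  # illustrative input
print(predict_sentiment(sample_text))  # label from the cardiffnlp sentiment model
print(predict_emotion(sample_text))    # label such as "joy" from the emotion model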

+# Fetching nearby health professionals
+google_places_url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
+google_geocoding_url = "https://maps.googleapis.com/maps/api/geocode/json"

+def get_places_data(query, location, radius, api_key):
    params = {
        "query": query,
        "location": location,
        "radius": radius,
        "key": api_key
    }
+    response = requests.get(google_places_url, params=params)
+    return response.json()
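
google_geocoding_url is defined above but not used anywhere in the hunks shown here; the removed version of the file had a geocode_location() helper for turning an address into coordinates. A sketch of the equivalent against the new constant (illustrative, not part of this commit):

# Illustrative geocoding helper modeled on the removed geocode_location() function.
def geocode_location(address, api_key):
    response = requests.get(google_geocoding_url, params={"address": address, "key": api_key})
    data = response.json()
    if data.get("status") == "OK":
        location = data["results"][0]["geometry"]["location"]
        return location["lat"], location["lng"]
    raise ValueError("Geocoding failed.")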

def get_place_details(place_id, api_key):
+    details_url = f"https://maps.googleapis.com/maps/api/place/details/json?place_id={place_id}&fields=name,rating,formatted_phone_number&key={api_key}"
+    response = requests.get(details_url)
+    return response.json()
+
+def fetch_nearby_health_professionals(location):
+    api_key = "GOOGLE_API_KEY"  # Replace with your actual Google API key
+    query = "therapist OR counselor OR mental health professional OR marriage and family therapist OR psychotherapist OR psychiatrist OR psychologist OR nutritionist OR wellness doctor OR holistic practitioner OR integrative medicine OR chiropractor OR naturopath"
+    radius = 50000  # 50 km radius
+
+    response = get_places_data(query, location, radius, api_key)
+    results = response.get('results', [])
+
+    data = []
+    for place in results:
+        place_id = place['place_id']
+        place_details = get_place_details(place_id, api_key)
+        name = place_details.get('result', {}).get('name', 'N/A')
+        rating = place_details.get('result', {}).get('rating', 'N/A')
+        phone_number = place_details.get('result', {}).get('formatted_phone_number', 'N/A')
+
+        data.append([name, rating, phone_number])
+
+    return pd.DataFrame(data, columns=['Name', 'Rating', 'Phone Number'])
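
The Text Search endpoint returns at most 20 results per page, and the removed get_places_data() handled this with a next_page_token parameter; the new helper above fetches only the first page. A sketch of how pagination could be layered back on top (the get_all_places name and the 2-second pause mirror the removed code; treat it as an illustration, not part of this commit):

import time

def get_all_places(query, location, radius, api_key, max_pages=3):
    # Illustrative pagination wrapper; the Places API needs a short delay before
    # a freshly issued next_page_token becomes valid.
    all_results, next_page_token = [], None
    for _ in range(max_pages):
        params = {"query": query, "location": location, "radius": radius, "key": api_key}
        if next_page_token:
            params["pagetoken"] = next_page_token
        page = requests.get(google_places_url, params=params).json()
        all_results.extend(page.get("results", []))
        next_page_token = page.get("next_page_token")
        if not next_page_token:
            break
        time.sleep(2)
    return all_results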

+# Save results to CSV
def save_to_csv(data, filename):
+    data.to_csv(filename, index=False)
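
Together, the two helpers above can be combined as below; the location string and the output filename echo the defaults used elsewhere in this file (illustrative):

# Sketch: fetch professionals for a location and write the table to disk.
df = fetch_nearby_health_professionals("Hawaii")
save_to_csv(df, "wellness_professionals_hawaii.csv")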

+# Gradio interface
+# Keep a module-level handle to the Places fetcher defined above: a Gradio callback
+# below re-defines the same name and would otherwise call itself recursively.
+_fetch_nearby_health_professionals = fetch_nearby_health_professionals
with gr.Blocks() as demo:
+    gr.Markdown("# Mental Health Assistant")

    # User input for text (emotion detection)
    user_input_emotion = gr.Textbox(lines=1, label="How are you feeling today?")

    # Model prediction for emotion detection
    def predict_emotion(text):
+        # Call the emotion pipeline directly; this wrapper shadows the
+        # module-level predict_emotion, so calling that name would recurse.
+        return emotion_pipeline(text)[0]['label']

    # Show suggestions based on the detected emotion
    def show_suggestions(emotion):
        if emotion == 'joy':
+            return "You're feeling happy! Keep up the great mood!"
        elif emotion == 'anger':
+            return "You're feeling angry. It's okay to feel this way. Let's try to calm down."
        elif emotion == 'fear':
+            return "You're feeling fearful. Take a moment to breathe and relax."
        elif emotion == 'sadness':
+            return "You're feeling sad. It's okay to take a break."
        elif emotion == 'surprise':
+            return "You're feeling surprised. It's okay to feel neutral!"

    emotion_output = gr.Textbox(label="Emotion Detected")
    submit_emotion.click(predict_emotion, inputs=user_input_emotion, outputs=emotion_output)

    # Button for summary
    def show_summary(emotion):
+        return f"Emotion Detected: {emotion}"

    summary_button = gr.Button("Show Summary")
    summary_output = gr.Textbox(label="Summary")
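
The click handlers for show_suggestions and show_summary fall outside the hunks shown in this diff; for illustration only, a minimal way to wire them inside the with gr.Blocks() block (the suggestions components are hypothetical):

    # Illustrative wiring, not taken from the diff:
    suggestions_output = gr.Textbox(label="Suggestions")  # hypothetical component
    suggest_button = gr.Button("Get Suggestions")         # hypothetical component
    suggest_button.click(show_suggestions, inputs=emotion_output, outputs=suggestions_output)
    summary_button.click(show_summary, inputs=emotion_output, outputs=summary_output)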

    # Fetch and display nearby health professionals
    def fetch_nearby_health_professionals(location):
+        # Delegate via the alias captured before the Blocks; calling
+        # fetch_nearby_health_professionals here would recurse into this wrapper.
+        df = _fetch_nearby_health_professionals(location)
+        return df

+    nearby_health_professionals_table = gr.Dataframe(headers=["Name", "Rating", "Phone Number"])
    submit_location.click(fetch_nearby_health_professionals, inputs=location_input, outputs=nearby_health_professionals_table)

    # User input for text (sentiment analysis)

    # Prediction button for sentiment analysis
    def predict_sentiment(text):
+        # Call the sentiment pipeline directly; this wrapper shadows the
+        # module-level predict_sentiment, so calling that name would recurse.
+        return sentiment_pipeline(text)[0]['label']

    sentiment_output = gr.Textbox(label="Predicted Sentiment")
    submit_sentiment.click(predict_sentiment, inputs=user_input_sentiment, outputs=sentiment_output)

    # Button to fetch wellness professionals data
+    fetch_button = gr.Button("Fetch Wellness Professionals Data")
+    data_output = gr.Dataframe(headers=["Name", "Rating", "Phone Number"])

    def fetch_data():
+        df = fetch_nearby_health_professionals("Hawaii")
+        return df

    fetch_button.click(fetch_data, inputs=None, outputs=data_output)
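
The hunks stop at line 258, so the launch call is not visible in this diff; assuming the Space follows the usual Gradio pattern, the file would end with something like:

# Conventional Gradio entry point; not shown in the hunks above.
if __name__ == "__main__":
    demo.launch()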