import os
import gradio as gr
import nltk
import numpy as np
import tensorflow as tf
import tflearn
import random
import json
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import googlemaps
import folium
import torch
# Disable GPU usage for TensorFlow
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Ensure necessary NLTK resources are downloaded
nltk.download("punkt")
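# Note (assumption): NLTK >= 3.9 also needs the "punkt_tab" resource for word_tokenize;
# add nltk.download("punkt_tab") here if tokenization raises a LookupError.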
# Initialize stemmer
stemmer = LancasterStemmer()
# Load intents.json and training data for chatbot
with open("intents.json") as file:
    intents_data = json.load(file)
with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)
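# data.pickle is assumed to hold the preprocessed training artifacts built from intents.json:
#   words    - stemmed bag-of-words vocabulary
#   labels   - list of intent tags
#   training - bag-of-words feature vectors, output - one-hot intent targets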
# Build Chatbot Model
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
chatbot_model = tflearn.DNN(net)
chatbot_model.load("MentalHealthChatBotmodel.tflearn")
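# The MentalHealthChatBotmodel.tflearn checkpoint must have been trained with the exact
# architecture defined above; otherwise load() will fail with a shape mismatch.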
# Sentiment Analysis Model (Hugging Face)
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
# Emotion Detection Model
tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
# Google Maps API Client
gmaps = googlemaps.Client(key=os.getenv('GOOGLE_API_KEY'))
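# Assumes GOOGLE_API_KEY is set in the environment and that the key has the
# Geocoding API and Places API enabled; otherwise the professionals/map lookup will fail.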
# Process Text Input for Chatbot
def bag_of_words(s, words):
    bag = [0] * len(words)
    s_words = word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
# Chatbot Functionality
def chatbot(message, history):
    history = history or []
    try:
        results = chatbot_model.predict([bag_of_words(message, words)])
        tag = labels[np.argmax(results)]
        response = "I'm not sure how to respond to that. 🤔"
        for intent in intents_data["intents"]:
            if intent["tag"] == tag:
                response = random.choice(intent["responses"])
                break
    except Exception as e:
        response = f"Error: {str(e)} 💥"
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": response})
    return history, response
# Detect Sentiment
def analyze_sentiment(user_input):
    inputs = tokenizer_sentiment(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model_sentiment(**inputs)
    sentiment_class = torch.argmax(outputs.logits, dim=1).item()
    sentiment_map = ["Negative 😞", "Neutral 😐", "Positive 😊"]
    return sentiment_map[sentiment_class]
# Detect Emotion
def detect_emotion(user_input):
    pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
    result = pipe(user_input)
    emotion = result[0]["label"]
    emotion_map = {
        "joy": "😊 Joy",
        "anger": "😠 Anger",
        "sadness": "😢 Sadness",
        "fear": "😨 Fear",
        "surprise": "😲 Surprise",
        "neutral": "😐 Neutral",
    }
    return emotion_map.get(emotion, "Unknown Emotion 🤔")
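# Note: the j-hartmann model can also return a "disgust" label, which is not in
# emotion_map above and therefore falls through to the "Unknown Emotion" fallback.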
# Generate Suggestions for Detected Emotion
def generate_suggestions(emotion):
    # Keys are plain emotion names so the lookup below matches the labels
    # returned by detect_emotion (e.g. "😊 Joy" -> "Joy").
    resources = {
        "Joy": [
            ["Relaxation Techniques", "Relaxation", '<a href="https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation" target="_blank">Visit</a>'],
            ["Dealing with Stress", "Stress Management", '<a href="https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety" target="_blank">Visit</a>'],
            ["Emotional Wellness Toolkit", "Wellness", '<a href="https://www.nih.gov/health-information/emotional-wellness-toolkit" target="_blank">Visit</a>'],
            ["Relaxation Videos", "Video", '<a href="https://youtu.be/m1vaUGtyo-A" target="_blank">Watch</a>']
        ],
        "Sadness": [
            ["Emotional Wellness Toolkit", "Wellness", '<a href="https://www.nih.gov/health-information/emotional-wellness-toolkit" target="_blank">Visit</a>'],
            ["Dealing with Anxiety", "Anxiety Management", '<a href="https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety" target="_blank">Visit</a>'],
            ["Relaxation Videos", "Video", '<a href="https://youtu.be/-e-4Kx5px_I" target="_blank">Watch</a>']
        ],
        "Fear": [
            ["Mindfulness Practices", "Mindfulness", '<a href="https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation" target="_blank">Visit</a>'],
            ["Coping with Anxiety", "Anxiety Management", '<a href="https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety" target="_blank">Visit</a>'],
            ["Emotional Wellness Toolkit", "Wellness", '<a href="https://www.nih.gov/health-information/emotional-wellness-toolkit" target="_blank">Visit</a>'],
            ["Relaxation Videos", "Video", '<a href="https://youtu.be/yGKKz185M5o" target="_blank">Watch</a>']
        ]
    }
    # Strip the emoji prefix ("😊 Joy" -> "Joy") before looking up the resources.
    return resources.get(emotion.split(" ")[1], [["No specific suggestions available", "", ""]])
# Search Professionals and Generate Map
def get_health_professionals_and_map(location, query):
    try:
        geo_location = gmaps.geocode(location)
        if geo_location:
            coords = geo_location[0]["geometry"]["location"]
            lat, lng = coords["lat"], coords["lng"]
            places_result = gmaps.places_nearby(
                location=(lat, lng), radius=10000, type="doctor", keyword=query
            )["results"]
            map_ = folium.Map(location=(lat, lng), zoom_start=13)
            professionals = []
            for place in places_result:
                professionals.append(f"{place['name']} - {place.get('vicinity', '')}")
                place_lat = place["geometry"]["location"]["lat"]
                place_lng = place["geometry"]["location"]["lng"]
                folium.Marker([place_lat, place_lng], popup=place["name"]).add_to(map_)
            return professionals, map_._repr_html_()
        return ["No professionals found"], ""
    except Exception as e:
        return [f"Error: {e}"], ""
# Gradio App Function
def app_function(message, location, query, history):
    chatbot_history, _ = chatbot(message, history)
    sentiment = analyze_sentiment(message)
    emotion = detect_emotion(message)
    suggestions = generate_suggestions(emotion)
    professionals_info, map_html = get_health_professionals_and_map(location, query)
    # Join the list of professionals into one string so it renders cleanly in the Textbox output.
    professionals_info = "\n".join(professionals_info)
    return chatbot_history, sentiment, emotion, suggestions, professionals_info, map_html
# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# 🌟 Well-Being Companion")
    gr.Markdown("Empowering your mental health journey 💚")
    with gr.Row():
        user_input = gr.Textbox(label="Your Message")
        location_input = gr.Textbox(label="Your Location")
        query_input = gr.Textbox(label="Search Query")
    submit_button = gr.Button("Submit")
    chatbot_output = gr.Chatbot(label="Chat History", type="messages")
    sentiment_output = gr.Textbox(label="Sentiment Detected")
    emotion_output = gr.Textbox(label="Emotion Detected")
    suggestions_output = gr.DataFrame(label="Suggestions", headers=["Title", "Subject", "Link"])
    professionals_output = gr.Textbox(label="Nearby Professionals", lines=5)
    map_output = gr.HTML(label="Map of Nearby Professionals")
    submit_button.click(
        app_function,
        inputs=[user_input, location_input, query_input, chatbot_output],
        outputs=[
            chatbot_output, sentiment_output, emotion_output,
            suggestions_output, professionals_output, map_output
        ],
    )
demo.launch()