import os
import gradio as gr
import nltk
import numpy as np
import tflearn
import random
import json
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import googlemaps
import folium
import torch

# Disable GPU usage for TensorFlow and suppress TensorFlow warnings
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# Download NLTK tokenizer resources (newer NLTK releases also need "punkt_tab" for word_tokenize)
nltk.download("punkt")
nltk.download("punkt_tab")

# Initialize Lancaster Stemmer
stemmer = LancasterStemmer()

# Load intents.json and training data for the chatbot
with open("intents.json") as file:
    intents_data = json.load(file)

with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)

# Build the chatbot's neural network model
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
chatbot_model = tflearn.DNN(net)
chatbot_model.load("MentalHealthChatBotmodel.tflearn")

# Hugging Face models for sentiment and emotion detection
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

# Google Maps API client (requires the GOOGLE_API_KEY environment variable to be set)
gmaps = googlemaps.Client(key=os.getenv('GOOGLE_API_KEY'))

# Chatbot logic
def bag_of_words(s, words):
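    """Convert a sentence into a binary bag-of-words vector over the chatbot's training vocabulary."""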
    bag = [0] * len(words)
    s_words = word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)

def chatbot(message, history):
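    """Predict an intent tag for the message and reply with a random response from intents.json."""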
    history = history or []
    try:
        results = chatbot_model.predict([bag_of_words(message, words)])
        tag = labels[np.argmax(results)]
        response = "I'm not sure how to respond to that. πŸ€”"
        for intent in intents_data["intents"]:
            if intent["tag"] == tag:
                response = random.choice(intent["responses"])
                break
    except Exception as e:
        response = f"Error: {str(e)} πŸ’₯"
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": response})
    return history, response

# Sentiment analysis
def analyze_sentiment(user_input):
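    """Classify the message as Negative/Neutral/Positive with the CardiffNLP RoBERTa sentiment model."""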
    inputs = tokenizer_sentiment(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model_sentiment(**inputs)
    sentiment_class = torch.argmax(outputs.logits, dim=1).item()
    sentiment_map = ["Negative πŸ˜”", "Neutral 😐", "Positive 😊"]
    return sentiment_map[sentiment_class]

# Emotion detection
def detect_emotion(user_input):
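    """Detect the dominant emotion (e.g. joy, anger, sadness) with the DistilRoBERTa emotion model.

    Note: the pipeline is rebuilt on every call; caching it at module level would be faster.
    """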
    pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
    result = pipe(user_input)
    emotion = result[0]["label"]
    return emotion

# Generate suggestions based on detected emotion
def generate_suggestions(emotion):
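    """Return [title, link] rows of self-help resources for the detected emotion."""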
    suggestions = {
        "joy": [
            ["Relaxation Techniques", '<a href="https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation" target="_blank">Visit</a>'],
            ["Dealing with Stress", '<a href="https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety" target="_blank">Visit</a>'],
            ["Emotional Wellness Toolkit", '<a href="https://www.nih.gov/health-information/emotional-wellness-toolkit" target="_blank">Visit</a>'],
            ["Relaxation Video", '<a href="https://youtu.be/m1vaUGtyo-A" target="_blank">Watch</a>'],
        ],
        "anger": [
            ["Emotional Wellness Toolkit", '<a href="https://www.nih.gov/health-information/emotional-wellness-toolkit" target="_blank">Visit</a>'],
            ["Stress Management Tips", '<a href="https://www.health.harvard.edu/health-a-to-z" target="_blank">Visit</a>'],
            ["Dealing with Anger", '<a href="https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety" target="_blank">Visit</a>'],
            ["Relaxation Video", '<a href="https://youtu.be/MIc299Flibs" target="_blank">Watch</a>'],
        ],
    }
    # Fallback row matches the two-column ["Title", "Links"] DataFrame layout
    return suggestions.get(emotion, [["No suggestions available", ""]])

# Search professionals and generate map
def get_health_professionals_and_map(location, query):
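    """Geocode the location, search nearby places matching the query, and return (names, map HTML)."""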
    try:
        geo_location = gmaps.geocode(location)
        if geo_location:
            # Read lat/lng by key rather than relying on dict ordering
            coords = geo_location[0]["geometry"]["location"]
            lat, lng = coords["lat"], coords["lng"]
            places_result = gmaps.places_nearby(location=(lat, lng), radius=10000, keyword=query)["results"]

            map_ = folium.Map(location=(lat, lng), zoom_start=13)
            professionals = []
            for place in places_result:
                professionals.append(f"{place['name']} - {place.get('vicinity', '')}")
                folium.Marker([place["geometry"]["location"]["lat"], place["geometry"]["location"]["lng"]],
                              popup=place["name"]).add_to(map_)
            return professionals, map_._repr_html_()
        return ["No professionals found"], ""
    except Exception as e:
        return [f"Error: {e}"], ""

# Main app function
def app_function(message, location, query, history):
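    """Run the full pipeline: chatbot reply, sentiment, emotion, suggestions, and nearby professionals."""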
    chatbot_history, _ = chatbot(message, history)
    sentiment = analyze_sentiment(message)
    emotion = detect_emotion(message.lower())
    suggestions = generate_suggestions(emotion)
    professionals, map_html = get_health_professionals_and_map(location, query)
    # Join the professionals list so it renders cleanly in the multi-line Textbox
    return chatbot_history, sentiment, emotion, suggestions, "\n".join(professionals), map_html

# Gradio app interface
with gr.Blocks() as app:
    gr.Markdown("# 🌟 Well-Being Companion")
    gr.Markdown("Empowering your Well-Being journey πŸ’š")

    with gr.Row():
        user_message = gr.Textbox(label="Your Message", placeholder="Enter your message...")
        user_location = gr.Textbox(label="Your Location", placeholder="Enter your location...")
        search_query = gr.Textbox(label="Query", placeholder="Search for professionals...")
        submit_btn = gr.Button("Submit")

    # The chatbot history uses {"role", "content"} dicts, which needs the "messages" format (Gradio 4.x+)
    chatbot_box = gr.Chatbot(label="Chat History", type="messages")
    emotion_output = gr.Textbox(label="Detected Emotion")
    sentiment_output = gr.Textbox(label="Detected Sentiment")
    suggestions_output = gr.DataFrame(headers=["Title", "Links"], label="Suggestions")
    map_output = gr.HTML(label="Nearby Professionals Map")
    professional_list = gr.Textbox(label="Nearby Professionals", lines=5)

    submit_btn.click(
        app_function,
        inputs=[user_message, user_location, search_query, chatbot_box],
        outputs=[
            chatbot_box, sentiment_output, emotion_output,
            suggestions_output, professional_list, map_output,
        ],
    )

app.launch()
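
# A minimal sketch of alternative launch options (assumed deployment setup, not part of the original app);
# binding to all interfaces is useful when running inside a container:
# app.launch(server_name="0.0.0.0", server_port=7860)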