import os
import gradio as gr
import nltk
import numpy as np
import tensorflow as tf
import tflearn
import random
import json
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import googlemaps
import folium
import torch

# Disable GPU usage for TensorFlow
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# Suppress TensorFlow GPU warnings & logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# Download NLTK tokenizer data (newer NLTK releases may also need the "punkt_tab" resource)
nltk.download("punkt")

# Initialize Lancaster Stemmer
stemmer = LancasterStemmer()

# Load intents.json for the chatbot
with open("intents.json") as file:
    intents_data = json.load(file)

# Load preprocessed data for Well-Being Chatbot
with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)
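# The pickle file is assumed (based on how these objects are used below) to contain:
#   words    - stemmed vocabulary used for bag-of-words encoding
#   labels   - sorted list of intent tags from intents.json
#   training - bag-of-words vectors for every training pattern
#   output   - one-hot encoded intent labels aligned with `labels`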

# Build TFlearn Chatbot Model
net = tflearn.input_data(shape=[None, len(training[0])], dtype=tf.float32)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

# Load and initialize the trained model
chatbot_model = tflearn.DNN(net)
chatbot_model.load("MentalHealthChatBotmodel.tflearn")
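# The checkpoint loaded here must have been trained on the exact network defined above
# (same input width and output size), otherwise loading will fail.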

# Function to process user input into a bag-of-words format
def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]
    s_words = word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
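# Illustrative sketch of the encoding (hypothetical, already-stemmed vocabulary):
#   words = ["hello", "help", "sad"]
#   bag_of_words("I need help, I feel sad", words)  ->  array([0, 1, 1])
# One slot per vocabulary entry, set to 1 when the stemmed token appears in the input.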

# Chatbot Response Function
def chatbot(message, history):
    history = history or []
    message = message.lower()
    try:
        results = chatbot_model.predict([bag_of_words(message, words)])
        tag = labels[np.argmax(results)]

        response = "I'm not sure how to respond to that. πŸ€”"
        for intent in intents_data["intents"]:
            if intent["tag"] == tag:
                response = random.choice(intent["responses"])
                break
    except Exception as e:
        response = f"Error: {str(e)} πŸ’₯"
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": response})
    return history, response

# Sentiment Analysis Function
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

def analyze_sentiment(user_input):
    inputs = tokenizer_sentiment(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model_sentiment(**inputs)
    sentiment_class = torch.argmax(outputs.logits, dim=1).item()
    sentiment_map = ["Negative πŸ˜”", "Neutral 😐", "Positive 😊"]
    return sentiment_map[sentiment_class]
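# Note: this checkpoint (cardiffnlp/twitter-roberta-base-sentiment) orders its three
# classes as 0 = negative, 1 = neutral, 2 = positive, which is what sentiment_map relies on.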

# Emotion Detection Function
tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
# Build the classification pipeline once at import time rather than on every call
emotion_pipeline = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)

def detect_emotion(user_input):
    result = emotion_pipeline(user_input)
    emotion = result[0]["label"]
    emotion_map = {
        "joy": "😊 Joy",
        "anger": "😠 Anger",
        "sadness": "😒 Sadness",
        "fear": "😨 Fear",
        "disgust": "🤢 Disgust",
        "surprise": "😲 Surprise",
        "neutral": "😐 Neutral",
    }
    return emotion_map.get(emotion, "Unknown Emotion 🤔")

# Health Professionals Search
gmaps = googlemaps.Client(key=os.getenv('GOOGLE_API_KEY'))

def get_health_professionals_and_map(location, query):
    """Search for health professionals and generate a map."""
    try:
        geo_location = gmaps.geocode(location)
        if geo_location:
            location_data = geo_location[0]["geometry"]["location"]
            lat, lng = location_data["lat"], location_data["lng"]
            places_result = gmaps.places_nearby(
                location=(lat, lng), radius=10000, type="doctor", keyword=query
            ).get("results", [])

            # Create map
            m = folium.Map(location=(lat, lng), zoom_start=13)
            for place in places_result:
                folium.Marker(
                    location=[
                        place["geometry"]["location"]["lat"],
                        place["geometry"]["location"]["lng"],
                    ],
                    popup=place["name"],
                ).add_to(m)
            map_html = m._repr_html_()
            professionals_info = [
                f"{place['name']} - {place.get('vicinity', 'No address available')}"
                for place in places_result
            ]
            return "\n".join(professionals_info), map_html
        return "Unable to find location", ""
    except Exception as e:
        return f"Error: {e}", ""

# Suggestions Based on Emotion
def generate_suggestions(emotion):
    # Keys are plain emotion names so they match the lookup at the bottom of this function
    suggestions = {
        "joy": [
            {"Title": "Meditation 🧘", "Subject": "Relaxation", "Link": "https://example.com/meditation"},
            {"Title": "Learn a skill 🚀", "Subject": "Growth", "Link": "https://example.com/skills"},
        ],
        "sadness": [
            {"Title": "Therapist Help 💬", "Subject": "Support", "Link": "https://example.com/therapist"},
            {"Title": "Stress Management 🌿", "Subject": "Wellness", "Link": "https://example.com/stress"},
        ],
    }
    # detect_emotion() returns strings like "😊 Joy"; drop the emoji and normalise the case
    return suggestions.get(emotion.split(" ")[-1].lower(), [])
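# Example: generate_suggestions("😒 Sadness") returns the two "sadness" entries above,
# while emotions without an entry (e.g. "😨 Fear") fall back to an empty list.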

# Main Gradio App Function
def app_function(message, location, query, history):
    chatbot_history, _ = chatbot(message, history)
    sentiment = analyze_sentiment(message)
    emotion = detect_emotion(message)
    # Convert suggestion dicts to rows so gr.DataFrame can render them against its headers
    suggestions = [[s["Title"], s["Subject"], s["Link"]] for s in generate_suggestions(emotion)]
    places_info, map_html = get_health_professionals_and_map(location, query)
    return chatbot_history, sentiment, emotion, suggestions, map_html, places_info

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# 🌟 Well-being Companion")
    gr.Markdown("Empowering your mental health journey πŸ’š")

    with gr.Row():
        user_input = gr.Textbox(label="Your Message", placeholder="Type your message...", lines=2)
        location_input = gr.Textbox(label="Your Location", placeholder="Enter location...", lines=2)
        query_input = gr.Textbox(label="Search Query", placeholder="Enter query (e.g., therapist)...", lines=1)
        submit_btn = gr.Button("Submit")

    with gr.Row():
        chatbot_output = gr.Chatbot(label="Chat History", type="messages")
    with gr.Row():
        sentiment_output = gr.Textbox(label="Sentiment Analysis")
        emotion_output = gr.Textbox(label="Emotion Detected")
    with gr.Row():
        suggestions_output = gr.DataFrame(label="Suggestions", headers=["Title", "Subject", "Link"])
    with gr.Row():
        map_display = gr.HTML(label="Map of Nearby Professionals")
        health_info_output = gr.Textbox(label="Health Professionals Info", lines=5)

    # Button interaction
    submit_btn.click(
        app_function,
        inputs=[user_input, location_input, query_input, chatbot_output],
        outputs=[
            chatbot_output,
            sentiment_output,
            emotion_output,
            suggestions_output,
            map_display,
            health_info_output,
        ],
    )

demo.launch()