import os
import gradio as gr
import nltk
import numpy as np
import tflearn
import random
import json
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import pandas as pd
import torch

# Disable GPU usage for TensorFlow compatibility
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

# Download necessary NLTK resources
nltk.download("punkt")
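# Note: recent NLTK releases may also need the "punkt_tab" resource for word_tokenize;
# if tokenization raises a LookupError, uncomment the line below.
# nltk.download("punkt_tab")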

# Initialize Lancaster Stemmer
stemmer = LancasterStemmer()

# Load intents.json for the chatbot
with open("intents.json") as file:
    intents_data = json.load(file)

# Load tokenized training data
with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)

# Build the TFlearn model
def build_chatbot_model():
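    """Rebuild the TFLearn network used for intent classification and load the pre-trained weights."""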
    net = tflearn.input_data(shape=[None, len(training[0])])
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
    net = tflearn.regression(net)
    model = tflearn.DNN(net)
    model.load("MentalHealthChatBotmodel.tflearn")
    return model

chatbot_model = build_chatbot_model()

# Function: Bag of words
def bag_of_words(s, words):
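    """Convert a sentence into a binary bag-of-words vector over the training vocabulary."""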
    bag = [0 for _ in range(len(words))]
    s_words = word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)

# Chatbot response generator
def chatbot_response(message, history):
    """Generates a response from the chatbot and appends it to the history."""
    history = history or []
    try:
        result = chatbot_model.predict([bag_of_words(message, words)])
        idx = np.argmax(result)
        tag = labels[idx]
        response = "I'm not sure how to respond to that. πŸ€”"
        for intent in intents_data["intents"]:
            if intent["tag"] == tag:
                response = random.choice(intent["responses"])
                break
    except Exception as e:
        response = f"Error generating response: {str(e)} πŸ’₯"

    # Format output as tuples for Gradio Chatbot compatibility
    history.append((message, response))
    return history, response

# Emotion detection transformer model (the pipeline is built once at startup rather than per request)
emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
emotion_pipeline = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)

def detect_emotion(user_input):
    """Classify the dominant emotion in the user's message and return it as a labelled emoji string."""
    try:
        result = emotion_pipeline(user_input)
        emotion = result[0]["label"]
        emotion_map = {
            "joy": "😊 Joy",
            "anger": "😠 Anger",
            "sadness": "😒 Sadness",
            "fear": "😨 Fear",
            "surprise": "😲 Surprise",
            "neutral": "😐 Neutral",
        }
        return emotion_map.get(emotion, "Unknown Emotion πŸ€”")
    except Exception as e:
        return f"Error detecting emotion: {str(e)} πŸ’₯"

# Sentiment analysis model
sentiment_tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

def analyze_sentiment(user_input):
    """Analyze sentiment of user input."""
    inputs = sentiment_tokenizer(user_input, return_tensors="pt")
    try:
        with torch.no_grad():
            outputs = sentiment_model(**inputs)
        sentiment = torch.argmax(outputs.logits, dim=1).item()
        sentiment_map = ["Negative πŸ˜”", "Neutral 😐", "Positive 😊"]
        return sentiment_map[sentiment]
    except Exception as e:
        return f"Error in sentiment analysis: {str(e)} πŸ’₯"

# Suggestions based on emotion
def generate_suggestions(emotion):
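    """Return a small list of curated resources for the detected emotion label."""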
    suggestions = {
        "😊 Joy": [
            {"Title": "Mindful Meditation 🧘", "Link": "https://www.helpguide.org/meditation"},
            {"Title": "Learn a new skill ✨", "Link": "https://www.skillshare.com/"},
        ],
        "😒 Sadness": [
            {"Title": "Talk to a professional πŸ’¬", "Link": "https://www.betterhelp.com/"},
            {"Title": "Mental health toolkit πŸ› οΈ", "Link": "https://www.psychologytoday.com/"},
        ],
        "😠 Anger": [
            {"Title": "Anger Management Tips πŸ”₯", "Link": "https://www.mentalhealth.org.uk"},
            {"Title": "Stress Relieving Exercises 🌿", "Link": "https://www.calm.com/"},
        ],
    }
    return suggestions.get(emotion, [{"Title": "Wellness Resources 🌈", "Link": "https://www.helpguide.org/wellness"}])

# Dummy Function for Location Query Simulation (replace this with actual map/search integration)
def search_nearby_professionals(location, query):
    """Simulate searching for nearby professionals and returning results."""
    return [
        {"Name": "Wellness Center One", "Address": "123 Wellness Way"},
        {"Name": "Mental Health Clinic", "Address": "456 Recovery Road"},
        {"Name": "Therapists Hub", "Address": "789 Peace Avenue"},
    ] if location and query else []

def well_being_app(user_input, location, query, history):
    """Main function for chatbot, emotion detection, sentiment, suggestions, and location query."""
    # Chatbot response
    history, chatbot_reply = chatbot_response(user_input, history)

    # Emotion Detection
    emotion = detect_emotion(user_input)

    # Sentiment Analysis
    sentiment = analyze_sentiment(user_input)

    # Suggestions (generate_suggestions is keyed on the labelled emotion string, e.g. "😊 Joy")
    suggestions = generate_suggestions(emotion)
    suggestions_df = pd.DataFrame(suggestions)

    # Nearby Professionals (Location Query), converted to a DataFrame for the gr.DataFrame output
    professionals_df = pd.DataFrame(search_nearby_professionals(location, query), columns=["Name", "Address"])

    return history, sentiment, emotion, suggestions_df, professionals_df

# Custom CSS for beautification
custom_css = """
body {
    background: linear-gradient(135deg, #28a745, #218838);
    font-family: Arial, sans-serif;
    color: black;
}
button {
    background-color: #1abc9c;
    color: white;
    padding: 10px 20px;
    font-size: 16px;
    border-radius: 8px;
    cursor: pointer;
}
button:hover {
    background-color: #16a085;
}
textarea, input[type="text"] {
    background: #ffffff;
    color: #000000;
    font-size: 14px;
    border: 1px solid #ced4da;
    padding: 10px;
    border-radius: 5px;
}
"""

# Gradio UI
with gr.Blocks(css=custom_css) as interface:
    gr.Markdown("# 🌱 **Well-being Companion**")
    gr.Markdown("### Empowering Your Mental Health Journey with AI πŸ’š")

    # Input Section
    with gr.Row():
        gr.Textbox(label="Your Message", lines=2, placeholder="How can I support you today?", elem_id="message_input")
        gr.Textbox(label="Location", placeholder="Enter your location (e.g., New York City)")
        gr.Textbox(label="Search Query", placeholder="Professionals nearby? (e.g., doctors, therapists)")
        submit_button = gr.Button("Submit")

    # Chatbot Section
    with gr.Row():
        gr.Markdown("### Chatbot Response")
        chatbot_output = gr.Chatbot(label=None)

    # Sentiment and Emotion Section
    with gr.Row():
        gr.Markdown("### Sentiment Analysis")
        sentiment_output = gr.Textbox(label=None)
        gr.Markdown("### Detected Emotion")
        emotion_output = gr.Textbox(label=None)

    # Suggestions Section
    with gr.Row():
        gr.Markdown("### Suggestions")
        suggestions_output = gr.DataFrame(headers=["Title", "Link"], interactive=False, max_height=300)

    # Location Search Results Section
    with gr.Row():
        gr.Markdown("### Nearby Professionals")
        location_output = gr.DataFrame(headers=["Name", "Address"], interactive=False, max_height=300)

    submit_button.click(
        well_being_app,
        inputs=["message_input", "Location", "Search Query", chatbot_output],
        outputs=[chatbot_output, sentiment_output, emotion_output, suggestions_output, location_output],
    )

# Launch the app
interface.launch()