import os
import gradio as gr
import nltk
import numpy as np
import tflearn
import random
import json
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
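# googlemaps and folium are imported for a nearby-professionals map feature that is not wired up yet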
import googlemaps
import folium
import pandas as pd
import torch

# Disable GPU usage for TensorFlow for compatibility
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# Download necessary NLTK resources
nltk.download('punkt')
nltk.download('punkt_tab')  # required by word_tokenize on newer NLTK releases

# Initialize Lancaster Stemmer
stemmer = LancasterStemmer()

# Load intents.json for Chatbot
with open("intents.json") as file:
    intents_data = json.load(file)
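# intents.json is assumed to follow the usual intent-classification layout, e.g.:
# {"intents": [{"tag": "greeting",
#               "patterns": ["Hi", "Hello"],
#               "responses": ["Hello! How are you feeling today?"]}, ...]}
# Only "tag" and "responses" are read below; "patterns" matter only at training time.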

# Load preprocessed training artifacts for the chatbot
with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)
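# The pickle is assumed to hold the artifacts produced when the model was trained:
#   words    - stemmed vocabulary used for the bag-of-words encoding
#   labels   - sorted list of intent tags
#   training - bag-of-words input vectors (one per training pattern)
#   output   - one-hot encoded intent labels aligned with `training`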

# Build Chatbot Model
def build_chatbot_model():
    net = tflearn.input_data(shape=[None, len(training[0])])
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
    net = tflearn.regression(net)
    model = tflearn.DNN(net)
    model.load("MentalHealthChatBotmodel.tflearn")
    return model

chatbot_model = build_chatbot_model()

# Bag of Words Function for Chatbot
def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]
    s_words = word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
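# Example (with a hypothetical vocabulary): bag_of_words("I feel anxious", words)
# returns a 0/1 vector with 1s at the indices of the recognized stemmed tokens.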

# Chatbot Response Function
def chatbot_response(message, history):
    """Respond to user input and update chat history."""
    history = history or []
    try:
        result = chatbot_model.predict([bag_of_words(message, words)])
        result_index = np.argmax(result)
        tag = labels[result_index]

        response = "I didn't understand that. πŸ€” Try rephrasing your question."
        for intent in intents_data["intents"]:
            if intent["tag"] == tag:
                response = f"πŸ€– {random.choice(intent['responses'])}"
                break
    except Exception as e:
        response = f"Error generating response: {str(e)} πŸ’₯"

    history.append({"role": "user", "content": f"πŸ’¬ {message}"})
    history.append({"role": "assistant", "content": response})
    return history, response

# Emotion Detection with Transformers (pipeline built once at import time instead of on every call)
emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
emotion_pipe = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)

def detect_emotion(user_input):
    """Detect emotion using a pre-trained model and return its label with an emoji."""
    try:
        result = emotion_pipe(user_input)
        emotion = result[0]["label"]
        emotion_map = {
            "joy": "😊 Joy",
            "anger": "😠 Anger",
            "sadness": "😒 Sadness",
            "fear": "😨 Fear",
            "surprise": "😲 Surprise",
            "neutral": "😐 Neutral",
        }
        return emotion_map.get(emotion, "Unknown Emotion πŸ€”")
    except Exception as e:
        return f"Error detecting emotion: {str(e)} πŸ’₯"

# Sentiment Analysis
sentiment_tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

def analyze_sentiment(user_input):
    """Analyze sentiment of user input."""
    inputs = sentiment_tokenizer(user_input, return_tensors="pt")
    try:
        with torch.no_grad():
            outputs = sentiment_model(**inputs)
        sentiment_class = torch.argmax(outputs.logits, dim=1).item()
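        # Class order for cardiffnlp/twitter-roberta-base-sentiment: 0 = negative, 1 = neutral, 2 = positive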
        sentiment_map = ["Negative πŸ˜”", "Neutral 😐", "Positive 😊"]
        return f"Sentiment: {sentiment_map[sentiment_class]}"
    except Exception as e:
        return f"Error in sentiment analysis: {str(e)} πŸ’₯"

# Generate Suggestions Based on Emotion
def generate_suggestions(emotion):
    suggestions = {
        "😊 Joy": [
            {"Title": "Meditation Techniques", "Link": "https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation"},
            {"Title": "Learn Something New", "Link": "https://www.edx.org/"},
        ],
        "😒 Sadness": [
            {"Title": "Emotional Wellness Toolkit", "Link": "https://www.nih.gov/health-information/emotional-wellness-toolkit"},
            {"Title": "Relaxation Videos", "Link": "https://youtu.be/-e-4Kx5px_I"},
        ],
        "😠 Anger": [
            {"Title": "Dealing with Anger", "Link": "https://www.helpguide.org/articles/anger/anger-management.htm"},
            {"Title": "Stress Reducing Tips", "Link": "https://www.webmd.com/stress-management"},
        ],
    }
    return suggestions.get(emotion, [{"Title": "General Tips", "Link": "https://www.psychologytoday.com/"}])

# Gradio Interface Main Function
def well_being_app(user_input, location, query, history):
    """Main app combining chatbot, emotion detection, sentiment, suggestions, and map."""
    # Chatbot Interaction
    history, chatbot_reply = chatbot_response(user_input, history)

    # Emotion Detection
    emotion = detect_emotion(user_input)

    # Sentiment Analysis
    sentiment = analyze_sentiment(user_input)

    # Suggestions Based on Emotion (detect_emotion already returns the key used by generate_suggestions)
    suggestions = generate_suggestions(emotion)
    suggestions_df = pd.DataFrame(suggestions)

    # Return Outputs
    return (
        history,
        sentiment,
        emotion,
        suggestions_df
    )

# Gradio Interface UI
with gr.Blocks() as app:
    with gr.Row():
        gr.Markdown("# 🌼 Well-Being Support Application")

    with gr.Row():
        user_input = gr.Textbox(lines=2, placeholder="Type your message here...", label="Your Message")
        location = gr.Textbox(value="Honolulu, HI", label="Your Location")
        query = gr.Textbox(value="Counselor", label="Health Professional (Doctor, Therapist, etc.)")
    
    with gr.Row():
        submit_button = gr.Button(value="Submit")  # gr.Button has no `label` argument
    
    with gr.Row():
        chatbot = gr.Chatbot(label="Chat History", type="messages")  # history entries are {"role": ..., "content": ...} dicts
        sentiment_output = gr.Textbox(label="Sentiment Analysis")
        emotion_output = gr.Textbox(label="Emotion Detected")
    
    with gr.Row():
        suggestions_output = gr.DataFrame(label="Suggestions Based on Mood")

    # Connect inputs and outputs
    submit_button.click(
        well_being_app,
        inputs=[user_input, location, query, chatbot],
        outputs=[chatbot, sentiment_output, emotion_output, suggestions_output],
    )

# Launch the app
app.launch()
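
# Rough setup notes (assumed, not a verified requirements list):
#   pip install gradio nltk numpy tflearn transformers torch googlemaps folium pandas
# intents.json, data.pickle, and MentalHealthChatBotmodel.tflearn must sit in the
# working directory, and the .tflearn checkpoint must match the architecture defined
# in build_chatbot_model(). Note that tflearn also needs a compatible TensorFlow install.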