import os
import gradio as gr
import nltk
import numpy as np
import tflearn
import random
import json
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import pandas as pd
import torch
# Disable GPU usage for TensorFlow compatibility
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# Download necessary NLTK resources
nltk.download("punkt")
# Initialize Lancaster Stemmer
stemmer = LancasterStemmer()

# Load intents.json for the chatbot
with open("intents.json") as file:
    intents_data = json.load(file)

# Load tokenized training data
with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)

# Build the TFlearn model
def build_chatbot_model():
    net = tflearn.input_data(shape=[None, len(training[0])])
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
    net = tflearn.regression(net)
    model = tflearn.DNN(net)
    model.load("MentalHealthChatBotmodel.tflearn")
    return model

chatbot_model = build_chatbot_model()

# Function: Bag of words
def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]
    s_words = word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
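
# Illustrative example with a toy vocabulary (not the trained one):
#   bag_of_words("please help", ["hi", "help", "sad"]) -> an array like array([0, 1, 0]),
#   where a 1 marks each vocabulary entry whose stem matches a stemmed token of the input.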

# Chatbot response generator
def chatbot_response(message, history):
    """Generates a response from the chatbot and appends it to the history."""
    history = history or []
    try:
        result = chatbot_model.predict([bag_of_words(message, words)])
        idx = np.argmax(result)
        tag = labels[idx]
        response = "I'm not sure how to respond to that. 🤔"
        for intent in intents_data["intents"]:
            if intent["tag"] == tag:
                response = random.choice(intent["responses"])
                break
    except Exception as e:
        response = f"Error generating response: {str(e)} 💥"
    # Format output as tuples for Gradio Chatbot compatibility
    history.append((message, response))
    return history, response

# Emotion detection transformer model
emotion_tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
emotion_model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
# Build the classification pipeline once so it is not recreated on every request
emotion_pipeline = pipeline("text-classification", model=emotion_model, tokenizer=emotion_tokenizer)

def detect_emotion(user_input):
    """Classify the user's input into an emotion label with a matching emoji."""
    try:
        result = emotion_pipeline(user_input)
        emotion = result[0]["label"]
        emotion_map = {
            "joy": "😊 Joy",
            "anger": "😠 Anger",
            "sadness": "😒 Sadness",
            "fear": "😨 Fear",
            "surprise": "😲 Surprise",
            "neutral": "😐 Neutral",
        }
        return emotion_map.get(emotion, "Unknown Emotion 🤔")
    except Exception as e:
        return f"Error detecting emotion: {str(e)} 💥"

# Sentiment analysis model
sentiment_tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
sentiment_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

def analyze_sentiment(user_input):
    """Analyze sentiment of user input."""
    inputs = sentiment_tokenizer(user_input, return_tensors="pt")
    try:
        with torch.no_grad():
            outputs = sentiment_model(**inputs)
        sentiment = torch.argmax(outputs.logits, dim=1).item()
        sentiment_map = ["Negative 😔", "Neutral 😐", "Positive 😊"]
        return sentiment_map[sentiment]
    except Exception as e:
        return f"Error in sentiment analysis: {str(e)} 💥"

# Suggestions based on emotion
def generate_suggestions(emotion):
    suggestions = {
        "😊 Joy": [
            {"Title": "Mindful Meditation 🧘", "Link": "https://www.helpguide.org/meditation"},
            {"Title": "Learn a new skill ✨", "Link": "https://www.skillshare.com/"},
        ],
        "😒 Sadness": [
            {"Title": "Talk to a professional 💬", "Link": "https://www.betterhelp.com/"},
            {"Title": "Mental health toolkit 🛠️", "Link": "https://www.psychologytoday.com/"},
        ],
        "😠 Anger": [
            {"Title": "Anger Management Tips 🔥", "Link": "https://www.mentalhealth.org.uk"},
            {"Title": "Stress Relieving Exercises 🌿", "Link": "https://www.calm.com/"},
        ],
    }
    return suggestions.get(emotion, [{"Title": "Wellness Resources 🌈", "Link": "https://www.helpguide.org/wellness"}])
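
# Note: the keys of the suggestions dict must match the strings produced by
# detect_emotion's emotion_map (e.g. "😊 Joy"); any other value falls back to
# the generic wellness resources entry.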

# Dummy Function for Location Query Simulation (replace this with actual map/search integration)
def search_nearby_professionals(location, query):
    """Simulate searching for nearby professionals and returning results."""
    if not (location and query):
        return []
    return [
        {"Name": "Wellness Center One", "Address": "123 Wellness Way"},
        {"Name": "Mental Health Clinic", "Address": "456 Recovery Road"},
        {"Name": "Therapists Hub", "Address": "789 Peace Avenue"},
    ]

def well_being_app(user_input, location, query, history):
    """Main function for chatbot, emotion detection, sentiment, suggestions, and location query."""
    # Chatbot response
    history, chatbot_reply = chatbot_response(user_input, history)

    # Emotion Detection
    emotion = detect_emotion(user_input)

    # Sentiment Analysis
    sentiment = analyze_sentiment(user_input)

    # Suggestions
    detected_emotion = emotion.split(": ")[-1]
    suggestions = generate_suggestions(detected_emotion)
    suggestions_df = pd.DataFrame(suggestions)

    # Nearby Professionals (Location Query), converted to a DataFrame for the gr.DataFrame output
    professionals_df = pd.DataFrame(search_nearby_professionals(location, query))

    return history, sentiment, emotion, suggestions_df, professionals_df

# Custom CSS for beautification
custom_css = """
body {
    background: linear-gradient(135deg, #28a745, #218838);
    font-family: Arial, sans-serif;
    color: black;
}

button {
    background-color: #1abc9c;
    color: white;
    padding: 10px 20px;
    font-size: 16px;
    border-radius: 8px;
    cursor: pointer;
}

button:hover {
    background-color: #16a085;
}

textarea, input[type="text"] {
    background: #ffffff;
    color: #000000;
    font-size: 14px;
    border: 1px solid #ced4da;
    padding: 10px;
    border-radius: 5px;
}
"""

# Gradio UI
with gr.Blocks(css=custom_css) as interface:
    gr.Markdown("# 🌱 **Well-being Companion**")
    gr.Markdown("### Empowering Your Mental Health Journey with AI 💚")

    # Input Section
    with gr.Row():
        message_input = gr.Textbox(label="Your Message", lines=2, placeholder="How can I support you today?", elem_id="message_input")
        location_input = gr.Textbox(label="Location", placeholder="Enter your location (e.g., New York City)")
        query_input = gr.Textbox(label="Search Query", placeholder="Professionals nearby? (e.g., doctors, therapists)")
    submit_button = gr.Button("Submit")

    # Chatbot Section
    with gr.Row():
        gr.Markdown("### Chatbot Response")
        chatbot_output = gr.Chatbot(label=None)

    # Sentiment and Emotion Section
    with gr.Row():
        gr.Markdown("### Sentiment Analysis")
        sentiment_output = gr.Textbox(label=None)
        gr.Markdown("### Detected Emotion")
        emotion_output = gr.Textbox(label=None)

    # Suggestions Section
    with gr.Row():
        gr.Markdown("### Suggestions")
        suggestions_output = gr.DataFrame(headers=["Title", "Link"], interactive=False, max_height=300)

    # Location Search Results Section
    with gr.Row():
        gr.Markdown("### Nearby Professionals")
        location_output = gr.DataFrame(headers=["Name", "Address"], interactive=False, max_height=300)

    # Pass component references (not strings) so Gradio can wire inputs and outputs correctly
    submit_button.click(
        well_being_app,
        inputs=[message_input, location_input, query_input, chatbot_output],
        outputs=[chatbot_output, sentiment_output, emotion_output, suggestions_output, location_output],
    )
# Launch the app
interface.launch()