# Testing / app.py — Hugging Face Space "DreamStream-1"
# (commit fabcaa4 "Update app.py", 8.19 kB; header lines were page chrome
# captured from the HF file viewer, preserved here as a comment)
import gradio as gr
import pandas as pd
import nltk
import numpy as np
import tflearn
import random
import json
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import googlemaps
import folium
import os
import base64
import torch # Added missing import for torch
from PIL import Image
# --- Module-level setup -----------------------------------------------------

# Force TensorFlow onto the CPU; the deployment has no GPU allocated.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# Make sure the tokenizer data NLTK's word_tokenize needs is present.
nltk.download('punkt')

# Stemmer used to normalise tokens for the bag-of-words intent model.
stemmer = LancasterStemmer()

# Intent definitions (tag -> patterns/responses) for the well-being chatbot.
with open("intents.json") as file:
    data = json.load(file)

# Preprocessed training artefacts produced offline.
# NOTE(review): unpickling is only acceptable because data.pickle ships with
# the app — never load a pickle from an untrusted source.
with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)

# Network topology must match the one used at training time:
# input -> 8 -> 8 -> softmax over the intent tags.
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

# Load the pre-trained chatbot model weights.
model = tflearn.DNN(net)
model.load("MentalHealthChatBotmodel.tflearn")
def bag_of_words(s, words):
    """Convert sentence *s* into a binary bag-of-words vector.

    Args:
        s: Raw user sentence.
        words: Stemmed vocabulary list the model was trained on.

    Returns:
        np.ndarray of 0/1 with the same length as ``words``.
    """
    bag = [0] * len(words)
    # Stem every token first. The vocabulary holds *stemmed* words, so the
    # original pre-filter `if word.lower() in words` (applied BEFORE stemming)
    # silently dropped inflected forms such as "feeling" (stem "feel").
    stemmed_tokens = [stemmer.stem(word.lower()) for word in word_tokenize(s)]
    # O(1) vocabulary lookups instead of scanning the list per token.
    vocab_index = {w: i for i, w in enumerate(words)}
    for token in stemmed_tokens:
        idx = vocab_index.get(token)
        if idx is not None:
            bag[idx] = 1
    return np.array(bag)
def chatbot(message, history):
    """Answer one chat turn for the well-being chatbot.

    Args:
        message: Raw user message.
        history: Running list of {"role", "content"} dicts (or None).

    Returns:
        (history, history) — duplicated because Gradio wires the same state
        into two outputs.
    """
    history = history or []
    message = message.lower()
    try:
        # Classify the message into an intent tag.
        prediction = model.predict([bag_of_words(message, words)])
        tag = labels[np.argmax(prediction)]
        # Pick a random canned response for the matched intent; the for-else
        # fires only when no intent carries the predicted tag.
        for intent in data["intents"]:
            if intent['tag'] == tag:
                response = random.choice(intent['responses'])
                break
        else:
            response = "I'm sorry, I didn't understand that. Could you please rephrase?"
    except Exception as e:
        print(f"Error in chatbot: {e}")  # For debugging
        response = f"An error occurred: {str(e)}"
    # Record the exchange in OpenAI-style "messages" format.
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": response})
    return history, history
# Sentiment analysis model (3 classes: negative / neutral / positive).
_SENTIMENT_MODEL_ID = "cardiffnlp/twitter-roberta-base-sentiment"
tokenizer_sentiment = AutoTokenizer.from_pretrained(_SENTIMENT_MODEL_ID)
model_sentiment = AutoModelForSequenceClassification.from_pretrained(_SENTIMENT_MODEL_ID)
def analyze_sentiment(user_input):
    """Classify *user_input* as Negative / Neutral / Positive.

    Returns a display string of the form "Predicted Sentiment: <label>".
    """
    encoded = tokenizer_sentiment(user_input, return_tensors="pt")
    # Inference only — no gradients needed.
    with torch.no_grad():
        logits = model_sentiment(**encoded).logits
    # Label order follows the cardiffnlp model card: 0=neg, 1=neu, 2=pos.
    class_labels = ("Negative", "Neutral", "Positive")
    predicted = class_labels[int(torch.argmax(logits, dim=1))]
    return f"Predicted Sentiment: {predicted}"
# Emotion detection model (joy/anger/fear/sadness/surprise/disgust/neutral).
_EMOTION_MODEL_ID = "j-hartmann/emotion-english-distilroberta-base"
tokenizer_emotion = AutoTokenizer.from_pretrained(_EMOTION_MODEL_ID)
model_emotion = AutoModelForSequenceClassification.from_pretrained(_EMOTION_MODEL_ID)
def detect_emotion(user_input):
    """Detect the dominant emotion in *user_input*.

    Returns a display string of the form "Emotion Detected: <label>".
    """
    # Build the HF pipeline once and cache it on the function object; the
    # original constructed a fresh pipeline on every call, which is slow.
    pipe = getattr(detect_emotion, "_pipe", None)
    if pipe is None:
        pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
        detect_emotion._pipe = pipe
    result = pipe(user_input)
    emotion = result[0]['label']
    return f"Emotion Detected: {emotion}"
def generate_suggestions(emotion):
    """Return a one-column DataFrame of well-being tips for *emotion*.

    Args:
        emotion: Emotion label (any case); unknown labels get a generic tip.

    Returns:
        pandas.DataFrame with one suggestion per row.
    """
    suggestions = {
        'joy': ["Stay positive! Keep up the good mood.", "Try some relaxing activities like meditation."],
        'anger': ["It's okay to be angry, try to breathe and relax.", "Exercise can help release tension."],
        'fear': ["Take deep breaths, you are in control.", "Try mindfulness exercises to calm your mind."],
        'sadness': ["Take a break, it's okay to feel down sometimes.", "Consider reaching out to a friend or loved one."],
        'surprise': ["Take a moment to reflect, things might seem overwhelming.", "Practice mindfulness to regain balance."],
        'disgust': ["It's okay to feel disgust, try to identify the cause.", "Taking a short walk might help clear your mind."]
    }
    # Normalise the key so "Joy"/"JOY" (model labels vary in case) still
    # resolve; anything unknown falls back to a generic encouragement.
    tips = suggestions.get(str(emotion).lower(), ["Stay positive!"])
    return pd.DataFrame(tips)
def get_health_professionals_and_map(location, health_professional_query):
    """Look up nearby health professionals via Google Places and map them.

    Args:
        location: Location string/coords forwarded to the Places API.
        health_professional_query: Free-text search query.

    Returns:
        (route_info, map_html): newline-joined place names and a folium map
        HTML snippet, or ("<message>", None) when nothing was found or the
        API call failed.
    """
    # Read the key from the environment instead of shipping a placeholder
    # literal; the fallback keeps the original (non-working) behaviour when
    # the variable is unset.
    api_key = os.environ.get("GOOGLE_MAPS_API_KEY", "YOUR_GOOGLE_API_KEY")
    try:
        gmaps = googlemaps.Client(key=api_key)
        places = gmaps.places(query=health_professional_query, location=location)
    except Exception as e:
        # Network/auth failures should degrade gracefully, not crash the UI.
        return f"Error retrieving professionals: {e}", None
    if places.get('status') == 'OK':
        results = places['results']
        route_info = "\n".join(place['name'] for place in results)
        return route_info, create_map(results)
    return "No professionals found.", None
def create_map(places):
    """Build a folium map with one marker per place.

    Args:
        places: Non-empty list of Google Places result dicts; the first
                place's coordinates centre the map.

    Returns:
        HTML string embedding the rendered map.
    """
    centre = places[0]['geometry']['location']
    fmap = folium.Map(location=[centre['lat'], centre['lng']], zoom_start=13)
    for place in places:
        coords = place['geometry']['location']
        folium.Marker([coords['lat'], coords['lng']],
                      popup=place['name']).add_to(fmap)
    return fmap._repr_html_()
# Custom CSS styling for the Gradio interface.
# NOTE(review): selectors like ".gradio-input"/".gradio-output" come from
# older Gradio releases — verify they still match the deployed version.
css = """
body {
font-family: 'Roboto', sans-serif;
}
.gradio-container {
background-color: #f0f0f0;
font-size: 16px;
}
.gradio-input, .gradio-output {
padding: 15px;
border-radius: 10px;
background-color: #ffffff;
border: 2px solid #ccc;
}
.gradio-container .gradio-button {
background-color: #007BFF;
color: white;
border-radius: 5px;
padding: 10px 15px;
}
.gradio-container .gradio-button:hover {
background-color: #0056b3;
}
.gradio-container h3 {
color: #333;
}
.gradio-output .output {
border-top: 3px solid #ddd;
padding-top: 10px;
}
.gradio-input input {
color: #333;
}
.gradio-input textarea {
color: #333;
}
"""
def gradio_app(message, current_location, health_professional_query, history):
    """Run the full pipeline for one submission: sentiment, emotion,
    suggestions, and a map of nearby professionals.

    Returns:
        (sentiment, emotion display, suggestions DataFrame, route info,
        map HTML, history) matching the Interface outputs.
    """
    # Detect sentiment and emotion
    sentiment = analyze_sentiment(message)
    emotion = detect_emotion(message)
    # detect_emotion returns "Emotion Detected: <label>". The original passed
    # that whole sentence as the dictionary key, so the suggestion and
    # emoticon lookups ALWAYS missed — strip the prefix down to the label.
    emotion_label = emotion.split(":", 1)[-1].strip().lower()
    # Generate suggestions based on emotion
    suggestions_df = generate_suggestions(emotion_label)
    # Get health professionals details and map
    route_info, map_html = get_health_professionals_and_map(current_location, health_professional_query)
    # Add emoticon for emotion
    emotion_emoticons = {
        'joy': '😊',
        'anger': '😡',
        'fear': '😨',
        'sadness': '😢',
        'surprise': '😲',
        'disgust': '🤢'
    }
    emotion_icon = emotion_emoticons.get(emotion_label, '🙂')
    return sentiment, f"{emotion_icon} {emotion_label}", suggestions_df, route_info, map_html, history
# --- Gradio interface wiring ------------------------------------------------
# Three text inputs plus session state in; five displays plus state out,
# matching gradio_app's return tuple one-to-one.
iface = gr.Interface(
    fn=gradio_app,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your message..."),
        gr.Textbox(lines=2, placeholder="Enter your current location..."),
        gr.Textbox(lines=2, placeholder="Enter health professional query..."),
        gr.State(value=[]),
    ],
    outputs=[
        gr.Textbox(label="Sentiment Analysis"),
        gr.Textbox(label="Detected Emotion"),
        gr.Dataframe(label="Suggestions"),
        gr.Textbox(label="Nearby Health Professionals"),
        gr.HTML(label="Map of Health Professionals"),
        gr.State(value=[]),
    ],
    live=True,
    allow_flagging="never",
    theme="huggingface",
    css=css,  # Apply custom CSS styling
)

# Launch with a public share link (standard for Hugging Face Spaces demos).
iface.launch(share=True)