# Hugging Face Spaces page banner captured by the scraper (not part of the app):
# Spaces: Sleeping
# Sleeping
import csv
import json
import os
import pickle
import random
import re
import time

import chromedriver_autoinstaller
import gradio as gr
import nltk
import numpy as np
import pandas as pd
import requests
import tflearn
import torch
from bs4 import BeautifulSoup
from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import word_tokenize
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
# Ensure necessary NLTK resources are downloaded (tokenizer data for word_tokenize).
nltk.download('punkt')

# Stemmer used to normalize tokens before bag-of-words lookup.
stemmer = LancasterStemmer()

# Load intents.json (directly in the app directory).
try:
    with open("intents.json") as file:
        data = json.load(file)
except FileNotFoundError:
    raise FileNotFoundError("Error: 'intents.json' file not found in the app directory.")

# Load preprocessed data from pickle (directly in the app directory).
# NOTE(review): pickle.load can execute arbitrary code; this assumes
# data.pickle ships with the app and is trusted.
try:
    with open("data.pickle", "rb") as f:
        words, labels, training, output = pickle.load(f)
except FileNotFoundError:
    raise FileNotFoundError("Error: 'data.pickle' file not found in the app directory.")

# Build the model structure; it must match the architecture used at training
# time or model.load() below will fail.
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

# Load the trained model weights (directly in the app directory).
model = tflearn.DNN(net)
try:
    model.load("MentalHealthChatBotmodel.tflearn")
except FileNotFoundError:
    raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found in the app directory.")
# Function to process user input into a bag-of-words format
def bag_of_words(s, words):
    """Encode sentence *s* as a binary bag-of-words vector over vocab *words*.

    *words* is the stemmed, lowercased vocabulary loaded from data.pickle.
    Returns a numpy array of 0/1 flags, one per vocabulary entry.
    """
    bag = [0] * len(words)
    # Stem every token first, then match against the (stemmed) vocabulary.
    # The previous version filtered by the RAW lowercase token before stemming,
    # which dropped any word whose stem (but not raw form) is in the vocab.
    s_words = [stemmer.stem(token.lower()) for token in word_tokenize(s)]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
# Chat function (Chatbot)
def chat(message, history):
    """Classify *message* with the intents model and append a canned reply.

    Returns the updated history twice (chatbot display + state output).
    """
    history = history or []
    message = message.lower()
    try:
        # Predict tag probabilities and keep the most likely label.
        scores = model.predict([bag_of_words(message, words)])
        tag = labels[np.argmax(scores)]

        # Find the matching intent and pick one of its responses at random.
        for intent in data["intents"]:
            if intent['tag'] == tag:
                response = random.choice(intent['responses'])
                break
        else:
            response = "I'm sorry, I didn't understand that. Could you please rephrase?"
    except Exception as e:
        # Surface the failure in-chat rather than crashing the UI.
        response = f"An error occurred: {str(e)}"
    history.append((message, response))
    return history, history
# Sentiment Analysis (3-class RoBERTa fine-tuned on tweets).
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

def analyze_sentiment(user_input):
    """Return a markdown string naming the predicted sentiment of *user_input*."""
    inputs = tokenizer_sentiment(user_input, return_tensors="pt")
    with torch.no_grad():  # inference only; no gradients needed
        outputs = model_sentiment(**inputs)
    predicted_class = torch.argmax(outputs.logits, dim=1).item()
    # Class order per the cardiffnlp model card: 0=negative, 1=neutral, 2=positive.
    sentiment = ["Negative", "Neutral", "Positive"][predicted_class]
    return f"**Predicted Sentiment:** {sentiment}"
# Emotion Detection (DistilRoBERTa fine-tuned for basic emotions).
tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)

def detect_emotion(user_input):
    """Return the top emotion label the pipeline predicts for *user_input*."""
    top_prediction = pipe(user_input)[0]
    return top_prediction['label']
def provide_suggestions(emotion):
    """Return a DataFrame of article/video resources matched to *emotion*.

    Unknown emotions yield an empty DataFrame with the same three columns.
    """
    # DataFrame.append was removed in pandas 2.0 — collect rows up front and
    # build the frame once instead of appending row by row.
    resources = {
        'joy': [
            ("Relaxation Techniques",
             "https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation",
             "https://youtu.be/m1vaUGtyo-A"),
            ("Dealing with Stress",
             "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety",
             "https://youtu.be/MIc299Flibs"),
        ],
        'anger': [
            ("Managing Anger",
             "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety",
             "https://youtu.be/MIc299Flibs"),
        ],
        'fear': [
            ("Coping with Anxiety",
             "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety",
             "https://youtu.be/yGKKz185M5o"),
        ],
        'sadness': [
            ("Dealing with Sadness",
             "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety",
             "https://youtu.be/-e-4Kx5px_I"),
        ],
        'surprise': [
            ("Managing Stress",
             "https://www.health.harvard.edu/health-a-to-z",
             "https://youtu.be/m1vaUGtyo-A"),
        ],
    }
    rows = resources.get(emotion, [])
    return pd.DataFrame(rows, columns=["Subject", "Article URL", "Video URL"])
# Google Places API to get nearby wellness professionals
api_key = "YOUR_GOOGLE_API_KEY"  # Replace with your actual API key

def install_chrome_and_driver():
    """Install Google Chrome and a matching chromedriver for headless scraping.

    NOTE(review): shells out to apt-get/dpkg, so this assumes a root Debian
    container (as on HF Spaces); os.system return codes are not checked, so
    failures are silent — TODO consider subprocess.run with check=True.
    """
    os.system("apt-get update")
    os.system("apt-get install -y wget curl")
    os.system("wget -q https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb")
    os.system("dpkg -i google-chrome-stable_current_amd64.deb")
    os.system("apt-get install -y -f")  # fix any missing Chrome dependencies
    os.system("google-chrome-stable --version")  # log the installed version
    chromedriver_autoinstaller.install()

# Install Chrome and Chromedriver at import time.
install_chrome_and_driver()
# Fetch places data using Google Places API
def get_places_data(query, location, radius, api_key):
    """Query the Google Places Text Search API.

    Returns the parsed JSON payload on HTTP 200, otherwise None.
    """
    url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
    params = {
        "query": query,
        "location": location,
        "radius": radius,
        "key": api_key,
    }
    # Timeout keeps a stalled request from hanging the Gradio worker forever.
    response = requests.get(url, params=params, timeout=10)
    if response.status_code == 200:
        return response.json()
    return None
# Scrape website URL from Google Maps results (using Selenium)
def scrape_website_from_google_maps(place_name):
    """Open Google Maps for *place_name* and try to read its website link.

    Returns the href of the "Visit ... website" anchor, or "Not available"
    when the element cannot be found.
    """
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-dev-shm-usage")
    driver = webdriver.Chrome(options=chrome_options)
    try:
        search_url = f"https://www.google.com/maps/search/{place_name.replace(' ', '+')}"
        driver.get(search_url)
        time.sleep(5)  # crude wait for the results panel to render
        try:
            # Selenium 4 removed find_element_by_xpath; use find_element(By.XPATH, ...).
            website_element = driver.find_element(
                By.XPATH,
                '//a[contains(@aria-label, "Visit") and contains(@aria-label, "website")]',
            )
            website_url = website_element.get_attribute('href')
        except Exception:
            website_url = "Not available"
    finally:
        # Always release the browser, even if navigation itself raised.
        driver.quit()
    return website_url
# Get all wellness professionals based on the location
def get_wellness_professionals(location):
    """Search Google Places for wellness professionals near *location*.

    Returns a DataFrame with Name / Address / Website columns; empty when the
    Places API request fails.
    """
    query = "therapist OR counselor OR mental health professional OR marriage and family therapist OR psychotherapist OR psychiatrist OR psychologist OR nutritionist OR wellness doctor OR holistic practitioner"
    radius = 50000  # 50 km radius
    payload = get_places_data(query, location, radius, api_key)
    if not payload:
        return pd.DataFrame()
    rows = []
    for place in payload.get('results', []):
        name = place.get('name')
        website = place.get('website', 'Not available')
        if website == 'Not available':
            # Text Search results often omit the website; fall back to scraping.
            website = scrape_website_from_google_maps(name)
        rows.append([name, place.get('formatted_address'), website])
    return pd.DataFrame(rows, columns=["Name", "Address", "Website"])
# Glue function for the UI. The original file passed fn=gradio_interface to
# gr.Interface without ever defining it, which raises NameError at startup.
def gradio_interface(message, location, state):
    """Run chat, sentiment, emotion, suggestions and the professional search
    for one user turn and return every UI output plus the updated state."""
    history, _ = chat(message, state)
    sentiment = analyze_sentiment(message)
    emotion = detect_emotion(message)
    suggestions = provide_suggestions(emotion)
    professionals = get_wellness_professionals(location)
    # Chatbot(type="messages") expects role/content dicts, not (user, bot) tuples.
    chat_messages = []
    for user_msg, bot_msg in history:
        chat_messages.append({"role": "user", "content": user_msg})
        chat_messages.append({"role": "assistant", "content": bot_msg})
    return chat_messages, sentiment, emotion, suggestions, professionals, history

# Gradio Interface Setup
iface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Enter your message", placeholder="How are you feeling today?"),
        gr.Textbox(label="Enter your location (e.g., Hawaii, Oahu)", placeholder="Your location"),
        gr.State()  # chat history carried between turns
    ],
    outputs=[
        gr.Chatbot(label="Chat History", type="messages"),  # role/content dicts
        gr.Textbox(label="Sentiment Analysis"),
        gr.Textbox(label="Detected Emotion"),
        gr.Dataframe(label="Suggestions & Resources"),
        gr.Dataframe(label="Nearby Wellness Professionals"),
        gr.State()  # updated history fed back in as `state`
    ],
    allow_flagging="never",
    title="Mental Wellbeing App with AI Assistance",
    description="This app provides a mental health chatbot, sentiment analysis, emotion detection, and wellness professional search functionality.",
)

iface.launch(debug=True, share=True)  # share=True creates a public link