import json
import pickle
import random
import nltk
import numpy as np
import tflearn
import gradio as gr
import requests
import torch
import pandas as pd
from bs4 import BeautifulSoup
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
import os
# Ensure necessary NLTK resources are downloaded
nltk.download('punkt')

# Initialize the stemmer
stemmer = LancasterStemmer()
# Load intents.json
try:
    with open("intents.json") as file:
        data = json.load(file)
except FileNotFoundError:
    raise FileNotFoundError("Error: 'intents.json' file not found. Ensure it exists in the current directory.")

# Load preprocessed data from pickle
try:
    with open("data.pickle", "rb") as f:
        words, labels, training, output = pickle.load(f)
except FileNotFoundError:
    raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")
# Build the model structure
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

# Load the trained model
model = tflearn.DNN(net)
try:
    model.load("MentalHealthChatBotmodel.tflearn")
except FileNotFoundError:
    raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found.")
# Function to process user input into a bag-of-words format
def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]
    s_words = word_tokenize(s)
    # Stem every token first so it can be matched against the stemmed vocabulary
    s_words = [stemmer.stem(word.lower()) for word in s_words]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
# Chat function
def chat(message, history):
    history = history or []
    message = message.lower()
    try:
        # Predict the tag
        results = model.predict([bag_of_words(message, words)])
        results_index = np.argmax(results)
        tag = labels[results_index]

        # Match tag with intent and choose a random response
        for tg in data["intents"]:
            if tg['tag'] == tag:
                responses = tg['responses']
                response = random.choice(responses)
                break
        else:
            response = "I'm sorry, I didn't understand that. Could you please rephrase?"
    except Exception as e:
        response = f"An error occurred: {str(e)}"

    history.append((message, response))
    return history, history
# Sentiment analysis setup
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
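# The sentiment model above is loaded but never called elsewhere in this script.
# A minimal sketch of how it could be used; the LABEL_0/1/2 -> negative/neutral/positive
# mapping is an assumption about cardiffnlp/twitter-roberta-base-sentiment's output labels.
def analyze_sentiment(user_input):
    pipe = pipeline("sentiment-analysis", model=model_sentiment, tokenizer=tokenizer_sentiment)
    result = pipe(user_input)[0]
    label_map = {"LABEL_0": "negative", "LABEL_1": "neutral", "LABEL_2": "positive"}
    return label_map.get(result["label"], result["label"]), result["score"]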
# Emotion detection setup
def load_emotion_model():
    tokenizer = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
    model = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
    return tokenizer, model

tokenizer_emotion, model_emotion = load_emotion_model()
# Emotion detection function with suggestions in plain English
def detect_emotion(user_input):
    pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
    result = pipe(user_input)
    emotion = result[0]['label']

    # Provide suggestions based on the detected emotion
    if emotion == 'joy':
        emotion_msg = "You're feeling happy! Keep up the great mood!"
        resources = [
            {"subject": "Relaxation Techniques", "link": "https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation"},
            {"subject": "Dealing with Stress", "link": "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety"},
            {"subject": "Emotional Wellness Toolkit", "link": "https://www.nih.gov/health-information/emotional-wellness-toolkit"}
        ]
        video_link = "Watch on YouTube: https://youtu.be/m1vaUGtyo-A"
    elif emotion == 'anger':
        emotion_msg = "You're feeling angry. It's okay to feel this way. Let's try to calm down."
        resources = [
            {"subject": "Emotional Wellness Toolkit", "link": "https://www.nih.gov/health-information/emotional-wellness-toolkit"},
            {"subject": "Stress Management Tips", "link": "https://www.health.harvard.edu/health-a-to-z"},
            {"subject": "Dealing with Anger", "link": "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety"}
        ]
        video_link = "Watch on YouTube: https://youtu.be/MIc299Flibs"
    elif emotion == 'fear':
        emotion_msg = "You're feeling fearful. Take a moment to breathe and relax."
        resources = [
            {"subject": "Mindfulness Practices", "link": "https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation"},
            {"subject": "Coping with Anxiety", "link": "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety"},
            {"subject": "Emotional Wellness Toolkit", "link": "https://www.nih.gov/health-information/emotional-wellness-toolkit"}
        ]
        video_link = "Watch on YouTube: https://youtu.be/yGKKz185M5o"
    elif emotion == 'sadness':
        emotion_msg = "You're feeling sad. It's okay to take a break."
        resources = [
            {"subject": "Emotional Wellness Toolkit", "link": "https://www.nih.gov/health-information/emotional-wellness-toolkit"},
            {"subject": "Dealing with Anxiety", "link": "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety"}
        ]
        video_link = "Watch on YouTube: https://youtu.be/-e-4Kx5px_I"
    elif emotion == 'surprise':
        emotion_msg = "You're feeling surprised. Take a moment to process it!"
        resources = [
            {"subject": "Managing Stress", "link": "https://www.health.harvard.edu/health-a-to-z"},
            {"subject": "Coping Strategies", "link": "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety"}
        ]
        video_link = "Watch on YouTube: https://youtu.be/m1vaUGtyo-A"
    else:
        emotion_msg = "Could not detect emotion."
        resources = []
        video_link = ""

    return emotion_msg, resources, video_link
# Google Geocoding API setup to convert a city name to latitude/longitude
geocode_url = "https://maps.googleapis.com/maps/api/geocode/json"

def get_lat_lon(location, api_key):
    params = {
        "address": location,
        "key": api_key
    }
    response = requests.get(geocode_url, params=params)
    if response.status_code == 200:
        result = response.json()
        if result['status'] == 'OK':
            # Return the first result's latitude and longitude
            location = result['results'][0]['geometry']['location']
            return location['lat'], location['lng']
    return None, None
# Google Places API setup for wellness professionals
url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
places_details_url = "https://maps.googleapis.com/maps/api/place/details/json"
api_key = os.getenv("GOOGLE_API_KEY")  # Use an environment variable for security
# Function to get places data using the Google Places API
def get_places_data(query, location, radius, api_key, next_page_token=None):
    params = {
        "query": query,
        "location": location,
        "radius": radius,
        "key": api_key
    }
    if next_page_token:
        params["pagetoken"] = next_page_token
    response = requests.get(url, params=params)
    if response.status_code == 200:
        return response.json()
    else:
        return None
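# Illustrative usage only (assumes a valid GOOGLE_API_KEY is set; the city name and
# 10000 m radius below are example values, not part of the original script):
# lat, lon = get_lat_lon("Honolulu", api_key)
# if lat is not None:
#     places = get_places_data("wellness professionals", f"{lat},{lon}", 10000, api_key)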
# Web scraping function to get wellness professional data (alternative to the API)
def scrape_wellness_professionals(query, location):
    # User-Agent header to simulate a browser request
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    search_url = f"https://www.google.com/search?q={query}+near+{location}"

    # Make a request to the search URL with headers
    response = requests.get(search_url, headers=headers)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, 'html.parser')
        # Find the results based on HTML structure
        # Note: this is a simplistic example; Google's search result markup may change
        result_divs = soup.find_all("div", class_="BVG0Nb")
        results = []
        for div in result_divs:
            name = div.get_text()
            anchor = div.find("a")
            # Skip entries without a link to avoid errors on missing anchors
            if anchor is None or not anchor.get("href"):
                continue
            results.append({"name": name, "link": anchor["href"]})
        return results
    else:
        return None
# Initialize the chatbot interface
iface = gr.Interface(
    fn=chat,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    allow_flagging="never",
    live=True
)

iface.launch(share=True)