import nltk
import numpy as np
import tflearn
import tensorflow
import torch
import random
import json
import pickle
import gradio as gr
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import googlemaps
import folium
import os
# Ensure necessary NLTK resources are downloaded
nltk.download('punkt')

# Initialize the stemmer
stemmer = LancasterStemmer()
# Load intents.json for the Mental Health Chatbot
with open("intents.json") as file:
    data = json.load(file)

# Load preprocessed data for the Mental Health Chatbot
with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)
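
# Assumed contents of data.pickle (inferred from how the values are used below):
#   words    - list of stemmed vocabulary tokens
#   labels   - list of intent tags from intents.json
#   training - list of bag-of-words vectors, one per training pattern
#   output   - list of one-hot vectors over the intent tags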
# Build the model structure for the Mental Health Chatbot
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

# Load the trained model
model = tflearn.DNN(net)
model.load("MentalHealthChatBotmodel.tflearn")
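
# Note: MentalHealthChatBotmodel.tflearn and data.pickle must already exist.
# A minimal (hypothetical) training sketch with the same network would be:
#   model.fit(np.array(training), np.array(output), n_epoch=1000, batch_size=8, show_metric=True)
#   model.save("MentalHealthChatBotmodel.tflearn")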
# Function to process user input into a bag-of-words format for the chatbot
def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]
    s_words = word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
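
# Usage sketch: bag_of_words("I need some help", words) returns a binary numpy
# vector of length len(words), with a 1 at each index whose vocabulary token
# matches a stemmed token from the input sentence.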
# Chat function for the Mental Health Chatbot
def chatbot(message, history):
    history = history or []
    message = message.lower()
    try:
        # Predict the tag
        results = model.predict([bag_of_words(message, words)])
        results_index = np.argmax(results)
        tag = labels[results_index]

        # Match the tag with an intent and choose a random response
        for tg in data["intents"]:
            if tg['tag'] == tag:
                responses = tg['responses']
                response = random.choice(responses)
                break
        else:
            response = "I'm sorry, I didn't understand that. Could you please rephrase?"
    except Exception as e:
        response = f"An error occurred: {str(e)}"
    history.append((message, response))
    return history, history
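
# Usage sketch: chatbot("I feel stressed", []) returns the updated history twice,
# e.g. ([("i feel stressed", "<response drawn from intents.json>")], <same list>),
# so it can feed both the Chatbot display and the session state.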
# Sentiment Analysis using a Hugging Face model
tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

def analyze_sentiment(user_input):
    inputs = tokenizer(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model_sentiment(**inputs)
    predicted_class = torch.argmax(outputs.logits, dim=1).item()
    sentiment = ["Negative", "Neutral", "Positive"][predicted_class]  # Assuming 3 classes
    return f"Predicted Sentiment: {sentiment}"
# Emotion Detection using a Hugging Face model
tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

def detect_emotion(user_input):
    pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
    result = pipe(user_input)
    emotion = result[0]['label']
    return f"Emotion Detected: {emotion}"
# Initialize the Google Maps API client securely (key read from the environment)
gmaps = googlemaps.Client(key=os.getenv('GOOGLE_API_KEY'))

# Function to search for health professionals
def search_health_professionals(query, location, radius=10000):
    places_result = gmaps.places_nearby(location, radius=radius, type='doctor', keyword=query)
    return places_result.get('results', [])
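
# Places Nearby parameters: `location` is a (lat, lng) tuple, `radius` is in metres,
# and `type`/`keyword` narrow the results (e.g. type='doctor' with keyword='psychiatrist').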
# Function to find nearby health professionals and render them on a map for the Gradio UI
def get_health_professionals_and_map(current_location, health_professional_query):
    route_info = ""
    m = None  # Default to None
    try:
        # Geocode the current location (i.e., convert it to latitude and longitude)
        geocode_result = gmaps.geocode(current_location)
        if not geocode_result:
            route_info = "Could not retrieve location coordinates. Please enter a valid location."
            m = folium.Map(location=[20, 0], zoom_start=2)  # Default world map when geocoding fails
            return route_info, m._repr_html_()
        location_coords = geocode_result[0]['geometry']['location']
        lat, lon = location_coords['lat'], location_coords['lng']

        # Search for health professionals
        health_professionals = search_health_professionals(health_professional_query, (lat, lon))
        if health_professionals:
            route_info = "Health professionals found:\n"
            m = folium.Map(location=[lat, lon], zoom_start=12)
            for professional in health_professionals:
                name = professional['name']
                vicinity = professional.get('vicinity', 'N/A')
                rating = professional.get('rating', 'N/A')
                folium.Marker(
                    [professional['geometry']['location']['lat'], professional['geometry']['location']['lng']],
                    popup=f"{name}\n{vicinity}\nRating: {rating}"
                ).add_to(m)
                route_info += f"- {name} ({rating} stars): {vicinity}\n"
        else:
            route_info = "No health professionals found matching your query."
            m = folium.Map(location=[lat, lon], zoom_start=12)  # Default map if no professionals are found
    except Exception as e:
        route_info = f"Error: {str(e)}"
        m = folium.Map(location=[20, 0], zoom_start=2)  # Default map if any error occurs
    return route_info, m._repr_html_()
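
# Usage sketch (assuming a valid GOOGLE_API_KEY is set in the environment):
#   info, map_html = get_health_professionals_and_map("Honolulu, HI", "psychiatrist")
#   # info is a plain-text list of matches; map_html is the folium map rendered as HTML.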
# Gradio interface function: chat, sentiment, emotion, and health professional search
def gradio_app(message, location, health_query, history):
    # Chatbot interaction
    history, _ = chatbot(message, history)

    # Sentiment analysis
    sentiment_response = analyze_sentiment(message)

    # Emotion detection
    emotion_response = detect_emotion(message)

    # Health professional search and map display
    route_info, map_html = get_health_professionals_and_map(location, health_query)

    # Return the history twice: once for the Chatbot display and once to update the session state
    return history, sentiment_response, emotion_response, route_info, map_html, history
# Gradio UI components
message_input = gr.Textbox(lines=1, label="Message")
location_input = gr.Textbox(value="Honolulu, HI", label="Current Location")
health_query_input = gr.Textbox(value="doctor", label="Health Professional Query (e.g., doctor, psychiatrist, psychologist)")
chat_history = gr.Chatbot(label="Chat History")

# Outputs
sentiment_output = gr.Textbox(label="Sentiment Analysis Result")
emotion_output = gr.Textbox(label="Emotion Detection Result")
route_info_output = gr.Textbox(label="Health Professionals Information")
map_output = gr.HTML(label="Map with Health Professionals")
# Create the Gradio interface
iface = gr.Interface(
    fn=gradio_app,
    inputs=[message_input, location_input, health_query_input, "state"],
    outputs=[chat_history, sentiment_output, emotion_output, route_info_output, map_output, "state"],
    allow_flagging="never",
    live=True,
    title="Wellbeing App: Mental Health, Sentiment, Emotion Detection & Health Professional Search"
)

iface.launch()
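
# Note: iface.launch() with no arguments is typical when hosting on Hugging Face Spaces;
# on other hosts you may need e.g. iface.launch(server_name="0.0.0.0", server_port=7860).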