"""Mental-health chatbot Gradio app.

Combines a tflearn intent-classification chatbot with HuggingFace
sentiment/emotion models, curated wellness suggestions, and a Google
Maps + folium search for nearby health professionals.
"""

import os
import gradio as gr
import nltk
import numpy as np
import tflearn
import random
import json
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import googlemaps
import folium
import torch

# Suppress TensorFlow GPU usage and warnings
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

# Download necessary NLTK resources (tokenizer models used by word_tokenize)
nltk.download("punkt")
stemmer = LancasterStemmer()

# Load chatbot training data
with open("intents.json") as file:
    intents_data = json.load(file)

# data.pickle holds the vocabulary, intent labels, and the training matrices
# produced when the tflearn model was trained.
# NOTE(review): pickle.load on a local artifact is fine here, but never use it
# on untrusted input.
with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)

# Build the Chatbot Model (architecture must match the saved checkpoint)
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
chatbot_model = tflearn.DNN(net)
chatbot_model.load("MentalHealthChatBotmodel.tflearn")

# Sentiment and Emotion Detection Models
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

# Build the emotion classification pipeline once at load time; the original
# rebuilt it inside detect_emotion() on every request, which is needlessly slow.
emotion_pipeline = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)

# Google Maps API Client (requires GOOGLE_API_KEY in the environment)
gmaps = googlemaps.Client(key=os.getenv("GOOGLE_API_KEY"))


# Helper Functions
def bag_of_words(s, words):
    """Convert user input to a bag-of-words vector over the trained vocabulary.

    Args:
        s: Raw user message.
        words: Ordered vocabulary (stemmed tokens) from training.

    Returns:
        np.ndarray of 0/1 flags, one per vocabulary entry.
    """
    bag = [0] * len(words)
    s_words = word_tokenize(s)
    # Stem and lowercase, dropping punctuation-only tokens, to match the
    # preprocessing used when the vocabulary was built.
    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)


def chatbot(message, history):
    """Generate chatbot response and append to chat history.

    Args:
        message: The user's message.
        history: Existing list of (message, response) pairs, or None.

    Returns:
        Tuple of (updated history, response string).
    """
    history = history or []
    try:
        result = chatbot_model.predict([bag_of_words(message, words)])
        tag = labels[np.argmax(result)]
        response = "I'm not sure how to respond to that. 🤔"
        for intent in intents_data["intents"]:
            if intent["tag"] == tag:
                response = random.choice(intent["responses"])
                break
    except Exception as e:
        # Surface the failure in-chat rather than crashing the UI callback.
        response = f"Error: {e}"
    history.append((message, response))
    return history, response


def analyze_sentiment(user_input):
    """Analyze sentiment from user input.

    Returns one of "Negative 😔", "Neutral 😐", "Positive 😊" — the index
    order matches the cardiffnlp/twitter-roberta-base-sentiment label order.
    """
    inputs = tokenizer_sentiment(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model_sentiment(**inputs)
    sentiment_class = torch.argmax(outputs.logits, dim=1).item()
    sentiment_map = ["Negative 😔", "Neutral 😐", "Positive 😊"]
    return sentiment_map[sentiment_class]


def detect_emotion(user_input):
    """Detect user emotion with emoji representation.

    Uses the module-level pipeline (built once at load time) instead of
    constructing a new one per call.
    """
    result = emotion_pipeline(user_input)
    emotion = result[0]["label"].lower()
    emotion_map = {
        "joy": "😊 Joy",
        "anger": "😠 Anger",
        "sadness": "😢 Sadness",
        "fear": "😨 Fear",
        "surprise": "😲 Surprise",
        "neutral": "😐 Neutral",
    }
    return emotion_map.get(emotion, "Unknown 🤔")


def generate_suggestions(emotion):
    """Provide suggestions for the detected emotion.

    Args:
        emotion: Emotion label (may include emoji prefix; matching is on the
            lowercased text, so plain labels like "joy" also work).

    Returns:
        List of [title, link-text] rows for display.
    """
    suggestions = {
        "joy": [
            ["Relaxation Techniques", 'Visit'],
            ["Dealing with Stress", 'Visit'],
            ["Emotional Wellness Toolkit", 'Visit'],
            ["Relaxation Video", 'Watch'],
        ],
        "anger": [
            ["Stress Management Tips", 'Visit'],
            ["Relaxation Video", 'Watch'],
        ],
        "fear": [
            ["Coping with Anxiety", 'Visit'],
            ["Mindfulness Practices", 'Watch'],
        ],
        "sadness": [
            ["Overcoming Sadness", 'Watch'],
        ],
        "surprise": [
            ["Managing Surprises", 'Visit'],
            ["Calm Relaxation", 'Watch'],
        ],
    }
    return suggestions.get(emotion.lower(), [["No suggestions are available.", ""]])


def get_health_professionals_and_map(location, query):
    """Search for nearby healthcare professionals and generate a map.

    Args:
        location: Free-text location to geocode.
        query: Keyword for the nearby-places search (e.g. "therapist").

    Returns:
        Tuple of (list of "name - address" strings, folium map HTML or "").
    """
    try:
        if not location or not query:
            return ["Please provide a valid location and query."], ""
        geo_location = gmaps.geocode(location)
        if geo_location:
            # Access lat/lng by key; the original unpacked dict .values(),
            # which silently depends on insertion order.
            coords = geo_location[0]["geometry"]["location"]
            lat, lng = coords["lat"], coords["lng"]
            places_result = gmaps.places_nearby(location=(lat, lng), radius=10000, keyword=query)["results"]
            professionals = []
            map_ = folium.Map(location=(lat, lng), zoom_start=13)
            for place in places_result:
                professionals.append(f"{place['name']} - {place.get('vicinity', 'No address available')}")
                folium.Marker(
                    location=[place["geometry"]["location"]["lat"], place["geometry"]["location"]["lng"]],
                    popup=f"{place['name']}"
                ).add_to(map_)
            return professionals, map_._repr_html_()
        return ["No professionals found for the given location."], ""
    except Exception as e:
        # Network/API failures are reported to the user instead of raising.
        return [f"An error occurred: {str(e)}"], ""


# Application Logic
def app_function(user_message, location, query, history):
    """Run one full turn: chat reply, sentiment, emotion, tips, and map."""
    chatbot_history, _ = chatbot(user_message, history)
    sentiment = analyze_sentiment(user_message)
    emotion = detect_emotion(user_message)
    suggestions = generate_suggestions(emotion)
    professionals, map_html = get_health_professionals_and_map(location, query)
    return chatbot_history, sentiment, emotion, suggestions, professionals, map_html


# CSS Styling for Centered and Bigger Titles
custom_css = """
body {
    background: linear-gradient(135deg, #000000, #ff5722);
    font-family: 'Roboto', sans-serif;
    color: white;
}
h1 {
    font-size: 4rem;
    font-weight: bold;
    text-align: center;
    margin-bottom: 10px;
    text-shadow: 3px 3px 8px rgba(0, 0, 0, 0.7);
}
h3 {
    font-size: 2rem;
    text-align: center;
    margin-bottom: 40px;
    font-weight: lighter;
    color: white;
}
button {
    background: linear-gradient(45deg, #ff5722, #ff9800) !important;
    border: none;
    padding: 12px 20px;
    font-size: 16px;
    border-radius: 8px;
    color: white;
    cursor: pointer;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.3);
}
textarea, input {
    background: black !important;
    color: white !important;
    border: 1px solid #ff5722;
    border-radius: 8px;
}
"""

# Gradio Application
gr.Blocks(css=custom_css) as app: gr.HTML("