import json
import pickle
import random
import time

import gradio as gr
import nltk
import numpy as np
import pandas as pd
import requests
import tflearn
import torch
from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import word_tokenize
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

# Ensure necessary NLTK resources are downloaded
nltk.download('punkt')

# Initialize the stemmer
stemmer = LancasterStemmer()

# Load intents.json
try:
    with open("intents.json") as file:
        data = json.load(file)
except FileNotFoundError:
    raise FileNotFoundError("Error: 'intents.json' file not found. Ensure it exists in the current directory.")

# Load preprocessed data from pickle
try:
    with open("data.pickle", "rb") as f:
        words, labels, training, output = pickle.load(f)
except FileNotFoundError:
    raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")

# Build the model structure
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

# Load the trained model
model = tflearn.DNN(net)
try:
    model.load("MentalHealthChatBotmodel.tflearn")
except FileNotFoundError:
    raise FileNotFoundError("Error: Trained model file 'MentalHealthChatBotmodel.tflearn' not found.")

# Function to process user input into a bag-of-words format
def bag_of_words(s, words):
    bag = [0] * len(words)
    # Stem every token before matching: 'words' holds the stemmed vocabulary,
    # so filtering on the raw token (as the original did) silently drops
    # inputs whose stemmed form is in the vocabulary.
    s_words = [stemmer.stem(word.lower()) for word in word_tokenize(s)]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)

# Chat function (Chatbot)
def chat(message, history):
    history = history or []
    message = message.lower()
    try:
        # Predict the tag
        results = model.predict([bag_of_words(message, words)])
        results_index = np.argmax(results)
        tag = labels[results_index]

        # Match tag with intent and choose a random response
        for tg in data["intents"]:
            if tg['tag'] == tag:
                response = random.choice(tg['responses'])
                break
        else:
            response = "I'm sorry, I didn't understand that. Could you please rephrase?"
    except Exception as e:
        response = f"An error occurred: {str(e)}"
    history.append((message, response))
    return history, history
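# For reference, a minimal 'intents.json' entry in the shape chat() expects.
# "tag" and "responses" are the fields read above; "patterns" is the field
# conventionally consumed when building data.pickle. The sample values are
# illustrative only:
#
# {
#   "intents": [
#     {
#       "tag": "greeting",
#       "patterns": ["Hi", "Hello", "How are you?"],
#       "responses": ["Hello! How are you feeling today?"]
#     }
#   ]
# }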
# Sentiment Analysis
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")

def analyze_sentiment(user_input):
    inputs = tokenizer_sentiment(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model_sentiment(**inputs)
    predicted_class = torch.argmax(outputs.logits, dim=1).item()
    sentiment = ["Negative", "Neutral", "Positive"][predicted_class]
    return f"**Predicted Sentiment:** {sentiment}"

# Emotion Detection
tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)

def detect_emotion(user_input):
    result = pipe(user_input)
    return result[0]['label']

# Static resource suggestions keyed by the detected emotion. Rows are
# collected in a plain list and the DataFrame is built once at the end,
# since DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
def provide_suggestions(emotion):
    rows = []
    if emotion == 'joy':
        rows.append({
            "Subject": "Relaxation Techniques",
            "Article URL": "https://www.helpguide.org/mental-health/meditation/mindful-breathing-meditation",
            "Video URL": "https://youtu.be/m1vaUGtyo-A",
        })
        rows.append({
            "Subject": "Dealing with Stress",
            "Article URL": "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety",
            "Video URL": "https://youtu.be/MIc299Flibs",
        })
    elif emotion == 'anger':
        rows.append({
            "Subject": "Managing Anger",
            "Article URL": "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety",
            "Video URL": "https://youtu.be/MIc299Flibs",
        })
    elif emotion == 'fear':
        rows.append({
            "Subject": "Coping with Anxiety",
            "Article URL": "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety",
            "Video URL": "https://youtu.be/yGKKz185M5o",
        })
    elif emotion == 'sadness':
        rows.append({
            "Subject": "Dealing with Sadness",
            "Article URL": "https://www.helpguide.org/mental-health/anxiety/tips-for-dealing-with-anxiety",
            "Video URL": "https://youtu.be/-e-4Kx5px_I",
        })
    elif emotion == 'surprise':
        rows.append({
            "Subject": "Managing Stress",
            "Article URL": "https://www.health.harvard.edu/health-a-to-z",
            "Video URL": "https://youtu.be/m1vaUGtyo-A",
        })
    return pd.DataFrame(rows, columns=["Subject", "Article URL", "Video URL"])

# Google Places API to get nearby wellness professionals
api_key = "GOOGLE_API_KEY"  # Replace with your API key

def get_places_data(query, location, radius, api_key, next_page_token=None):
    url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
    params = {"query": query, "location": location, "radius": radius, "key": api_key}
    if next_page_token:
        params["pagetoken"] = next_page_token
    response = requests.get(url, params=params)
    return response.json() if response.status_code == 200 else None

def get_all_places(query, location, radius, api_key):
    all_results = []
    next_page_token = None
    while True:
        data = get_places_data(query, location, radius, api_key, next_page_token)
        if not data:
            break
        for place in data.get('results', []):
            name = place.get("name")
            address = place.get("formatted_address")
            # Note: Text Search results do not include a website field; getting
            # one would require a follow-up Place Details request per place_id.
            website = place.get("website", "Not available")
            all_results.append([name, address, website])
        next_page_token = data.get('next_page_token')
        if not next_page_token:
            break
        time.sleep(2)  # a next_page_token takes a moment to become valid on Google's side
    return all_results

def search_wellness_professionals(location):
    query = "therapist OR counselor OR mental health professional"
    radius = 50000  # metres (the Places API maximum)
    google_places_data = get_all_places(query, location, radius, api_key)
    if google_places_data:
        return pd.DataFrame(google_places_data, columns=["Name", "Address", "Website"])
    return pd.DataFrame([["No data found.", "", ""]], columns=["Name", "Address", "Website"])

# Gradio Interface
def gradio_interface(message, location, state):
    history = state or []  # If state is None, initialize it as an empty list

    # Stage 1: Mental Health Chatbot
    history, _ = chat(message, history)

    # Stage 2: Sentiment Analysis
    sentiment = analyze_sentiment(message)

    # Stage 3: Emotion Detection and Suggestions
    emotion = detect_emotion(message)
    suggestions = provide_suggestions(emotion)

    # Stage 4: Search for Wellness Professionals
    wellness_results = search_wellness_professionals(location)

    # Return the results in tabular form within the Gradio interface
    return history, sentiment, emotion, suggestions, wellness_results, history  # Last 'history' is for state

# Gradio interface setup
iface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Enter your message", placeholder="How are you feeling today?"),
        gr.Textbox(label="Enter your location (e.g., Hawaii, Oahu)", placeholder="Your location"),
        gr.State(),  # One state input
    ],
    outputs=[
        gr.Chatbot(label="Chat History"),
        gr.Textbox(label="Sentiment Analysis"),
        gr.Textbox(label="Detected Emotion"),
        gr.Dataframe(label="Suggestions & Resources"),
        gr.Dataframe(label="Nearby Wellness Professionals"),  # Display results as a table
        gr.State(),  # One state output
    ],
    allow_flagging="never",
    title="Mental Wellbeing App with AI Assistance",
    description="This app provides a mental health chatbot, sentiment analysis, emotion detection, and wellness professional search functionality.",
)

# Launch Gradio interface
if __name__ == "__main__":
    iface.launch(debug=True, share=True)  # Set share=True to create a public link