import os
import json
import pickle
import random

import gradio as gr
import nltk
import numpy as np
import tflearn
import torch
import pandas as pd
import googlemaps
import folium
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

# Suppress TensorFlow warnings and force CPU execution
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

# Download necessary NLTK resources
# (newer NLTK releases may also require the "punkt_tab" resource for word_tokenize)
nltk.download("punkt")
stemmer = LancasterStemmer()

# Load intents and chatbot training data
with open("intents.json") as file:
    intents_data = json.load(file)

with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)

# Build the chatbot model
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
chatbot_model = tflearn.DNN(net)
chatbot_model.load("MentalHealthChatBotmodel.tflearn")

# Hugging Face sentiment and emotion models
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")

# Google Maps API Client
gmaps = googlemaps.Client(key=os.getenv("GOOGLE_API_KEY"))

# Disease Prediction Code
def load_data():
    df = pd.read_csv("Training.csv")
    tr = pd.read_csv("Testing.csv")
    disease_dict = {
        # Disease encoding dictionary...
    }
    df.replace({'prognosis': disease_dict}, inplace=True)
    df = df.infer_objects(copy=False)
    tr.replace({'prognosis': disease_dict}, inplace=True)
    tr = tr.infer_objects(copy=False)
    return df, tr, disease_dict

df, tr, disease_dict = load_data()
l1 = list(df.columns[:-1])
X = df[l1]
y = df['prognosis']
X_test = tr[l1]
y_test = tr['prognosis']

def train_models():
    models = {
        "Decision Tree": DecisionTreeClassifier(),
        "Random Forest": RandomForestClassifier(),
        "Naive Bayes": GaussianNB()
    }
    trained_models = {}
    for model_name, model_obj in models.items():
        model_obj.fit(X, y)
        acc = accuracy_score(y_test, model_obj.predict(X_test))
        trained_models[model_name] = (model_obj, acc)
    return trained_models

trained_models = train_models()

def predict_disease(model, symptoms):
    # Build a binary symptom vector in the same column order as the training data
    input_test = np.zeros(len(l1))
    for symptom in symptoms:
        if symptom in l1:
            input_test[l1.index(symptom)] = 1
    prediction = model.predict([input_test])[0]
    # Map the encoded label back to its disease name
    return list(disease_dict.keys())[list(disease_dict.values()).index(prediction)]

def disease_prediction_interface(symptoms):
    symptoms_selected = [s for s in symptoms if s != "None"]
    if len(symptoms_selected) < 3:
        return "Please select at least 3 symptoms for accurate prediction."
    results = []
    for model_name, (model, acc) in trained_models.items():
        prediction = predict_disease(model, symptoms_selected)
        results.append(
            f"{model_name} Prediction: Predicted Disease: **{prediction}** (Accuracy: **{acc * 100:.2f}%**)"
        )
    return results

# Helper Functions (for chatbot)
def bag_of_words(s, words):
    bag = [0] * len(words)
    s_words = word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)

def generate_chatbot_response(message, history):
    history = history or []
    try:
        result = chatbot_model.predict([bag_of_words(message, words)])
        tag = labels[np.argmax(result)]
        response = "I'm sorry, I didn't understand that. 🤔"
        for intent in intents_data["intents"]:
            if intent["tag"] == tag:
                response = random.choice(intent["responses"])
                break
    except Exception as e:
        response = f"Error: {e}"
    history.append((message, response))
    return history, response

def analyze_sentiment(user_input):
    inputs = tokenizer_sentiment(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model_sentiment(**inputs)
    sentiment_class = torch.argmax(outputs.logits, dim=1).item()
    sentiment_map = ["Negative 😔", "Neutral 😐", "Positive 😊"]
    return f"Sentiment: {sentiment_map[sentiment_class]}"

def detect_emotion(user_input):
    pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
    result = pipe(user_input)
    emotion = result[0]["label"].lower().strip()
    emotion_map = {
        "joy": "Joy 😊",
        "anger": "Anger 😠",
        "sadness": "Sadness 😢",
        "fear": "Fear 😨",
        "surprise": "Surprise 😲",
        "neutral": "Neutral 😐",
    }
    return emotion_map.get(emotion, "Unknown 🤔"), emotion

def generate_suggestions(emotion):
    emotion_key = emotion.lower()
    suggestions = {
        # Suggestions based on emotion...
    }
    formatted_suggestions = [
        [title, f'{link}']
        for title, link in suggestions.get(emotion_key, [["No specific suggestions available.", "#"]])
    ]
    return formatted_suggestions

def get_health_professionals_and_map(location, query):
    try:
        if not location or not query:
            return [], ""
        geo_location = gmaps.geocode(location)
        if geo_location:
            # Read lat/lng explicitly rather than relying on dict value ordering
            lat = geo_location[0]["geometry"]["location"]["lat"]
            lng = geo_location[0]["geometry"]["location"]["lng"]
            places_result = gmaps.places_nearby(location=(lat, lng), radius=10000, keyword=query)["results"]
            professionals = []
            map_ = folium.Map(location=(lat, lng), zoom_start=13)
            for place in places_result:
                professionals.append([place['name'], place.get('vicinity', 'No address provided')])
                folium.Marker(
                    location=[place["geometry"]["location"]["lat"], place["geometry"]["location"]["lng"]],
                    popup=f"{place['name']}"
                ).add_to(map_)
            return professionals, map_._repr_html_()
        return [], ""
    except Exception:
        return [], ""

# Main Application Logic
def app_function(user_input, location, query, symptoms, history):
    chatbot_history, _ = generate_chatbot_response(user_input, history)
    sentiment_result = analyze_sentiment(user_input)
    emotion_result, cleaned_emotion = detect_emotion(user_input)
    suggestions = generate_suggestions(cleaned_emotion)
    professionals, map_html = get_health_professionals_and_map(location, query)
    disease_results = disease_prediction_interface(symptoms)
    return (
        chatbot_history,
        sentiment_result,
        emotion_result,
        suggestions,
        professionals,
        map_html,
        disease_results
    )

# CSS Styling
custom_css = """
body {
    font-family: 'Roboto', sans-serif;
    background-color: #3c6487;
    color: white;
}
h1 {
    background: #ffffff;
    color: #000000;
    border-radius: 8px;
    padding: 10px;
    font-weight: bold;
    text-align: center;
    font-size: 2.5rem;
}
textarea, input {
    background: transparent;
    color: black;
    border: 2px solid orange;
    padding: 8px;
    font-size: 1rem;
    caret-color: black;
    outline: none;
    border-radius: 8px;
}
textarea:focus, input:focus {
    background: transparent;
    color: black;
    border: 2px solid orange;
    outline: none;
}
.df-container {
    background: white;
    color: black;
    border: 2px solid orange;
    border-radius: 10px;
    padding: 10px;
    font-size: 14px;
    max-height: 400px;
    height: auto;
    overflow-y: auto;
}
#suggestions-title {
    text-align: center !important;
    font-weight: bold !important;
    color: white !important;
    font-size: 4.2rem !important;
    margin-bottom: 20px !important;
}
.gr-button {
    background-color: #ae1c93;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1), 0 2px 4px rgba(0, 0, 0, 0.06);
    transition: background-color 0.3s ease;
}
.gr-button:hover {
    background-color: #8f167b;
}
.gr-button:active {
    background-color: #7f156b;
}
"""

# Gradio Application
with gr.Blocks(css=custom_css) as app:
    gr.HTML("