import os
import gradio as gr
import nltk
import numpy as np
import tflearn
import random
import json
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import googlemaps
import folium
import torch
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
# Force CPU execution and suppress TensorFlow log noise
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Download necessary NLTK resources
nltk.download("punkt")
nltk.download("punkt_tab")  # newer NLTK releases also need this resource for word_tokenize
stemmer = LancasterStemmer()
# Load intents and chatbot training data
with open("intents.json") as file:
    intents_data = json.load(file)

with open("data.pickle", "rb") as f:
    words, labels, training, output = pickle.load(f)
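# data.pickle is expected to contain the preprocessed chatbot training artifacts:
#   words    - stemmed vocabulary used for the bag-of-words encoding
#   labels   - intent tags, indexed by the model's output neurons
#   training - bag-of-words input vectors; output - one-hot intent targets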
# Build the chatbot model
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
chatbot_model = tflearn.DNN(net)
chatbot_model.load("MentalHealthChatBotmodel.tflearn")
# Hugging Face sentiment and emotion models
tokenizer_sentiment = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
model_sentiment = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
tokenizer_emotion = AutoTokenizer.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
model_emotion = AutoModelForSequenceClassification.from_pretrained("j-hartmann/emotion-english-distilroberta-base")
# Google Maps API Client
gmaps = googlemaps.Client(key=os.getenv("GOOGLE_API_KEY"))
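# NOTE: googlemaps.Client raises ValueError when the key is missing or malformed,
# so GOOGLE_API_KEY must be provided via the environment (e.g. as a Space secret).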
# Disease Prediction Code
def load_data():
    try:
        df = pd.read_csv("Training.csv")
        tr = pd.read_csv("Testing.csv")
    except FileNotFoundError:
        raise RuntimeError("Data files not found. Please ensure `Training.csv` and `Testing.csv` are uploaded correctly.")
    disease_dict = {
        'Fungal infection': 0, 'Allergy': 1, 'GERD': 2, 'Chronic cholestasis': 3, 'Drug Reaction': 4,
        'Peptic ulcer diseae': 5,  # spelling matches the dataset label
        'AIDS': 6, 'Diabetes': 7, 'Gastroenteritis': 8, 'Bronchial Asthma': 9,
        'Hypertension': 10, 'Migraine': 11, 'Cervical spondylosis': 12, 'Paralysis': 13,
        'Jaundice': 14, 'Malaria': 15, 'Chicken pox': 16, 'Dengue': 17, 'Typhoid': 18,
        'Hepatitis A': 19, 'Hepatitis B': 20, 'Hepatitis C': 21, 'Hepatitis D': 22, 'Hepatitis E': 23,
        'Alcoholic hepatitis': 24, 'Tuberculosis': 25, 'Common Cold': 26, 'Pneumonia': 27,
        'Heart attack': 28, 'Varicose veins': 29, 'Hypothyroidism': 30, 'Hyperthyroidism': 31,
        'Hypoglycemia': 32, 'Osteoarthritis': 33, 'Arthritis': 34
    }
    # Map disease names to integer class labels
    df.replace({'prognosis': disease_dict}, inplace=True)
    df['prognosis'] = df['prognosis'].astype(int)
    tr.replace({'prognosis': disease_dict}, inplace=True)
    tr['prognosis'] = tr['prognosis'].astype(int)
    return df, tr, disease_dict
# Train/test splits: every column except the final 'prognosis' label is a symptom indicator
df, tr, disease_dict = load_data()
l1 = list(df.columns[:-1])  # symptom feature names
X = df[l1]
y = df['prognosis']
X_test = tr[l1]
y_test = tr['prognosis']
def train_models():
    models = {
        "Decision Tree": DecisionTreeClassifier(),
        "Random Forest": RandomForestClassifier(),
        "Naive Bayes": GaussianNB()
    }
    trained_models = {}
    for model_name, model_obj in models.items():
        model_obj.fit(X, y)
        acc = accuracy_score(y_test, model_obj.predict(X_test))
        trained_models[model_name] = (model_obj, acc)
    return trained_models

trained_models = train_models()
def predict_disease(model, symptoms):
    # Encode the selected symptoms as a binary feature vector
    input_test = np.zeros(len(l1))
    for symptom in symptoms:
        if symptom in l1:
            input_test[l1.index(symptom)] = 1
    input_df = pd.DataFrame([input_test], columns=l1)  # keep feature names consistent with training
    prediction = model.predict(input_df)[0]
    if hasattr(model, 'predict_proba'):
        # Look up the predicted class via classes_ instead of assuming label == column index
        class_index = list(model.classes_).index(prediction)
        confidence = model.predict_proba(input_df)[0][class_index]
    else:
        confidence = None
    return {
        "disease": list(disease_dict.keys())[list(disease_dict.values()).index(prediction)],
        "confidence": confidence
    }
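# Example usage (symptom names here are hypothetical; valid values are the Training.csv feature columns in l1):
#   dt_model, dt_acc = trained_models["Decision Tree"]
#   predict_disease(dt_model, ["itching", "skin_rash"])  # -> {"disease": ..., "confidence": ...}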
def disease_prediction_interface(symptoms):
    # Drop unselected dropdowns (either None or the "None" placeholder)
    symptoms_selected = [s for s in symptoms if s and s != "None"]
    if len(symptoms_selected) < 3:
        return ["Please select at least 3 symptoms for accurate prediction."]
    results = []
    for model_name, (model, acc) in trained_models.items():
        prediction_info = predict_disease(model, symptoms_selected)
        predicted_disease = prediction_info["disease"]
        confidence_score = prediction_info["confidence"]
        result = f"{model_name} Prediction: Predicted Disease: **{predicted_disease}**"
        if confidence_score is not None:
            result += f" (Confidence: {confidence_score:.2f})"
        result += f" (Accuracy: {acc * 100:.2f}%)"
        results.append(result)
    return results
# Helper Functions (for chatbot)
def bag_of_words(s, words):
    bag = [0] * len(words)
    s_words = word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words if word.isalnum()]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
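# Illustration (hypothetical vocabulary): if words == ["hi", "help", "sad"], a message whose
# stemmed tokens include "help" encodes to array([0, 1, 0]): a binary presence vector with the
# same length as the input layer the tflearn model was trained on.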
def generate_chatbot_response(message, history):
    history = history or []
    try:
        result = chatbot_model.predict([bag_of_words(message, words)])
        tag = labels[np.argmax(result)]
        response = next(
            (random.choice(intent["responses"]) for intent in intents_data["intents"] if intent["tag"] == tag),
            "I'm sorry, I didn't understand that. 🤔"
        )
    except Exception as e:
        response = f"Error: {e}"
    history.append((message, response))
    return history, response
def analyze_sentiment(user_input):
    inputs = tokenizer_sentiment(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model_sentiment(**inputs)
    sentiment_class = torch.argmax(outputs.logits, dim=1).item()
    sentiment_map = ["Negative 😔", "Neutral 😐", "Positive 😊"]
    return f"Sentiment: {sentiment_map[sentiment_class]}"
def detect_emotion(user_input):
    pipe = pipeline("text-classification", model=model_emotion, tokenizer=tokenizer_emotion)
    result = pipe(user_input)
    emotion = result[0]["label"].lower().strip()
    emotion_map = {
        "joy": "Joy 😊",
        "anger": "Anger 😠",
        "sadness": "Sadness 😢",
        "fear": "Fear 😨",
        "surprise": "Surprise 😲",
        "neutral": "Neutral 😐",
    }
    return emotion_map.get(emotion, "Unknown 🤔"), emotion
def generate_suggestions(emotion):
    emotion_key = emotion.lower()
    suggestions = {
        # Replace with appropriate suggestions for each emotion
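        # Each entry maps an emotion key to a list of [title, link] pairs, e.g. (hypothetical):
        # "joy": [["Keep a gratitude journal", "https://example.com/gratitude"]],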
    }
    formatted_suggestions = [
        [title, f'<a href="{link}" target="_blank">{link}</a>']
        for title, link in suggestions.get(emotion_key, [["No specific suggestions available.", "#"]])
    ]
    return formatted_suggestions
def get_health_professionals_and_map(location, query):
    try:
        if not location or not query:
            return [], ""
        geo_location = gmaps.geocode(location)
        if geo_location:
            coords = geo_location[0]["geometry"]["location"]
            lat, lng = coords["lat"], coords["lng"]  # access by key rather than relying on dict value order
            places_result = gmaps.places_nearby(location=(lat, lng), radius=10000, keyword=query)["results"]
            professionals = []
            map_ = folium.Map(location=(lat, lng), zoom_start=13)
            for place in places_result:
                professionals.append([place['name'], place.get('vicinity', 'No address provided')])
                folium.Marker(
                    location=[place["geometry"]["location"]["lat"], place["geometry"]["location"]["lng"]],
                    popup=f"{place['name']}"
                ).add_to(map_)
            return professionals, map_._repr_html_()
        return [], ""
    except Exception:
        # Fail quietly and return empty results if geocoding or the Places lookup errors out
        return [], ""
# Main Application Logic
def app_function(user_input, location, query, symptom1, symptom2, symptom3, symptom4, symptom5, history):
    chatbot_history, _ = generate_chatbot_response(user_input, history)
    sentiment_result = analyze_sentiment(user_input)
    emotion_result, cleaned_emotion = detect_emotion(user_input)
    suggestions = generate_suggestions(cleaned_emotion)
    professionals, map_html = get_health_professionals_and_map(location, query)
    disease_results = disease_prediction_interface([symptom1, symptom2, symptom3, symptom4, symptom5])
    return (
        chatbot_history,
        sentiment_result,
        emotion_result,
        suggestions,
        professionals,
        map_html,
        "\n".join(disease_results),  # Textbox output expects a single string
    )
# CSS Styling
custom_css = """
body {
    font-family: 'Roboto', sans-serif;
    background-color: #3c6487;
    color: white;
}
h1 {
    background: #ffffff;
    color: #000000;
    border-radius: 8px;
    padding: 10px;
    font-weight: bold;
    text-align: center;
    font-size: 2.5rem;
}
textarea, input {
    background: transparent;
    color: black;
    border: 2px solid orange;
    padding: 8px;
    font-size: 1rem;
    caret-color: black;
    outline: none;
    border-radius: 8px;
}
textarea:focus, input:focus {
    background: transparent;
    color: black;
    border: 2px solid orange;
    outline: none;
}
.df-container {
    background: white;
    color: black;
    border: 2px solid orange;
    border-radius: 10px;
    padding: 10px;
    font-size: 14px;
    max-height: 400px;
    height: auto;
    overflow-y: auto;
}
#suggestions-title {
    text-align: center !important;
    font-weight: bold !important;
    color: white !important;
    font-size: 4.2rem !important;
    margin-bottom: 20px !important;
}
.gr-button {
    background-color: #ae1c93;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1), 0 2px 4px rgba(0, 0, 0, 0.06);
    transition: background-color 0.3s ease;
}
.gr-button:hover {
    background-color: #8f167b;
}
.gr-button:active {
    background-color: #7f156b;
}
"""
# Gradio Application
with gr.Blocks(css=custom_css) as app:
    gr.HTML("<h1>🌟 Well-Being Companion</h1>")
    with gr.Row():
        user_input = gr.Textbox(label="Please Enter Your Message Here")
        location = gr.Textbox(label="Your Current Location Here")
        query = gr.Textbox(label="Search Health Professionals Nearby")
    with gr.Row():
        symptom1 = gr.Dropdown(choices=["None"] + l1, label="Symptom 1")
        symptom2 = gr.Dropdown(choices=["None"] + l1, label="Symptom 2")
        symptom3 = gr.Dropdown(choices=["None"] + l1, label="Symptom 3")
        symptom4 = gr.Dropdown(choices=["None"] + l1, label="Symptom 4")
        symptom5 = gr.Dropdown(choices=["None"] + l1, label="Symptom 5")
    submit = gr.Button(value="Submit", variant="primary")
    chatbot = gr.Chatbot(label="Chat History")
    sentiment = gr.Textbox(label="Detected Sentiment")
    emotion = gr.Textbox(label="Detected Emotion")
    gr.Markdown("Suggestions", elem_id="suggestions-title")
    suggestions = gr.DataFrame(headers=["Title", "Link"])  # Suggestions DataFrame
    professionals = gr.DataFrame(label="Nearby Health Professionals", headers=["Name", "Address"])  # Professionals DataFrame
    map_html = gr.HTML(label="Interactive Map")
    disease_predictions = gr.Textbox(label="Disease Predictions")  # For Disease Prediction Results
    submit.click(
        app_function,
        # Gradio inputs must be a flat list of components, so the five symptom
        # dropdowns are passed individually and recombined inside app_function.
        inputs=[user_input, location, query, symptom1, symptom2, symptom3, symptom4, symptom5, chatbot],
        outputs=[chatbot, sentiment, emotion, suggestions, professionals, map_html, disease_predictions],
    )

app.launch()