from fastai.text.all import *
from transformers import AutoModelForSequenceClassification, AutoTokenizer, BlenderbotForConditionalGeneration, BlenderbotTokenizer
import torch
import gradio as gr
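# This Space bundles three demos: a fastai medical-symptom classifier, a
# Hugging Face psychiatric text classifier, and a BlenderBot-based lifestyle
# and nutrition chatbot. Note that facebook/blenderbot-3B is a ~3B-parameter
# model and needs several GB of memory; a smaller checkpoint such as
# facebook/blenderbot-400M-distill is a drop-in alternative if resources are tight.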
# Load the medical model
medical_learn = load_learner('model.pkl')

# Medical model configuration
medical_description = "Medical Diagnosis"
medical_categories = ['Allergy', 'Anemia', 'Bronchitis', 'Diabetes', 'Diarrhea', 'Fatigue', 'Flu', 'Malaria', 'Stress']

def classify_medical_text(txt):
    pred, idx, probs = medical_learn.predict(txt)
    return dict(zip(medical_categories, map(float, probs)))
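# Assumption: model.pkl was exported with learn.export() from a fastai text
# classifier whose label vocab matches medical_categories in this exact order;
# otherwise the probabilities get paired with the wrong labels. Optional sanity
# check against the learner's own label vocab:
# assert list(medical_learn.dls.vocab[1]) == medical_categories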
# Load the psychiatric model from Hugging Face
psychiatric_model_name = "nlp4good/psych-search"  # Replace with the appropriate model
psychiatric_tokenizer = AutoTokenizer.from_pretrained(psychiatric_model_name)
psychiatric_model = AutoModelForSequenceClassification.from_pretrained(psychiatric_model_name)

# Psychiatric model configuration
psychiatric_description = "Psychiatric Analysis"
psychiatric_labels = ['Depression', 'Anxiety', 'Bipolar Disorder', 'PTSD', 'OCD', 'Stress', 'Schizophrenia']  # Adjust based on the model

def classify_psychiatric_text(txt):
    inputs = psychiatric_tokenizer(txt, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = psychiatric_model(**inputs)
    logits = outputs.logits
    probabilities = torch.softmax(logits, dim=1).squeeze().tolist()
    return dict(zip(psychiatric_labels, probabilities))
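# Assumption: psychiatric_labels lines up with the checkpoint's output head.
# The list above is a placeholder; if the checkpoint ships its own mapping,
# it can be read from the config instead, e.g.:
# psychiatric_labels = [psychiatric_model.config.id2label[i]
#                       for i in range(psychiatric_model.config.num_labels)]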
# Load BlenderBot for Lifestyle and Nutrition Chatbot
blender_model_name = "facebook/blenderbot-3B"  # Pre-trained BlenderBot 3B model
blender_tokenizer = BlenderbotTokenizer.from_pretrained(blender_model_name)
blender_model = BlenderbotForConditionalGeneration.from_pretrained(blender_model_name)

# Chat function for Lifestyle and Nutrition.
# BlenderBot is an encoder-decoder model, so the conversation is kept as plain
# text and re-encoded each turn; generate() returns only the new reply.
chat_history = []  # list of (user, bot) string pairs

def chatbot_response(user_input):
    # Condition the model on the most recent turns plus the new message.
    context_turns = [t for pair in chat_history[-2:] for t in pair] + [user_input]
    context = "  ".join(context_turns)
    inputs = blender_tokenizer(context, return_tensors="pt", truncation=True)
    reply_ids = blender_model.generate(**inputs, max_length=128)
    response = blender_tokenizer.decode(reply_ids[0], skip_special_tokens=True).strip()
    chat_history.append((user_input, response))
    return response

def clear_chat():
    global chat_history
    chat_history = []
    return []
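# Example (illustrative) usage:
#   chatbot_response("What are good sources of iron?")  -> one BlenderBot reply string
#   clear_chat()                                        -> resets the stored turns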
# Gradio Interfaces
medical_text = gr.Textbox(lines=2, label='Describe your symptoms in detail')
medical_label = gr.Label()
medical_examples = ['I feel short of breath and have a high fever.', 'My throat hurts and I keep sneezing.', 'I am always thirsty.']

psychiatric_text = gr.Textbox(lines=2, label='Describe your mental health concerns in detail')
psychiatric_label = gr.Label()
psychiatric_examples = ['I feel hopeless and have no energy.', 'I am unable to concentrate and feel anxious all the time.', 'I have recurring intrusive thoughts.']

lifestyle_chatbot = gr.Chatbot(label="Chat with me about diet and nutrition!")
lifestyle_msg = gr.Textbox(placeholder="Ask your question here...", label="Your Question")
lifestyle_clear = gr.Button("Clear Chat")

def user_message(input_text):
    # Return the full (user, bot) history for the Chatbot component rather than
    # mutating the component object, plus an empty string to clear the textbox.
    if not input_text.strip():
        return chat_history, ""
    chatbot_response(input_text)  # appends the new (user, bot) pair to chat_history
    return chat_history, ""
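# Note: under gr.Interface the "Clear Chat" button is never rendered or wired up;
# hooking clear_chat() to lifestyle_clear would require rebuilding this tab with
# gr.Blocks and something like lifestyle_clear.click(clear_chat, None, lifestyle_chatbot).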
# Lifestyle & Nutrition Interface
# (live=True is omitted so the 3B model runs once per submitted question, not on every keystroke)
lifestyle_interface = gr.Interface(
    fn=user_message,
    inputs=[lifestyle_msg],
    outputs=[lifestyle_chatbot, lifestyle_msg],
    title="Nutritionist Chatbot",
    description="Ask me anything about diet, food, and nutrition!"
)
# Medical Diagnosis Interface
medical_interface = gr.Interface(
    fn=classify_medical_text,
    inputs=medical_text,
    outputs=medical_label,
    examples=medical_examples,
    description=medical_description,
)

# Psychiatric Analysis Interface
psychiatric_interface = gr.Interface(
    fn=classify_psychiatric_text,
    inputs=psychiatric_text,
    outputs=psychiatric_label,
    examples=psychiatric_examples,
    description=psychiatric_description,
)

# Combine interfaces using Tabs
app = gr.TabbedInterface(
    [medical_interface, psychiatric_interface, lifestyle_interface],
    ["Medical Diagnosis", "Psychiatric Analysis", "Lifestyle & Nutrition Chat"]
)

app.launch(inline=False)
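# Launch notes (optional): calling app.queue() before launch() helps long
# BlenderBot generations avoid request timeouts, and launch(share=True) exposes
# a public link when running outside Hugging Face Spaces.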