# medical_advice/app.py
import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
# Model configuration
REPO_ID = "forestav/medical_model"
MODEL_FILE = "unsloth.F16.gguf"
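
# Dependencies (a sketch of the expected environment, not pinned versions):
#   pip install gradio huggingface_hub llama-cpp-python
# Note: llama-cpp-python compiles a CPU build by default; GPU builds need the
# appropriate CMake flags at install time.
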
def download_model():
    """
    Download the model from Hugging Face Hub if not already present.
    """
    try:
        model_path = hf_hub_download(
            repo_id=REPO_ID,
            filename=MODEL_FILE,
        )
        return model_path
    except Exception as e:
        print(f"Error downloading model: {e}")
        return None
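
# For reproducible deployments, hf_hub_download accepts a `revision` argument
# to pin a branch or commit (sketch; "main" is just the default branch, shown
# here as a placeholder for a specific commit hash):
# model_path = hf_hub_download(repo_id=REPO_ID, filename=MODEL_FILE, revision="main")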

def load_model(model_path):
    """
    Load the GGUF model using llama_cpp.
    """
    try:
        model = Llama(
            model_path=model_path,
            n_ctx=4096,    # Adjust context window as needed
            n_batch=512,   # Batch size for prompt processing
            verbose=False  # Set to True for detailed loading info
        )
        return model
    except Exception as e:
        print(f"Error loading model: {e}")
        return None
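
# Optional GPU offload (a sketch, assuming llama-cpp-python was compiled with
# CUDA or Metal support; n_gpu_layers=-1 offloads all layers to the GPU):
# model = Llama(model_path=model_path, n_ctx=4096, n_batch=512, n_gpu_layers=-1)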

def generate_medical_response(model, prompt, max_tokens=300):
    """
    Generate a medical-advice response using the loaded model.
    """
    try:
        # Generate response
        output = model.create_chat_completion(
            messages=[
                {"role": "system", "content": "You are a professional medical assistant. If you don't have an answer, say I don't know."},
                {"role": "user", "content": prompt}
            ],
            max_tokens=max_tokens,
            temperature=1.5,  # Unusually high; lower for more conservative answers
            min_p=0.1,        # min-p sampling drops tokens below 10% of the top token's probability
        )
        # Extract and return the response text
        return output['choices'][0]['message']['content']
    except Exception as e:
        return f"An error occurred while generating a response: {e}"

def medical_chatbot_interface(message, history):
    """
    Gradio interface function for the medical chatbot.
    """
    # Lazily load the model on first use and cache it as a function attribute;
    # checking for None (rather than hasattr) lets a failed load retry on the
    # next request instead of caching the failure
    if getattr(medical_chatbot_interface, 'model', None) is None:
        model_path = download_model()
        if not model_path:
            return "Failed to download model"
        medical_chatbot_interface.model = load_model(model_path)
        if not medical_chatbot_interface.model:
            return "Failed to load model"
    # Generate response
    response = generate_medical_response(medical_chatbot_interface.model, message)
    return response
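
# The interface above ignores `history`, so every reply is single-turn. A
# multi-turn sketch (assumptions: history arrives as Gradio's default list of
# (user, assistant) tuples, and build_messages is a hypothetical helper):
def build_messages(message, history):
    messages = [{"role": "system", "content": "You are a professional medical assistant. If you don't have an answer, say I don't know."}]
    for user_turn, assistant_turn in history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})
    return messages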

# Create Gradio interface with a modern, professional, medical-themed UI
def create_medical_chatbot_ui():
    # Modern, professional medical-themed CSS
    modern_medical_css = """
    :root {
        --primary-color: #2c7da0;
        --secondary-color: #468faf;
        --background-color: #f8fbfd;
        --text-color: #333;
        --card-background: #ffffff;
    }
    body {
        font-family: 'Inter', 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
        background-color: var(--background-color);
        color: var(--text-color);
        line-height: 1.6;
    }
    .gradio-container {
        background-color: var(--background-color);
        max-width: 800px;
        margin: 0 auto;
        padding: 20px;
        border-radius: 12px;
        box-shadow: 0 10px 25px rgba(0, 0, 0, 0.05);
    }
    .chatbot-container {
        background-color: var(--card-background);
        border-radius: 12px;
        border: 1px solid rgba(44, 125, 160, 0.1);
        overflow: hidden;
    }
    .message-input {
        border: 2px solid var(--primary-color);
        border-radius: 8px;
        padding: 12px;
        font-size: 16px;
        transition: all 0.3s ease;
    }
    .message-input:focus {
        outline: none;
        border-color: var(--secondary-color);
        box-shadow: 0 0 0 3px rgba(44, 125, 160, 0.1);
    }
    .submit-button {
        background-color: var(--primary-color);
        color: white;
        border: none;
        border-radius: 8px;
        padding: 12px 20px;
        font-weight: 600;
        transition: all 0.3s ease;
    }
    .submit-button:hover {
        background-color: var(--secondary-color);
    }
    /* Chat message styling */
    .message {
        max-width: 80%;
        margin: 10px 0;
        padding: 12px 16px;
        border-radius: 12px;
        line-height: 1.5;
    }
    .user-message {
        background-color: var(--primary-color);
        color: white;
        align-self: flex-end;
        margin-left: auto;
    }
    .bot-message {
        background-color: #f0f4f8;
        color: var(--text-color);
        align-self: flex-start;
    }
    """

    # Create the Gradio chat interface with the modern design
    demo = gr.ChatInterface(
        fn=medical_chatbot_interface,
        title="🩺 MediAssist: AI Health Companion",
        description="Get professional medical insights and guidance. Always consult a healthcare professional for personalized medical advice. 🌡️",
        theme='soft',
        css=modern_medical_css
    )
    return demo

# Launch the app
if __name__ == "__main__":
    # Create and launch the Gradio app
    medical_chatbot = create_medical_chatbot_ui()
    medical_chatbot.launch(
        server_name="0.0.0.0",  # Make accessible outside the local machine
        server_port=7860,
        share=False             # Set to True to generate a public Gradio link
    )
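
# Under concurrent traffic, Gradio's request queue keeps the single in-process
# model from being hit by several requests at once (sketch; max_size is an
# arbitrary choice, not a value from the original app):
# medical_chatbot.queue(max_size=16).launch(server_name="0.0.0.0", server_port=7860)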