|
|
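"""Gradio app for medical image Q&A with Google's MedGemma-4B.

Loads the gated google/medgemma-4b-it model, trying the high-level
pipeline first and falling back to an explicit processor + model pair.
Requires transformers >= 4.52.0 and an HF_TOKEN environment variable.
For educational and research use only.
"""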
|
import gradio as gr |
|
import torch |
|
import transformers
from transformers import AutoProcessor, AutoModelForImageTextToText, pipeline
|
from PIL import Image |
|
import os |
|
import logging |
|
from huggingface_hub import login |
|
|
|
|
|
logging.basicConfig(level=logging.INFO) |
|
logger = logging.getLogger(__name__) |
|
|
|
|
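# MedGemma is a gated model on the Hub; HF_TOKEN must belong to an account
# that has accepted the model's usage terms.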
|
def authenticate_hf(): |
|
"""Authenticate with Hugging Face using token""" |
|
try: |
|
hf_token = os.getenv('HF_TOKEN') |
|
if hf_token: |
|
login(token=hf_token) |
|
logger.info("β
Authenticated with Hugging Face") |
|
return True |
|
else: |
|
logger.warning("β οΈ No HF_TOKEN found in environment") |
|
return False |
|
except Exception as e: |
|
logger.error(f"β Authentication failed: {e}") |
|
return False |
|
|
|
|
|
MODEL_ID = "google/medgemma-4b-it" |
|
|
|
|
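# Exactly one loading path populates these: either pipeline_model, or the
# processor/model pair; analyze_medical_image() checks which is available.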
|
model = None |
|
processor = None |
|
pipeline_model = None |
|
|
|
def load_model(): |
|
"""Load MedGemma model using the recommended approach""" |
|
global model, processor, pipeline_model |
|
|
|
try: |
|
|
|
auth_success = authenticate_hf() |
|
if not auth_success: |
|
logger.error("β Authentication required for MedGemma") |
|
return False |
|
|
|
logger.info(f"Loading MedGemma: {MODEL_ID}") |
|
|
|
|
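        # Preferred path: the high-level pipeline handles preprocessing and
        # chat templating internally.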
|
try: |
|
logger.info("Attempting to load using pipeline...") |
|
            pipeline_model = pipeline(
                "image-text-to-text",
                model=MODEL_ID,
                # bfloat16 halves GPU memory for the 4B model; float32 keeps CPU inference working
                torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None,
                trust_remote_code=True
            )
            logger.info("✅ Pipeline model loaded successfully!")
|
return True |
|
except Exception as e: |
|
logger.warning(f"Pipeline loading failed: {e}") |
|
|
|
|
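        # Fallback path: load the processor and model explicitly and drive
        # chat templating and generation ourselves.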
|
logger.info("Attempting direct model loading...") |
|
|
|
|
|
processor = AutoProcessor.from_pretrained( |
|
MODEL_ID, |
|
trust_remote_code=True, |
|
token=True |
|
) |
|
logger.info("β
Processor loaded") |
|
|
|
|
|
        model = AutoModelForImageTextToText.from_pretrained(
            MODEL_ID,
            torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
            device_map="auto" if torch.cuda.is_available() else None,
            trust_remote_code=True,
            token=True
        )
        logger.info("✅ Model loaded successfully!")
|
return True |
|
|
|
except Exception as e: |
|
logger.error(f"β Error loading model: {str(e)}") |
|
import traceback |
|
logger.error(f"Full traceback: {traceback.format_exc()}") |
|
return False |
|
|
|
|
|
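# Load once at import time so the interface can report readiness on startup.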
model_loaded = load_model() |
|
|
|
def analyze_medical_image(image, clinical_question, patient_history=""): |
|
"""Analyze medical image with clinical context""" |
|
global model, processor, pipeline_model |
|
|
|
|
|
if not model_loaded: |
|
return """β **Model Loading Issue** |
|
|
|
MedGemma failed to load. This is likely due to: |
|
|
|
1. **Transformers version**: Make sure you're using transformers >= 4.52.0 |
|
2. **Authentication**: Ensure HF_TOKEN is properly set |
|
3. **Model compatibility**: MedGemma requires the latest transformers library |
|
|
|
**Status**: Model loading failed. Please try refreshing the page or contact support.""" |
|
|
|
if image is None: |
|
return "β οΈ Please upload a medical image first." |
|
|
|
if not clinical_question.strip(): |
|
return "β οΈ Please provide a clinical question." |
|
|
|
try: |
|
|
|
if pipeline_model is not None: |
|
logger.info("Using pipeline for analysis...") |
|
|
|
|
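            # Chat-format input: one user turn carrying both the image and
            # the combined clinical prompt.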
|
messages = [ |
|
{ |
|
"role": "user", |
|
"content": [ |
|
{"type": "image", "image": image}, |
|
{"type": "text", "text": f"Patient History: {patient_history}\n\nClinical Question: {clinical_question}\n\nAs MedGemma, provide a detailed medical analysis of this image for educational purposes only."} |
|
] |
|
} |
|
] |
|
|
|
|
|
            # The image-text-to-text pipeline takes chat messages via the
            # text argument; with chat input, generated_text is the whole
            # conversation and the model's reply is the last message.
            result = pipeline_model(text=messages, max_new_tokens=1000)
            generated = result[0]['generated_text'] if isinstance(result, list) else result['generated_text']
            response = generated[-1]['content'] if isinstance(generated, list) else generated
|
|
|
|
|
elif model is not None and processor is not None: |
|
logger.info("Using direct model for analysis...") |
|
|
|
|
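            # Direct path: an explicit system turn plus a user turn mixing
            # text and image content parts.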
|
messages = [ |
|
{ |
|
"role": "system", |
|
"content": [{"type": "text", "text": "You are MedGemma, an expert medical AI assistant. Provide detailed medical analysis for educational purposes only."}] |
|
}, |
|
{ |
|
"role": "user", |
|
"content": [ |
|
{"type": "text", "text": f"Patient History: {patient_history}\n\nClinical Question: {clinical_question}"}, |
|
{"type": "image", "image": image} |
|
] |
|
} |
|
] |
|
|
|
|
|
            inputs = processor.apply_chat_template(
                messages,
                add_generation_prompt=True,
                tokenize=True,
                return_dict=True,
                return_tensors="pt"
            ).to(model.device)  # move tensors to wherever device_map placed the model
|
|
|
|
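            # Low temperature (0.3) keeps the clinical narrative focused
            # while still allowing some sampling diversity.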
|
with torch.inference_mode(): |
|
outputs = model.generate( |
|
**inputs, |
|
max_new_tokens=1000, |
|
do_sample=True, |
|
temperature=0.3, |
|
top_p=0.9 |
|
) |
|
|
|
|
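            # Decode only the newly generated tokens, skipping the prompt.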
|
response = processor.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True) |
|
|
|
else: |
|
return "β No model available for analysis. Please try refreshing the page." |
|
|
|
|
|
response = response.strip() |
|
|
|
|
|
disclaimer = """ |
|
|
|
--- |
|
### ⚠️ MEDICAL DISCLAIMER
|
**This analysis is for educational and research purposes only.** |
|
- This AI assistant is not a substitute for professional medical advice |
|
- Always consult qualified healthcare professionals for diagnosis and treatment |
|
- Do not make medical decisions based solely on this analysis |
|
- In case of medical emergency, contact emergency services immediately |
|
--- |
|
""" |
|
|
|
logger.info("β
Analysis completed successfully") |
|
return response + disclaimer |
|
|
|
except Exception as e: |
|
logger.error(f"β Error in analysis: {str(e)}") |
|
import traceback |
|
logger.error(f"Full traceback: {traceback.format_exc()}") |
|
return f"β Analysis failed: {str(e)}\n\nPlease try again with a different image or question." |
|
|
|
|
|
def create_interface(): |
|
with gr.Blocks( |
|
title="MedGemma Medical Analysis", |
|
theme=gr.themes.Soft(), |
|
css=""" |
|
.gradio-container { max-width: 1200px !important; } |
|
.disclaimer { background-color: #fef2f2; border: 1px solid #fecaca; border-radius: 8px; padding: 16px; margin: 16px 0; } |
|
.success { background-color: #f0f9ff; border: 1px solid #bae6fd; border-radius: 8px; padding: 16px; margin: 16px 0; } |
|
.warning { background-color: #fffbeb; border: 1px solid #fed7aa; border-radius: 8px; padding: 16px; margin: 16px 0; } |
|
""" |
|
) as demo: |
|
|
|
|
|
gr.Markdown(""" |
|
        # 🏥 MedGemma Medical Image Analysis
|
|
|
**Advanced Medical AI Assistant powered by Google's MedGemma-4B** |
|
|
|
Specialized in medical imaging across multiple modalities: |
|
        🫁 **Radiology** • 🔬 **Histopathology** • 👁️ **Ophthalmology** • 🩺 **Dermatology**
|
""") |
|
|
|
|
|
if model_loaded: |
|
method = "Pipeline" if pipeline_model else "Direct Model" |
|
gr.Markdown(f""" |
|
<div class="success"> |
|
            ✅ <strong>MEDGEMMA READY</strong><br>
|
Model loaded successfully using {method} method. Ready for medical image analysis. |
|
</div> |
|
""") |
|
else: |
|
gr.Markdown(""" |
|
<div class="warning"> |
|
            ⚠️ <strong>MODEL LOADING FAILED</strong><br>
|
MedGemma failed to load. Please ensure you have the latest transformers library and proper authentication. |
|
</div> |
|
""") |
|
|
|
|
|
gr.Markdown(""" |
|
<div class="disclaimer"> |
|
        ⚠️ <strong>IMPORTANT MEDICAL DISCLAIMER</strong><br>
|
This tool is for <strong>educational and research purposes only</strong>. |
|
Do not upload real patient data. Always consult qualified healthcare professionals. |
|
</div> |
|
""") |
|
|
|
with gr.Row(): |
|
|
|
with gr.Column(scale=1): |
|
gr.Markdown("## π€ Medical Image Upload") |
|
|
|
image_input = gr.Image( |
|
label="Medical Image", |
|
type="pil", |
|
height=300 |
|
) |
|
|
|
clinical_question = gr.Textbox( |
|
label="Clinical Question *", |
|
placeholder="Examples:\nβ’ Describe findings in this chest X-ray\nβ’ What pathological changes are visible?\nβ’ Provide differential diagnosis\nβ’ Identify abnormalities", |
|
lines=4 |
|
) |
|
|
|
patient_history = gr.Textbox( |
|
label="Patient History (Optional)", |
|
placeholder="e.g., 65-year-old male with chronic cough", |
|
lines=2 |
|
) |
|
|
|
with gr.Row(): |
|
clear_btn = gr.Button("ποΈ Clear", variant="secondary") |
|
analyze_btn = gr.Button("π Analyze", variant="primary", size="lg") |
|
|
|
|
|
gr.Markdown(f""" |
|
**Status:** {'β
Ready' if model_loaded else 'β Failed'} |
|
**Method:** {'Pipeline' if pipeline_model else 'Direct' if model else 'None'} |
|
**Device:** {'CUDA' if torch.cuda.is_available() else 'CPU'} |
|
**Transformers:** {getattr(__import__('transformers'), '__version__', 'Unknown')} |
|
""") |
|
|
|
|
|
with gr.Column(scale=1): |
|
gr.Markdown("## π Medical Analysis Results") |
|
|
|
output = gr.Textbox( |
|
label="AI Medical Analysis", |
|
lines=20, |
|
show_copy_button=True, |
|
placeholder="Upload a medical image and ask a clinical question..." if model_loaded else "Model unavailable - please check system status" |
|
) |
|
|
|
|
|
if model_loaded: |
|
with gr.Accordion("π Example Cases", open=False): |
|
examples = gr.Examples( |
|
examples=[ |
|
[ |
|
"https://upload.wikimedia.org/wikipedia/commons/c/c8/Chest_Xray_PA_3-8-2010.png", |
|
"Analyze this chest X-ray systematically. Comment on heart size, lung fields, and any abnormalities.", |
|
"Adult patient with respiratory symptoms" |
|
] |
|
], |
|
inputs=[image_input, clinical_question, patient_history] |
|
) |
|
|
|
|
|
analyze_btn.click( |
|
fn=analyze_medical_image, |
|
inputs=[image_input, clinical_question, patient_history], |
|
outputs=output, |
|
            show_progress="full"
|
) |
|
|
|
clear_btn.click( |
|
fn=lambda: (None, "", "", ""), |
|
outputs=[image_input, clinical_question, patient_history, output] |
|
) |
|
|
|
|
|
gr.Markdown(""" |
|
--- |
|
        ### 🔬 About MedGemma
|
|
|
MedGemma-4B is Google's specialized medical AI model requiring transformers >= 4.52.0. |
|
|
|
        ### 🔒 Privacy & Ethics
|
- Real-time processing, no data storage |
|
- Educational and research purposes only |
|
- No patient data should be uploaded |
|
|
|
**Model:** Google MedGemma-4B | **License:** Apache 2.0 |
|
""") |
|
|
|
return demo |
|
|
|
|
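# Host/port defaults expected by Hugging Face Spaces (0.0.0.0:7860).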
|
if __name__ == "__main__": |
|
demo = create_interface() |
|
demo.launch( |
|
server_name="0.0.0.0", |
|
server_port=7860, |
|
show_error=True |
|
) |