Upload 3 files
- .gitattributes +1 -0
- app.py +225 -0
- images/Infection.jpg +3 -0
- requirements.txt +6 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+images/Infection.jpg filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,225 @@
import gradio as gr
from transformers import pipeline
from PIL import Image
import torch
import os
import spaces
import time

# Initialize the model pipeline
print("Loading MedGemma model...")
pipe = pipeline(
    "image-text-to-text",
    model="unsloth/medgemma-4b-it-bnb-4bit",
    torch_dtype=torch.bfloat16,
    # device="cuda" if torch.cuda.is_available() else "cpu",
    device_map="auto",
)
print("Model loaded successfully!")


@spaces.GPU()
def analyze_img(image, custom_prompt=None):
    """
    Analyze a medical image with the MedGemma model, streaming the response.

    This is a generator, so status and error messages must be yielded;
    a plain `return value` inside a generator is never shown to the user.
    """
    if image is None:
        yield "Please upload an image first."
        return

    try:
        # System prompt for the model
        system_prompt_text = """You are an expert medical AI assistant with years of experience in interpreting medical images. Your purpose is to assist qualified clinicians by providing a detailed analysis of the provided medical image."""
        # Use the custom prompt if provided, otherwise fall back to the default
        if custom_prompt and custom_prompt.strip():
            prompt_text = custom_prompt.strip()
        else:
            prompt_text = "Describe this image in detail, including any abnormalities or notable findings."

        messages = [
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": system_prompt_text,
                    }
                ],
            },
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt_text},
                    {"type": "image", "image": image},
                ],
            },
        ]

        # Generate the analysis
        output = pipe(text=messages, max_new_tokens=1024)
        full_response = output[0]["generated_text"][-1]["content"]

        # Stream the response character by character for a typing effect
        partial_message = ""
        for char in full_response:
            partial_message += char
            time.sleep(0.01)  # small delay to make the typing visible
            yield partial_message

    except Exception as e:
        yield f"Error analyzing image: {e}"


def load_sample_image():
    """Load the sample image if it exists."""
    sample_path = "./images/Infection.jpg"
    if os.path.exists(sample_path):
        return Image.open(sample_path)
    return None


# Create the Gradio interface
with gr.Blocks(
    theme=gr.themes.Citrus(),
    title="MedGemma",
    css="""
    .header {
        text-align: center;
        background: linear-gradient(135deg, #f5af19 0%, #f12711 100%);
        color: white;
        padding: 2rem;
        border-radius: 10px;
        margin-bottom: 2rem;
    }
    .warning {
        background-color: #fff0e6;
        border: 3px solid #ffab73;
        border-radius: 8px;
        padding: 1rem;
        margin: 1rem 0;
        color: #8c2b00;
    }
    .gradio-container {
        max-width: 1200px;
        margin: auto;
    }
    .warning strong {
        color: inherit;
    }
    """,
) as demo:

    # Header
    gr.HTML(
        """
        <div class="header">
            <h1>MedGemma Medical Image Analysis and QnA</h1>
            <p>Advanced medical image analysis powered by Google's MedGemma</p>
        </div>
        """
    )

    # Warning disclaimer
    gr.HTML(
        """
        <div class="warning">
            <strong>Medical Disclaimer:</strong> This model is for educational and research purposes only.
            It should not be used as a substitute for professional medical diagnosis or treatment.
            Always consult qualified healthcare professionals for medical advice.
        </div>
        """
    )

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Upload Medical Image (Radiology, Pathology, Dermatology, CT, X-Ray)")

            # Image input
            image_input = gr.Image(label="Input Image", type="pil", height=400, sources=["upload", "clipboard"])

            # Sample image button
            sample_btn = gr.Button("Load Sample Image", variant="secondary", size="sm")

            # Custom prompt input
            gr.Markdown("### Custom Analysis Prompt (Optional)")
            custom_prompt = gr.Textbox(
                label="Custom Prompt",
                placeholder="Enter specific questions about the image (e.g., 'Focus on the heart area' or 'Look for signs of pneumonia')",
                value="Describe this image and generate a compact clinical report",
                lines=3,
                max_lines=5,
            )

            # Analyze button
            analyze_btn = gr.Button("Analyze Image", variant="primary", size="lg")

        with gr.Column(scale=1):
            gr.Markdown("### Analysis Report")

            # Output text
            output_text = gr.Textbox(
                label="Generated Report",
                lines=28,
                max_lines=1024,
                show_label=False,
                show_copy_button=False,
                placeholder="Upload an X-ray, CT scan, or any other medical image and click 'Analyze Image' to see the AI analysis results here...",
            )

            # Quick action buttons
            with gr.Row():
                clear_btn = gr.Button("Clear", variant="secondary", size="sm")
                copy_btn = gr.Button("Copy Results", variant="secondary", size="sm")

    # Example prompts section
    gr.Markdown("### Example Prompts")
    with gr.Row():
        example_prompts = [
            "Describe this X-ray in detail, including any abnormalities or notable findings.",
            "Describe the morphology of this skin lesion, focusing on color, border, and texture.",
            "What are the key histological features visible in this tissue sample?",
            "Look for any signs of fractures or bone abnormalities.",
            "Analyze this fundus image and describe the condition of the optic disc and vasculature.",
        ]

        # Each example button fills the custom prompt box with its text
        for i, prompt in enumerate(example_prompts):
            gr.Button(f"Example {i + 1}", size="sm").click(lambda p=prompt: p, outputs=custom_prompt)

    # Event handlers
    def clear_all():
        return None, "", ""

    sample_btn.click(fn=load_sample_image, outputs=image_input)

    analyze_btn.click(fn=analyze_img, inputs=[image_input, custom_prompt], outputs=output_text)

    clear_btn.click(fn=clear_all, outputs=[image_input, custom_prompt, output_text])

    copy_btn.click(
        fn=None,  # no Python function needed for this client-side action
        inputs=[output_text],
        js="""
        (text_to_copy) => {
            if (text_to_copy) {
                navigator.clipboard.writeText(text_to_copy);
                alert("Results copied to clipboard!");
            } else {
                alert("Nothing to copy!");
            }
        }
        """,
    )

    # Auto-analyze when an image is uploaded (optional).
    # analyze_img is a generator, so this wrapper must also be a generator;
    # a plain lambda would hand Gradio the generator object instead of streaming its output.
    def auto_analyze(img):
        if img is None:
            yield ""
        else:
            yield from analyze_img(img)

    image_input.change(fn=auto_analyze, inputs=image_input, outputs=output_text)

# Launch the app
if __name__ == "__main__":
    print("Starting Gradio interface...")
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,  # set to True to create a public link
        show_error=True,
        favicon_path=None,
    )
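For quick local testing outside the Gradio UI, a minimal sketch along the following lines exercises the same pipeline and chat-message format that analyze_img uses. The script name and generation length are illustrative and not part of this commit.

# smoke_test.py -- illustrative only, not included in this commit.
# Uses the same model and message format as app.py with the bundled sample image.
from transformers import pipeline
from PIL import Image
import torch

pipe = pipeline(
    "image-text-to-text",
    model="unsloth/medgemma-4b-it-bnb-4bit",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

image = Image.open("./images/Infection.jpg")  # sample image shipped in this repo
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image in detail, including any abnormalities or notable findings."},
            {"type": "image", "image": image},
        ],
    },
]

output = pipe(text=messages, max_new_tokens=256)
print(output[0]["generated_text"][-1]["content"])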
images/Infection.jpg
ADDED
(binary image file, stored with Git LFS)
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+transformers
+Pillow
+spaces
+torch
+accelerate
+bitsandbytes
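The model referenced in app.py is a bitsandbytes 4-bit checkpoint, which is why accelerate and bitsandbytes appear here alongside transformers and torch. As a rough environment sanity check (a hypothetical helper, not part of this commit), something like the following can be run before launching the Space:

# env_check.py -- hypothetical sanity check, not included in this commit.
import torch
import transformers
import bitsandbytes as bnb  # needed because the checkpoint is bnb-4bit quantized

print("transformers:", transformers.__version__)
print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("bitsandbytes:", bnb.__version__)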