walaa2022 committed on
Commit 2ebe272 · verified · 1 Parent(s): f1a972c

Update app.py

Files changed (1)
  1. app.py +322 -217
app.py CHANGED
@@ -1,189 +1,211 @@
- # app.py - Working MedGemma with Correct Implementation
  import gradio as gr
  import torch
- from transformers import AutoProcessor, AutoModelForImageTextToText, pipeline
- from PIL import Image
  import os
  import logging
  from huggingface_hub import login
  
  # Configure logging
  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)
  
- # Authenticate with Hugging Face
  def authenticate_hf():
-     """Authenticate with Hugging Face using token"""
      try:
          hf_token = os.getenv('HF_TOKEN')
          if hf_token:
              login(token=hf_token)
              logger.info("✅ Authenticated with Hugging Face")
-             return True
          else:
-             logger.warning("⚠️ No HF_TOKEN found in environment")
-             return False
      except Exception as e:
          logger.error(f"❌ Authentication failed: {e}")
-         return False
-
- # Model configuration
- MODEL_ID = "google/medgemma-4b-it"
  
- # Global variables
- model = None
- processor = None
- pipeline_model = None
  
- def load_model():
-     """Load MedGemma model using the recommended approach"""
-     global model, processor, pipeline_model
-
      try:
-         # First authenticate
-         auth_success = authenticate_hf()
-         if not auth_success:
-             logger.error("❌ Authentication required for MedGemma")
-             return False
-
-         logger.info(f"Loading MedGemma: {MODEL_ID}")
-
-         # Method 1: Try using pipeline (recommended by HuggingFace)
-         try:
-             logger.info("Attempting to load using pipeline...")
-             pipeline_model = pipeline(
-                 "image-text-to-text",
-                 model=MODEL_ID,
-                 torch_dtype=torch.float32,
-                 device_map="auto" if torch.cuda.is_available() else None,
-                 trust_remote_code=True
-             )
-             logger.info("✅ Pipeline model loaded successfully!")
-             return True
-         except Exception as e:
-             logger.warning(f"Pipeline loading failed: {e}")
-
-         # Method 2: Try direct model loading
-         logger.info("Attempting direct model loading...")
-
-         # Load processor
-         processor = AutoProcessor.from_pretrained(
-             MODEL_ID,
-             trust_remote_code=True,
-             token=True
-         )
-         logger.info("✅ Processor loaded")
-
-         # Load model
-         model = AutoModelForImageTextToText.from_pretrained(
-             MODEL_ID,
-             torch_dtype=torch.float32,
-             device_map="auto" if torch.cuda.is_available() else None,
-             trust_remote_code=True,
-             token=True
-         )
-         logger.info("✅ Model loaded successfully!")
-         return True
      except Exception as e:
-         logger.error(f"❌ Error loading model: {str(e)}")
-         import traceback
-         logger.error(f"Full traceback: {traceback.format_exc()}")
-         return False
  
- # Initialize model at startup
- model_loaded = load_model()
-
- def analyze_medical_image(image, clinical_question, patient_history=""):
-     """Analyze medical image with clinical context"""
-     global model, processor, pipeline_model
  
-     # Check if model is loaded
-     if not model_loaded:
-         return """❌ **Model Loading Issue**
-
- MedGemma failed to load. This is likely due to:
-
- 1. **Transformers version**: Make sure you're using transformers >= 4.52.0
- 2. **Authentication**: Ensure HF_TOKEN is properly set
- 3. **Model compatibility**: MedGemma requires the latest transformers library
-
- **Status**: Model loading failed. Please try refreshing the page or contact support."""
  
      if image is None:
          return "⚠️ Please upload a medical image first."
  
      if not clinical_question.strip():
          return "⚠️ Please provide a clinical question."
  
      try:
-         # Method 1: Use pipeline if available
-         if pipeline_model is not None:
-             logger.info("Using pipeline for analysis...")
-
-             # Prepare message in the format expected by pipeline
-             messages = [
-                 {
-                     "role": "user",
-                     "content": [
-                         {"type": "image", "image": image},
-                         {"type": "text", "text": f"Patient History: {patient_history}\n\nClinical Question: {clinical_question}\n\nAs MedGemma, provide a detailed medical analysis of this image for educational purposes only."}
-                     ]
-                 }
-             ]
-
-             # Generate response using pipeline
-             result = pipeline_model(messages, max_new_tokens=1000)
-
-             # Extract response text
-             response = result[0]['generated_text'] if isinstance(result, list) else result['generated_text']
-
-         # Method 2: Use direct model if pipeline failed
-         elif model is not None and processor is not None:
-             logger.info("Using direct model for analysis...")
-
-             # Prepare messages for direct model
-             messages = [
-                 {
-                     "role": "system",
-                     "content": [{"type": "text", "text": "You are MedGemma, an expert medical AI assistant. Provide detailed medical analysis for educational purposes only."}]
-                 },
-                 {
-                     "role": "user",
-                     "content": [
-                         {"type": "text", "text": f"Patient History: {patient_history}\n\nClinical Question: {clinical_question}"},
-                         {"type": "image", "image": image}
-                     ]
-                 }
-             ]
-
-             # Process inputs
-             inputs = processor.apply_chat_template(
-                 messages,
-                 add_generation_prompt=True,
-                 tokenize=True,
-                 return_dict=True,
-                 return_tensors="pt"
-             )
-
-             # Generate response
-             with torch.inference_mode():
-                 outputs = model.generate(
-                     **inputs,
-                     max_new_tokens=1000,
-                     do_sample=True,
-                     temperature=0.3,
-                     top_p=0.9
-                 )
-
-             # Decode response
-             response = processor.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
  
-         else:
-             return "❌ No model available for analysis. Please try refreshing the page."
  
          # Clean up response
-         response = response.strip()
  
          # Add medical disclaimer
          disclaimer = """
@@ -196,19 +218,60 @@ MedGemma failed to load. This is likely due to:
  - Do not make medical decisions based solely on this analysis
  - In case of medical emergency, contact emergency services immediately
  ---
  """
  
-         logger.info("✅ Analysis completed successfully")
-         return response + disclaimer
  
      except Exception as e:
-         logger.error(f"❌ Error in analysis: {str(e)}")
-         import traceback
-         logger.error(f"Full traceback: {traceback.format_exc()}")
-         return f"❌ Analysis failed: {str(e)}\n\nPlease try again with a different image or question."
  
  # Create Gradio interface
  def create_interface():
      with gr.Blocks(
          title="MedGemma Medical Analysis",
          theme=gr.themes.Soft(),
@@ -224,26 +287,25 @@ def create_interface():
          gr.Markdown("""
          # 🏥 MedGemma Medical Image Analysis
  
-         **Advanced Medical AI Assistant powered by Google's MedGemma-4B**
  
-         Specialized in medical imaging across multiple modalities:
          🫁 **Radiology** • 🔬 **Histopathology** • 👁️ **Ophthalmology** • 🩺 **Dermatology**
          """)
  
          # Status display
-         if model_loaded:
-             method = "Pipeline" if pipeline_model else "Direct Model"
-             gr.Markdown(f"""
              <div class="success">
              ✅ <strong>MEDGEMMA READY</strong><br>
-             Model loaded successfully using {method} method. Ready for medical image analysis.
              </div>
              """)
          else:
              gr.Markdown("""
              <div class="warning">
-             ⚠️ <strong>MODEL LOADING FAILED</strong><br>
-             MedGemma failed to load. Please ensure you have the latest transformers library and proper authentication.
              </div>
              """)
  
@@ -258,67 +320,81 @@ def create_interface():
  
          with gr.Row():
              # Left column
-             with gr.Column(scale=1):
-                 gr.Markdown("## 📤 Medical Image Upload")
-
-                 image_input = gr.Image(
-                     label="Medical Image",
-                     type="pil",
-                     height=300
-                 )
  
-                 clinical_question = gr.Textbox(
-                     label="Clinical Question *",
-                     placeholder="Examples:\n• Describe findings in this chest X-ray\n• What pathological changes are visible?\n• Provide differential diagnosis\n• Identify abnormalities",
-                     lines=4
-                 )
  
-                 patient_history = gr.Textbox(
-                     label="Patient History (Optional)",
-                     placeholder="e.g., 65-year-old male with chronic cough",
-                     lines=2
                  )
  
-                 with gr.Row():
-                     clear_btn = gr.Button("🗑️ Clear", variant="secondary")
-                     analyze_btn = gr.Button("🔍 Analyze", variant="primary", size="lg")
  
-                 # System info
                  gr.Markdown(f"""
-                 **Status:** {'✅ Ready' if model_loaded else '❌ Failed'}
-                 **Method:** {'Pipeline' if pipeline_model else 'Direct' if model else 'None'}
-                 **Device:** {'CUDA' if torch.cuda.is_available() else 'CPU'}
-                 **Transformers:** {getattr(__import__('transformers'), '__version__', 'Unknown')}
                  """)
  
-             # Right column
-             with gr.Column(scale=1):
-                 gr.Markdown("## 📋 Medical Analysis Results")
  
-                 output = gr.Textbox(
-                     label="AI Medical Analysis",
-                     lines=20,
-                     show_copy_button=True,
-                     placeholder="Upload a medical image and ask a clinical question..." if model_loaded else "Model unavailable - please check system status"
-                 )
  
-                 # Examples
-                 if model_loaded:
-                     with gr.Accordion("📚 Example Cases", open=False):
-                         examples = gr.Examples(
-                             examples=[
-                                 [
-                                     "https://upload.wikimedia.org/wikipedia/commons/c/c8/Chest_Xray_PA_3-8-2010.png",
-                                     "Analyze this chest X-ray systematically. Comment on heart size, lung fields, and any abnormalities.",
-                                     "Adult patient with respiratory symptoms"
-                                 ]
-                             ],
-                             inputs=[image_input, clinical_question, patient_history]
-                         )
  
          # Event handlers
          analyze_btn.click(
-             fn=analyze_medical_image,
              inputs=[image_input, clinical_question, patient_history],
              outputs=output,
@@ -329,19 +405,48 @@ def create_interface():
              outputs=[image_input, clinical_question, patient_history, output]
          )
  
          # Footer
          gr.Markdown("""
          ---
          ### 🔬 About MedGemma
  
-         MedGemma-4B is Google's specialized medical AI model requiring transformers >= 4.52.0.
  
-         ### 🔒 Privacy & Ethics
-         - Real-time processing, no data storage
-         - Educational and research purposes only
-         - No patient data should be uploaded
  
-         **Model:** Google MedGemma-4B | **License:** Apache 2.0
          """)
  
      return demo
  
+ # app.py - Fixed MedGemma Implementation Based on Google's Official Approach
  import gradio as gr
  import torch
  import os
  import logging
+ import json
+ import requests
+ from PIL import Image
+ import base64
+ import io
  from huggingface_hub import login
+ from collections import defaultdict, Counter
+ import time
  
  # Configure logging
  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)
  
+ # Usage tracking
+ class UsageTracker:
+     def __init__(self):
+         self.stats = {
+             'total_analyses': 0,
+             'successful_analyses': 0,
+             'failed_analyses': 0,
+             'average_processing_time': 0.0,
+             'question_types': Counter()
+         }
+
+     def log_analysis(self, success, duration, question_type=None):
+         self.stats['total_analyses'] += 1
+         if success:
+             self.stats['successful_analyses'] += 1
+         else:
+             self.stats['failed_analyses'] += 1
+
+         total_time = self.stats['average_processing_time'] * (self.stats['total_analyses'] - 1)
+         self.stats['average_processing_time'] = (total_time + duration) / self.stats['total_analyses']
+
+         if question_type:
+             self.stats['question_types'][question_type] += 1
+
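UsageTracker.log_analysis maintains the mean processing time incrementally: the stored average is scaled back up to a running total, the new duration is added, and the sum is divided by the new count. A standalone sanity check of that update rule, using hypothetical durations:

# Incremental-mean update, same arithmetic as UsageTracker.log_analysis
durations = [2.0, 4.0, 6.0]   # hypothetical processing times in seconds
avg = 0.0
for n, d in enumerate(durations, start=1):
    avg = (avg * (n - 1) + d) / n
print(avg)   # 4.0, equal to sum(durations) / len(durations)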

+ # Rate limiting
+ class RateLimiter:
+     def __init__(self, max_requests_per_hour=50):
+         self.max_requests_per_hour = max_requests_per_hour
+         self.requests = defaultdict(list)
+
+     def is_allowed(self, user_id="default"):
+         current_time = time.time()
+         hour_ago = current_time - 3600
+         self.requests[user_id] = [req_time for req_time in self.requests[user_id] if req_time > hour_ago]
+         if len(self.requests[user_id]) < self.max_requests_per_hour:
+             self.requests[user_id].append(current_time)
+             return True
+         return False
+
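RateLimiter.is_allowed implements a sliding one-hour window: timestamps older than 3600 seconds are pruned on every call, so the cap always applies to the trailing hour rather than to fixed clock hours. A minimal sketch of the behavior, assuming the RateLimiter class above and a deliberately small limit:

limiter = RateLimiter(max_requests_per_hour=2)
print(limiter.is_allowed("user-a"))   # True  - 1 request in the window
print(limiter.is_allowed("user-a"))   # True  - 2 requests in the window
print(limiter.is_allowed("user-a"))   # False - cap hit until a timestamp ages out
print(limiter.is_allowed("user-b"))   # True  - windows are tracked per user_id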

+ # Initialize components
+ usage_tracker = UsageTracker()
+ rate_limiter = RateLimiter()
+
+ # MedGemma API Configuration
+ MODEL_ID = "google/medgemma-4b-it"
+
  def authenticate_hf():
+     """Authenticate with Hugging Face"""
      try:
          hf_token = os.getenv('HF_TOKEN')
          if hf_token:
              login(token=hf_token)
              logger.info("✅ Authenticated with Hugging Face")
+             return True, hf_token
          else:
+             logger.warning("⚠️ No HF_TOKEN found")
+             return False, None
      except Exception as e:
          logger.error(f"❌ Authentication failed: {e}")
+         return False, None
  
+ def image_to_base64(image):
+     """Convert PIL image to base64 string"""
+     try:
+         buffer = io.BytesIO()
+         image.save(buffer, format='PNG')
+         img_str = base64.b64encode(buffer.getvalue()).decode()
+         return f"data:image/png;base64,{img_str}"
+     except Exception as e:
+         logger.error(f"Error converting image: {e}")
+         return None
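image_to_base64 renders the PIL image to an in-memory PNG and wraps the bytes as a data: URL. A quick round-trip check of that encoding, with a hypothetical 1x1 test image:

import base64, io
from PIL import Image

img = Image.new("RGB", (1, 1), "white")    # hypothetical test image
buf = io.BytesIO()
img.save(buf, format="PNG")                # same in-memory PNG encode
data_url = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()

payload = data_url.split(",", 1)[1]        # strip the data-URL prefix
assert base64.b64decode(payload) == buf.getvalue()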
 
+ def call_medgemma_api(image, prompt, patient_history="", hf_token=None):
+     """Call MedGemma using Hugging Face Inference API"""
+     try:
+         # Use HF Inference API endpoint
+         api_url = f"https://api-inference.huggingface.co/models/{MODEL_ID}"
+
+         headers = {
+             "Authorization": f"Bearer {hf_token}",
+             "Content-Type": "application/json"
+         }
+
+         # Prepare the payload following Google's format
+         system_instruction = "You are an expert medical AI assistant specialized in medical image analysis. Provide detailed analysis for educational purposes only."
+
+         # Build the full prompt
+         full_prompt = system_instruction + " "
+         if patient_history.strip():
+             full_prompt += f"Patient History: {patient_history} "
+         full_prompt += prompt
+
+         # Convert image to base64
+         image_b64 = image_to_base64(image)
+         if not image_b64:
+             return None, "Failed to process image"
+
+         # Prepare the request payload
+         payload = {
+             "inputs": {
+                 "prompt": full_prompt,
+                 "multi_modal_data": {
+                     "image": image_b64
+                 },
+                 "max_tokens": 1000,
+                 "temperature": 0.3,
+                 "raw_response": True
+             }
+         }
+
+         # Make the API call
+         response = requests.post(api_url, headers=headers, json=payload, timeout=120)
+
+         if response.status_code == 200:
+             result = response.json()
+             if isinstance(result, list) and len(result) > 0:
+                 return result[0].get('generated_text', ''), None
+             elif isinstance(result, dict):
+                 return result.get('generated_text', result.get('text', str(result))), None
+             else:
+                 return str(result), None
+         else:
+             error_msg = f"API Error {response.status_code}: {response.text}"
+             logger.error(error_msg)
+             return None, error_msg
+
+     except requests.exceptions.Timeout:
+         return None, "Request timeout - model may be loading"
+     except Exception as e:
+         logger.error(f"API call failed: {e}")
+         return None, str(e)
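call_medgemma_api signals failure through its return value instead of raising, so callers always receive a (text, error) pair with exactly one side set. A stubbed, offline sketch of how that contract is consumed; fake_call is a hypothetical stand-in, not the real API call:

def fake_call(image, prompt, patient_history="", hf_token=None):
    # Hypothetical stand-in for call_medgemma_api: (text, error), one side None
    if not hf_token:
        return None, "API Error 401: missing or invalid token"
    return "No acute cardiopulmonary abnormality identified.", None

text, error = fake_call(None, "Describe the image", hf_token="hf_xxx")
print(error if error else text)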
 
+ def analyze_medical_image_medgemma(image, clinical_question, patient_history=""):
+     """Main analysis function using MedGemma"""
+     start_time = time.time()
+
+     # Rate limiting
+     if not rate_limiter.is_allowed():
+         usage_tracker.log_analysis(False, time.time() - start_time)
+         return "⚠️ Too many requests. Please wait before trying again."
+
+     # Validate inputs
      if image is None:
          return "⚠️ Please upload a medical image first."
  
      if not clinical_question.strip():
          return "⚠️ Please provide a clinical question."
  
+     # Authenticate
+     auth_success, hf_token = authenticate_hf()
+     if not auth_success or not hf_token:
+         usage_tracker.log_analysis(False, time.time() - start_time)
+         return """❌ **Authentication Required**
+
+ To use MedGemma, you need:
+ 1. Access to the model at https://huggingface.co/google/medgemma-4b-it
+ 2. HF_TOKEN set in Space Settings → Repository secrets
+
+ **Current Status**: Authentication failed - cannot access MedGemma."""
+
      try:
+         logger.info("Calling MedGemma API...")
+
+         # Call MedGemma API
+         response_text, error = call_medgemma_api(
+             image=image,
+             prompt=clinical_question,
+             patient_history=patient_history,
+             hf_token=hf_token
+         )
+
+         if error:
+             usage_tracker.log_analysis(False, time.time() - start_time)
+             return f"""❌ **MedGemma API Error**
+
+ {error}
+
+ **Possible solutions:**
+ 1. The model may be loading - try again in a few minutes
+ 2. Check if you have proper access to MedGemma
+ 3. Verify your HF_TOKEN is valid
+
+ **Note**: MedGemma is a gated model and may have usage limits."""
+
+         if not response_text:
+             usage_tracker.log_analysis(False, time.time() - start_time)
+             return "❌ No response from MedGemma. Please try again."
  
          # Clean up response
+         response_text = response_text.strip()
  
          # Add medical disclaimer
          disclaimer = """
  - Do not make medical decisions based solely on this analysis
  - In case of medical emergency, contact emergency services immediately
  ---
+
+ **Powered by**: Google MedGemma-4B via Hugging Face Inference API
  """
  
+         # Log successful analysis
+         duration = time.time() - start_time
+         question_type = classify_question(clinical_question)
+         usage_tracker.log_analysis(True, duration, question_type)
+
+         logger.info("✅ MedGemma analysis completed successfully")
+         return response_text + disclaimer
  
      except Exception as e:
+         duration = time.time() - start_time
+         usage_tracker.log_analysis(False, duration)
+         logger.error(f"❌ Analysis error: {str(e)}")
+         return f"❌ Analysis failed: {str(e)}\n\nPlease try again or use a different image."
+
+ def classify_question(question):
+     """Classify clinical question type"""
+     question_lower = question.lower()
+     if any(word in question_lower for word in ['describe', 'findings', 'observe']):
+         return 'descriptive'
+     elif any(word in question_lower for word in ['diagnosis', 'differential', 'condition']):
+         return 'diagnostic'
+     elif any(word in question_lower for word in ['abnormal', 'pathology', 'disease']):
+         return 'pathological'
+     else:
+         return 'general'
+
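classify_question checks keyword groups in order, so a question containing both 'describe' and 'diagnosis' counts as descriptive; anything unmatched falls through to general. A few illustrative calls, assuming the function above:

print(classify_question("Describe the findings in this image"))   # descriptive
print(classify_question("What is the differential diagnosis?"))   # diagnostic
print(classify_question("Is there any visible pathology?"))       # pathological
print(classify_question("How was this image acquired?"))          # general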

+ def get_usage_stats():
+     """Get usage statistics"""
+     stats = usage_tracker.stats
+     if stats['total_analyses'] == 0:
+         return "📊 **Usage Statistics**\n\nNo analyses performed yet."
+
+     success_rate = (stats['successful_analyses'] / stats['total_analyses']) * 100
+
+     return f"""📊 **Usage Statistics**
+
+ **Performance:**
+ - Total Analyses: {stats['total_analyses']}
+ - Success Rate: {success_rate:.1f}%
+ - Avg Processing Time: {stats['average_processing_time']:.2f}s
+
+ **Popular Question Types:**
+ {chr(10).join([f"- {qtype}: {count}" for qtype, count in stats['question_types'].most_common(3)])}
+ """
  
  # Create Gradio interface
  def create_interface():
+     # Check authentication status
+     auth_success, _ = authenticate_hf()
+
      with gr.Blocks(
          title="MedGemma Medical Analysis",
          theme=gr.themes.Soft(),
  
          gr.Markdown("""
          # 🏥 MedGemma Medical Image Analysis
  
+         **Google's Medical AI Assistant - MedGemma-4B**
  
+         Specialized medical AI trained specifically for:
          🫁 **Radiology** • 🔬 **Histopathology** • 👁️ **Ophthalmology** • 🩺 **Dermatology**
          """)
  
          # Status display
+         if auth_success:
+             gr.Markdown("""
              <div class="success">
              ✅ <strong>MEDGEMMA READY</strong><br>
+             Authenticated with Google's MedGemma-4B model. Ready for professional medical image analysis.
              </div>
              """)
          else:
              gr.Markdown("""
              <div class="warning">
+             🔐 <strong>AUTHENTICATION REQUIRED</strong><br>
+             Please ensure HF_TOKEN is set in Space Settings → Repository secrets and you have access to MedGemma.
              </div>
              """)
  
          with gr.Row():
              # Left column
+             with gr.Column(scale=2):
+                 with gr.Row():
+                     with gr.Column():
+                         gr.Markdown("## 📤 Medical Image")
+                         image_input = gr.Image(
+                             label="Upload Medical Image",
+                             type="pil",
+                             height=300
+                         )
+
+                     with gr.Column():
+                         gr.Markdown("## 💬 Clinical Query")
+                         clinical_question = gr.Textbox(
+                             label="Clinical Question *",
+                             placeholder="Examples:\n• Describe this X-ray systematically\n• What pathological changes are visible?\n• Provide differential diagnosis\n• Assess image quality and findings",
+                             lines=4
+                         )
+
+                         patient_history = gr.Textbox(
+                             label="Patient History (Optional)",
+                             placeholder="e.g., 65-year-old male with chronic cough, smoking history",
+                             lines=2
+                         )
  
+                 with gr.Row():
+                     clear_btn = gr.Button("🗑️ Clear", variant="secondary")
+                     analyze_btn = gr.Button("🔍 Analyze with MedGemma", variant="primary", size="lg")
  
+                 gr.Markdown("## 📋 MedGemma Analysis")
+                 output = gr.Textbox(
+                     label="Medical AI Analysis Results",
+                     lines=20,
+                     show_copy_button=True,
+                     placeholder="Upload a medical image and ask a clinical question to get started..."
                  )
+
+             # Right column - System info
+             with gr.Column(scale=1):
+                 gr.Markdown("## ℹ️ System Status")
  
+                 auth_status = "✅ Authenticated" if auth_success else "🔐 Auth Required"
  
                  gr.Markdown(f"""
+                 **Authentication:** {auth_status}
+                 **Model:** Google MedGemma-4B
+                 **API:** Hugging Face Inference
+                 **Status:** {'Ready' if auth_success else 'Setup Required'}
                  """)
  
+                 gr.Markdown("## 📊 Usage Statistics")
+                 stats_display = gr.Markdown("")
+                 refresh_stats_btn = gr.Button("🔄 Refresh Stats", size="sm")
  
+                 gr.Markdown("## 🎯 Quick Examples")
+
+                 chest_btn = gr.Button("Chest X-ray", size="sm")
+                 pathology_btn = gr.Button("Pathology", size="sm")
+                 diagnosis_btn = gr.Button("Diagnosis", size="sm")
  
+         # Example cases
+         with gr.Accordion("📚 Medical Cases", open=False):
+             examples = gr.Examples(
+                 examples=[
+                     [
+                         "https://upload.wikimedia.org/wikipedia/commons/c/c8/Chest_Xray_PA_3-8-2010.png",
+                         "You are an expert radiologist. Describe this X-ray systematically including heart size, lung fields, and any abnormalities.",
+                         "Adult patient with respiratory symptoms"
+                     ]
+                 ],
+                 inputs=[image_input, clinical_question, patient_history]
+             )
  
          # Event handlers
          analyze_btn.click(
+             fn=analyze_medical_image_medgemma,
              inputs=[image_input, clinical_question, patient_history],
              outputs=output,
              show_progress=True
  
              outputs=[image_input, clinical_question, patient_history, output]
          )
  
+         refresh_stats_btn.click(
+             fn=get_usage_stats,
+             outputs=stats_display
+         )
+
+         # Quick example handlers
+         chest_btn.click(
+             fn=lambda: ("Analyze this chest X-ray systematically. Comment on cardiac silhouette, lung fields, mediastinum, and any pathological findings.", "Adult with respiratory symptoms"),
+             outputs=[clinical_question, patient_history]
+         )
+
+         pathology_btn.click(
+             fn=lambda: ("What pathological changes are visible in this medical image? Provide structured analysis with clinical significance.", ""),
+             outputs=[clinical_question, patient_history]
+         )
+
+         diagnosis_btn.click(
+             fn=lambda: ("Based on the imaging findings, what are the most likely differential diagnoses? Consider clinical context.", "Patient with acute presentation"),
+             outputs=[clinical_question, patient_history]
+         )
+
          # Footer
          gr.Markdown("""
          ---
          ### 🔬 About MedGemma
  
+         **MedGemma-4B** is Google's specialized medical AI model designed specifically for medical image analysis and clinical reasoning.
+         It represents state-of-the-art performance in medical AI applications.
+
+         **Key Features:**
+         - **Medical Specialization**: Trained specifically on medical imaging data
+         - **Multi-modal**: Handles both images and clinical text
+         - **Professional Grade**: Designed for medical education and research
+         - **Google Quality**: Built by Google's medical AI team
  
+         ### 🔒 Privacy & Compliance
+         - **Real-time processing** with no data retention
+         - **Educational purpose** design and disclaimers
+         - **HIPAA-aware** interface (no PHI uploads)
+         - **Professional standards** for medical AI applications
  
+         **Model:** Google MedGemma-4B | **API:** Hugging Face Inference | **License:** Apache 2.0
          """)
  
      return demo