scdrand23 committed
Commit 2ffc244 · Parent(s): 94c0701

integrated biomedllama

Files changed (1)
  1. app.py +22 -30
app.py CHANGED
@@ -285,33 +285,14 @@ def process_image(image_path, user_prompt, modality=None):
         image = read_rgb(image_path)
         pil_image = Image.fromarray(image)
 
-        # Stage 1: Get modality identification only
-        modality_question = "What is the medical imaging modality of this image? Please respond with only the modality type (CT, MRI, X-Ray, Pathology, Ultrasound, Endoscopy, Fundus, Dermoscopy, or OCT) and if applicable, the specific body part or sequence (e.g., CT-Chest, MRI-FLAIR-Brain)."
-        modality_msgs = [{'role': 'user', 'content': [pil_image, modality_question]}]
-
-        modality_response = ""
-        for new_text in llm_model.chat(
-            image=pil_image,
-            msgs=modality_msgs,
-            tokenizer=llm_tokenizer,
-            sampling=True,
-            temperature=0.7, # Lower temperature for more focused response
-            stream=True
-        ):
-            modality_response += new_text
-
-        detected_modality = extract_modality_from_llm(modality_response)
-        if not detected_modality:
-            raise ValueError("Could not determine image modality")
-
-        # Stage 2: Get detailed analysis with known modality
-        analysis_question = f"This is a {detected_modality} image. {user_prompt} Please analyze the anatomical structures and any abnormalities visible in this image."
-        analysis_msgs = [{'role': 'user', 'content': [pil_image, analysis_question]}]
+        # Step 1: Get LLM analysis
+        question = f"Analyze this medical image considering the following context: {user_prompt}. Include modality, anatomical structures, and any abnormalities."
+        msgs = [{'role': 'user', 'content': [pil_image, question]}]
 
         llm_response = ""
         for new_text in llm_model.chat(
             image=pil_image,
-            msgs=analysis_msgs,
+            msgs=msgs,
             tokenizer=llm_tokenizer,
             sampling=True,
             temperature=0.95,
@@ -319,39 +300,50 @@ def process_image(image_path, user_prompt, modality=None):
         ):
             llm_response += new_text
 
-        # Extract findings using known modality
+        # Step 2: Extract modality from LLM output
+        detected_modality = extract_modality_from_llm(llm_response)
+        if not detected_modality:
+            raise ValueError("Could not determine image modality from LLM output")
+
+        # Step 3: Extract relevant clinical findings
         clinical_findings = extract_clinical_findings(llm_response, detected_modality)
 
-        # Rest of the processing remains the same
+        # Step 4: Generate masks for each finding
         results = []
         analysis_results = []
-        colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]
+        colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)] # Different colors for different findings
 
         for idx, finding in enumerate(clinical_findings):
             pred_mask = interactive_infer_image(model, pil_image, [finding])[0]
             p_value = check_mask_stats(image, pred_mask * 255, detected_modality, finding)
             analysis_results.append(f"P-value for '{finding}' ({detected_modality}): {p_value:.4f}")
 
+            # Create colored overlay
             overlay_image = image.copy()
             color = colors[idx % len(colors)]
             overlay_image[pred_mask > 0.5] = color
             results.append(overlay_image)
 
-        enhanced_response = f"Modality: {detected_modality}\n\n"
-        enhanced_response += llm_response + "\n\nSegmentation Results:\n"
+        # Update LLM response with color references
+        enhanced_response = llm_response + "\n\nSegmentation Results:\n"
         for idx, finding in enumerate(clinical_findings):
             color_name = ["red", "green", "blue", "yellow", "magenta"][idx % len(colors)]
             enhanced_response += f"- {finding} (shown in {color_name})\n"
 
-        combined_analysis = "\n".join(analysis_results)
+        # combined_analysis = "\n\n" + "="*50 + "\n"
+        # combined_analysis += "BiomedParse Analysis:\n"
+        combined_analysis += "\n".join(analysis_results)
+        # combined_analysis += "\n\n" + "="*50 + "\n"
+        # combined_analysis += "Enhanced LLM Analysis:\n"
         combined_analysis += enhanced_response
+        # combined_analysis += "\n" + "="*50
 
         return results, combined_analysis, detected_modality
 
     except Exception as e:
         error_msg = f"⚠️ An error occurred: {str(e)}"
         print(f"Error details: {str(e)}", flush=True)
-        return None, error_msg, None
+        return None, error_msg
 
 # Define Gradio interface
 with gr.Blocks() as demo:
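
The new single-pass flow hinges on extract_modality_from_llm pulling a modality tag out of the free-form analysis, which is why the Step 1 prompt explicitly asks the model to include the modality. That helper is defined elsewhere in app.py and is not part of this diff; as a rough sketch only, assuming simple keyword matching over the modality list from the old two-stage prompt, it might look like:

import re

# Hypothetical sketch only -- the real extract_modality_from_llm in app.py
# is not shown in this diff and may work differently.
KNOWN_MODALITIES = ["CT", "MRI", "X-Ray", "Pathology", "Ultrasound",
                    "Endoscopy", "Fundus", "Dermoscopy", "OCT"]

def extract_modality_sketch(text):
    # Return the first known modality mentioned in the LLM output, or None.
    for modality in KNOWN_MODALITIES:
        if re.search(rf"\b{re.escape(modality)}\b", text, re.IGNORECASE):
            return modality
    return None

Returning None rather than raising keeps the "if not detected_modality" guard in Step 2 meaningful.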
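
Two runtime details stand out in the new version: combined_analysis is first used with += while every line that would initialize it is commented out, so Python would raise UnboundLocalError at that point; and the error branch now returns two values while the success branch returns three, which matters if the Gradio callback is bound to three outputs. A minimal sketch of the tail with the initialization restored, using stand-in values for variables built earlier in process_image:

# Minimal sketch, not the committed code; analysis_results and
# enhanced_response are stand-ins for the values built earlier.
analysis_results = ["P-value for 'lesion' (CT): 0.0123"]
enhanced_response = "...\n\nSegmentation Results:\n- lesion (shown in red)\n"

combined_analysis = ""  # initialize before any +=
combined_analysis += "\n".join(analysis_results)
combined_analysis += enhanced_response
print(combined_analysis)

On the error path, restoring the old arity (return None, error_msg, None) would keep the branch aligned with the success path's three return values.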