Update app.py
app.py
CHANGED
@@ -1,207 +1,274 @@
import streamlit as st
import google.generativeai as genai
import os
-import io
from typing import Optional, Tuple, Any  # For type hinting

# --- Configuration and Initialization ---

-# Securely load API key
GEMINI_API_KEY = st.secrets.get("GEMINI_API_KEY", os.environ.get("GEMINI_API_KEY"))

-# Configure Gemini Client
genai_client_configured = False
if GEMINI_API_KEY:
    try:
        genai.configure(api_key=GEMINI_API_KEY)
        genai_client_configured = True
    except Exception as e:
-        st.error(f"
-        st.stop()
else:
    st.error("⚠️ Gemini API Key not found. Please configure `GEMINI_API_KEY` in Streamlit secrets or environment variables.")
-    st.stop()

-# Initialize models (

if genai_client_configured:
    try:
-        model = genai.GenerativeModel(
    except Exception as e:
-        st.error(f"
        st.stop()
else:
    st.stop()

-# --- Core
-# This prompt instructs the LLM to simulate a structured, agent-like reasoning process
-# focused on differential diagnosis support.
-AGENTIC_ANALYSIS_PROMPT_TEMPLATE = """
-**Simulated Clinical Reasoning Agent Task:**
-**Role:** You are an AI assistant simulating an agentic clinical reasoning process to support a healthcare professional. Your goal is NOT to diagnose, but to structure information, generate possibilities, and suggest logical next steps based *strictly* on the provided information.
-**Input Data:** You will receive unstructured clinical information (e.g., symptoms, history, basic findings). Assume this is the *only* information available unless stated otherwise.
-* Categorize suggestions (e.g., Further History Taking, Physical Examination Points, Laboratory Tests, Imaging Studies).
-* Frame these as *suggestions* for the clinician's judgment (e.g., "Consider ordering...", "Assessment of X may be informative", "Further questioning about Y could clarify...").
---
---
"""

-def
    """

    Args:
-        text_input: The

    Returns:
        A tuple containing:
        - An error message (str) if an error occurred, None otherwise.
    """
-    if not text_input
        return None, "Input text cannot be empty."
    try:
-        prompt =
-        # Consider adding safety settings if dealing with potentially sensitive outputs
-        # safety_settings = [...]
-        # response = model.generate_content(prompt, safety_settings=safety_settings)
        response = model.generate_content(prompt)

        if response.parts:
            return response.text, None
        elif response.prompt_feedback.block_reason:
        else:
-            candidate = response.candidates[0] if response.candidates else None
-            if candidate and candidate.finish_reason != "STOP":
-                return None, f"Analysis stopped prematurely. Reason: {candidate.finish_reason.name}. The input might be too complex or ambiguous."
-            else:
-                return None, "Received an empty or unexpected response from the AI model. The model may not have been able to process the request."

    except Exception as e:
-        st.error(f"
-        return None, f"An error occurred while communicating with the AI model. Please try again later or check the input. Details: {e}"


# --- Streamlit User Interface ---

def main():
-    st.set_page_config(
-        page_title="Agentic AI Clinical Reasoning Support",
-        layout="wide",
-        initial_sidebar_state="expanded"
-    )

-    st.caption(f"Powered by Google Gemini ({MODEL_NAME})")
-    st.markdown("---")

    # --- CRITICAL DISCLAIMER ---
    st.warning(
        """
-        * This tool
-        """
-        icon="⚠️"
    )
-    st.markdown("---")

-    # --- Input Area ---
-    st.header("Clinical Information Input")
-    st.markdown("Enter de-identified clinical information below (e.g., symptoms, brief history, key findings). The AI will attempt a structured analysis.")

    )


if __name__ == "__main__":
    main()
import streamlit as st
import google.generativeai as genai
import os
+from PIL import Image
+import io  # Needed for handling image bytes
from typing import Optional, Tuple, Any  # For type hinting

# --- Configuration and Initialization ---

+# Securely load API key
+# Prioritize Streamlit secrets, fall back to environment variable for flexibility
GEMINI_API_KEY = st.secrets.get("GEMINI_API_KEY", os.environ.get("GEMINI_API_KEY"))

+# Configure Gemini Client (only if key is found)
genai_client_configured = False
if GEMINI_API_KEY:
    try:
        genai.configure(api_key=GEMINI_API_KEY)
        genai_client_configured = True
    except Exception as e:
+        st.error(f"Failed to configure Google Generative AI: {e}")
+        st.stop()  # Stop execution if configuration fails
else:
    st.error("⚠️ Gemini API Key not found. Please configure `GEMINI_API_KEY` in Streamlit secrets or environment variables.")
+    st.stop()  # Stop execution if no API key

+# Initialize models (only if client configured)
+# Using specific model versions can be good practice for reproducibility
+TEXT_MODEL_NAME = 'gemini-1.5-flash'  # Or 'gemini-pro' if preferred
+VISION_MODEL_NAME = 'gemini-1.5-flash'  # Or 'gemini-pro-vision' if preferred

if genai_client_configured:
    try:
+        model = genai.GenerativeModel(TEXT_MODEL_NAME)
+        vision_model = genai.GenerativeModel(VISION_MODEL_NAME)
    except Exception as e:
+        st.error(f"Failed to initialize Gemini models ({TEXT_MODEL_NAME}, {VISION_MODEL_NAME}): {e}")
        st.stop()
else:
+    # This state should technically not be reached due to earlier st.stop() calls,
+    # but it's good defensive programming.
+    st.error("AI Models could not be initialized due to configuration issues.")
    st.stop()


+# --- Core AI Interaction Functions ---

+# Define more sophisticated prompts emphasizing analysis and *potential* findings
+# CRITICAL: Avoid definitive "diagnosis" language. Focus on assisting clinical judgment.
+TEXT_ANALYSIS_PROMPT_TEMPLATE = """
+**Medical Information Analysis Request:**

+**Context:** Analyze the provided medical text (symptoms, history, or reports).
+**Task:**
+1. **Identify Key Findings:** Extract significant symptoms, signs, or data points.
+2. **Potential Considerations:** Based *only* on the provided text, list potential underlying conditions or areas of concern that *might* warrant further investigation by a qualified healthcare professional. Use cautious language (e.g., "suggests potential for," "could be consistent with," "warrants investigation into").
+3. **Risk Factors (if applicable):** Mention any potential risk factors identifiable from the text.
+4. **Information Gaps:** Highlight any missing information that would be crucial for a clinical assessment.
+5. **Disclaimer:** Explicitly state that this analysis is AI-generated, not a diagnosis, and cannot replace professional medical evaluation.

+**Input Text:**
+---
+{text_input}
+---

+**Analysis:**
+"""

+IMAGE_ANALYSIS_PROMPT_TEMPLATE = """
+**Medical Image Analysis Request:**

+**Context:** Analyze the provided medical image. User may provide additional context or questions.
+**Task:**
+1. **Describe Visible Structures:** Briefly describe the main anatomical structures visible.
+2. **Identify Potential Anomalies:** Point out any visible areas that *appear* abnormal or deviate from typical presentation (e.g., "potential opacity in the lower left lung field," "area of altered signal intensity," "possible asymmetry"). Use cautious, descriptive language.
+3. **Correlate with User Prompt (if provided):** If the user asked a specific question, address it based *only* on the visual information.
+4. **Limitations:** State that image quality, view, and lack of clinical context limit the analysis.
+5. **Disclaimer:** Explicitly state this is an AI-based visual analysis, not a radiological interpretation or diagnosis, and requires review by a qualified radiologist or physician alongside clinical information.

+**User's Additional Context/Question (if any):**
---
+{user_prompt}
---

+**Image Analysis:**
"""

+def analyze_medical_text(text_input: str) -> Tuple[Optional[str], Optional[str]]:
    """
+    Sends medical text to the Gemini model for analysis using a structured prompt.

    Args:
+        text_input: The medical text provided by the user.

    Returns:
        A tuple containing:
+        - The analysis text (str) if successful, None otherwise.
        - An error message (str) if an error occurred, None otherwise.
    """
+    if not text_input:
        return None, "Input text cannot be empty."
    try:
+        prompt = TEXT_ANALYSIS_PROMPT_TEMPLATE.format(text_input=text_input)
        response = model.generate_content(prompt)
+        # Add safety check for response structure if needed (e.g., check for blocked content)
+        if response.parts:
+            return response.text, None
+        elif response.prompt_feedback.block_reason:
+            return None, f"Analysis blocked due to: {response.prompt_feedback.block_reason.name}. Please revise input."
+        else:
+            return None, "Received an empty or unexpected response from the AI."

+    except Exception as e:
+        st.error(f"An error occurred during text analysis: {e}")  # Log for debugging
+        return None, f"Error communicating with the AI model. Details: {e}"

+def analyze_medical_image(image_file: Any, user_prompt: str = "") -> Tuple[Optional[str], Optional[str]]:
+    """
+    Sends a medical image (and optional prompt) to the Gemini Vision model for analysis.

+    Args:
+        image_file: The uploaded image file object from Streamlit.
+        user_prompt: Optional text context or specific questions from the user.

+    Returns:
+        A tuple containing:
+        - The analysis text (str) if successful, None otherwise.
+        - An error message (str) if an error occurred, None otherwise.
+    """
+    if not image_file:
+        return None, "Image file cannot be empty."
+    try:
+        # Ensure image is opened correctly, handle potential errors
+        try:
+            image = Image.open(image_file)
+            # Optional: Convert image to a supported format if needed, e.g., RGB
+            if image.mode != 'RGB':
+                image = image.convert('RGB')
+        except Exception as img_e:
+            return None, f"Error opening or processing image file: {img_e}"

+        prompt_text = IMAGE_ANALYSIS_PROMPT_TEMPLATE.format(user_prompt=user_prompt if user_prompt else "N/A")

+        # Prepare content for the vision model
+        model_input = [prompt_text, image]

+        response = vision_model.generate_content(model_input)

+        # Add safety check for response structure
        if response.parts:
            return response.text, None
        elif response.prompt_feedback.block_reason:
+            return None, f"Analysis blocked due to: {response.prompt_feedback.block_reason.name}. This might be due to sensitive content policies."
        else:
+            return None, "Received an empty or unexpected response from the AI."

    except Exception as e:
+        st.error(f"An error occurred during image analysis: {e}")  # Log for debugging
+        return None, f"Error communicating with the AI model. Details: {e}"


# --- Streamlit User Interface ---

def main():
+    st.set_page_config(page_title="AI Medical Information Assistant", layout="wide")

+    st.title("🤖 AI Medical Information Assistant")
+    st.caption(f"Powered by Google Gemini ({TEXT_MODEL_NAME} / {VISION_MODEL_NAME})")

    # --- CRITICAL DISCLAIMER ---
    st.warning(
        """
+        **IMPORTANT DISCLAIMER:**
+        * This tool uses AI to analyze information but **DOES NOT PROVIDE MEDICAL ADVICE OR DIAGNOSIS.**
+        * The analysis is based solely on the input provided and may be incomplete, inaccurate, or lack clinical context.
+        * **ALWAYS consult a qualified healthcare professional** for any health concerns, diagnosis, or treatment decisions.
+        * Do not rely on this tool for medical decisions. It is for informational and educational purposes only, potentially assisting clinicians by highlighting areas for review.
+        * **Do not upload identifiable patient information** unless you have explicit consent and comply with all privacy regulations (e.g., HIPAA).
+        """
    )

+    st.sidebar.header("Input Options")
+    input_method = st.sidebar.radio(
+        "Select Input Type:",
+        ("Text Description", "Medical Image"),
+        help="Choose whether to analyze text-based medical information or a medical image."
    )

+    st.sidebar.markdown("---")  # Visual separator

+    col1, col2 = st.columns(2)

+    with col1:
+        st.header("Input")
+        if input_method == "Text Description":
+            st.subheader("Enter Medical Text")
+            text_input = st.text_area(
+                "Paste symptoms, patient history excerpt, or non-identifiable report sections:",
+                height=300,
+                placeholder="Example: 55-year-old male presents with intermittent chest pain, worse on exertion. History of hypertension. Non-smoker...",
+                key="text_input_area"  # Add key for potential state management
+            )
+            analyze_button = st.button("Analyze Text", key="analyze_text_button", type="primary")

+            if analyze_button and text_input:
+                with st.spinner("🧠 Analyzing Text... Please wait."):
+                    analysis_result, error_message = analyze_medical_text(text_input)

+                if error_message:
+                    with col2:
+                        st.error(f"Analysis Failed: {error_message}")
+                elif analysis_result:
+                    with col2:
+                        st.header("Analysis Results")
+                        st.markdown(analysis_result)  # Use markdown for better formatting
+                else:
+                    # Handle unexpected case where both are None
+                    with col2:
+                        st.error("An unknown error occurred during analysis.")

+            elif analyze_button and not text_input:
+                st.warning("Please enter some text to analyze.")

+        elif input_method == "Medical Image":
+            st.subheader("Upload Medical Image")
+            image_file = st.file_uploader(
+                "Upload an image (e.g., X-ray, CT slice, pathology slide). Supported: PNG, JPG, JPEG.",
+                type=["png", "jpg", "jpeg"],
+                key="image_uploader"
+            )
+            user_image_prompt = st.text_input(
+                "Optional: Add context or specific questions for the image analysis:",
+                placeholder="Example: 'Check for abnormalities in the left lung apex' or 'Patient context: Follow-up scan after treatment'",
+                key="image_prompt_input"
+            )
+            analyze_button = st.button("Analyze Image", key="analyze_image_button", type="primary")

+            if analyze_button and image_file:
+                # Display uploaded image immediately for confirmation
+                st.image(image_file, caption="Uploaded Image Preview", use_column_width=True)
+                with st.spinner("🖼️ Analyzing Image... Please wait."):
+                    analysis_result, error_message = analyze_medical_image(image_file, user_image_prompt)

+                if error_message:
+                    with col2:
+                        st.error(f"Analysis Failed: {error_message}")
+                elif analysis_result:
+                    with col2:
+                        st.header("Image Analysis Results")
+                        st.markdown(analysis_result)
+                else:
+                    with col2:
+                        st.error("An unknown error occurred during analysis.")

+            elif analyze_button and not image_file:
+                st.warning("Please upload an image file to analyze.")

+    # Display placeholder in result column if nothing submitted yet
+    # Check if 'analysis_result' or 'error_message' exists in session state if needed for persistence,
+    # but for this simpler flow, checking button presses is often sufficient.
+    # A simple way is to check if the button was pressed in the current run.
+    button_pressed = st.session_state.get('analyze_text_button', False) or st.session_state.get('analyze_image_button', False)
+    if not button_pressed:
+        with col2:
+            st.info("Analysis results will appear here once input is submitted.")

+    st.sidebar.markdown("---")
+    st.sidebar.info("Remember: This AI tool is an assistant, not a substitute for professional medical expertise.")

if __name__ == "__main__":
    main()
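
For quick local verification of the new multimodal path, the sketch below mirrors the call pattern the updated app.py relies on: genai.configure, GenerativeModel, and a generate_content call that mixes a text prompt with a PIL image. It is a minimal sketch, not part of the commit; it assumes GEMINI_API_KEY is exported as an environment variable (the app also accepts it via st.secrets), reuses the same gemini-1.5-flash default as TEXT_MODEL_NAME / VISION_MODEL_NAME above, and treats "sample.png" as a placeholder path.

import os

import google.generativeai as genai
from PIL import Image

# Assumption: GEMINI_API_KEY is set in the environment for this standalone check.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
model = genai.GenerativeModel("gemini-1.5-flash")  # same default model name as the diff

# Text-only call, mirroring analyze_medical_text()
text_response = model.generate_content("Key findings: intermittent chest pain, worse on exertion.")
print(text_response.text)

# Text + image call, mirroring analyze_medical_image(); "sample.png" is a placeholder path
image = Image.open("sample.png").convert("RGB")
image_response = model.generate_content(["Describe the visible structures.", image])
print(image_response.text)

Passing the prompt and the PIL image as a two-element list is the same pattern analyze_medical_image builds in model_input, so a failure here usually points at API key or model configuration rather than the Streamlit layer.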