Update app.py
app.py
CHANGED
@@ -18,64 +18,82 @@ if GEMINI_API_KEY:
Removed lines (several truncated in the diff rendering):

- st.error(f"Failed to configure Google Generative AI: {e}")
- # Initialize models (
- # Using
- TEXT_MODEL_NAME = 'gemini-1.5-
- VISION_MODEL_NAME = 'gemini-1.5-flash' #
- st.error(f"Failed to initialize Gemini models
- # This state should technically not be reached due to earlier st.stop() calls,
- # but it's good defensive programming.
- #
- **Medical Information Analysis Request:**
- **
- **
- **Analysis:**
- 1. **Describe Visible Structures:** Briefly describe
- 2. **Identify Potential Anomalies:** Point out
- 3. **Correlate with User Prompt (if provided):**
- 4. **Limitations:** State that image quality, view, and lack of clinical context limit
- 5. **Disclaimer:** Explicitly state this is
@@ -85,190 +103,200 @@ IMAGE_ANALYSIS_PROMPT_TEMPLATE = """
Removed lines (several truncated in the diff rendering):

- def
- Sends
- text_input: The
- - The analysis text (str) if successful, None otherwise.
- - An error message (str) if an error occurred, None otherwise.
- if not text_input:
- prompt =
- return None, f"Analysis blocked
- st.error(f"
- return None, f"Error communicating with the AI model. Details: {e}"
- image_file:
- user_prompt: Optional text context
- - The analysis text (str) if successful, None otherwise.
- - An error message (str) if an error occurred, None otherwise.
- # Ensure image is opened correctly, handle potential errors
- # Optional: Convert image to a supported format if needed, e.g., RGB
- # Prepare content for the vision model
- # Add safety check for response structure
- return None, f"
- st.error(f"
- return None, f"Error communicating with the AI model. Details: {e}"
- st.set_page_config(
- st.title("🤖 AI
- st.caption(f"
- * This tool
- *
- * **
- *
- * **
- st.sidebar.header("
- "Select
- ("Text
- st.header("Input")
- "Paste symptoms,
- height=
- placeholder="Example:
- key="text_input_area"
- analyze_button = st.button("
- if analyze_button and text_input:
- with st.spinner("🧠 Analyzing Text... Please wait."):
- analysis_result, error_message = analyze_medical_text(text_input)
- if error_message:
- with col2:
- st.error(f"Analysis Failed: {error_message}")
- elif analysis_result:
- with col2:
- st.header("Analysis Results")
- st.markdown(analysis_result) # Use markdown for better formatting
- else:
- # Handle unexpected case where both are None
- with col2:
- st.error("An unknown error occurred during analysis.")
- elif analyze_button and not text_input:
- st.warning("Please enter some text to analyze.")
- "Upload
- "Optional: Add context or specific
- placeholder="Example: '
- analyze_button = st.button("Analyze Image", key="analyze_image_button", type="primary")
- if analyze_button
- st.
- # A simple way is to check if the button was pressed in the current run.
- button_pressed = st.session_state.get('analyze_text_button', False) or st.session_state.get('analyze_image_button', False)
- if not button_pressed:
- with col2:
- st.info("Analysis results will appear here once input is submitted.")
- st.sidebar.
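The hunk opens at line 18, so the module preamble (lines 1-17) is not part of this diff. For orientation only, a minimal sketch of what that preamble presumably provides, inferred from the names the hunk relies on (st, genai, Image, Tuple/Optional/Any, GEMINI_API_KEY, genai_client_configured); this is an illustration, not the actual file:

    # Hypothetical preamble, inferred for illustration -- not part of this commit
    import os
    from typing import Any, Optional, Tuple

    import streamlit as st
    import google.generativeai as genai
    from PIL import Image

    # Key from Streamlit secrets, falling back to an environment variable
    # (the app's own error message mentions both sources)
    GEMINI_API_KEY = st.secrets.get("GEMINI_API_KEY", os.getenv("GEMINI_API_KEY"))
    genai_client_configured = False

    if GEMINI_API_KEY:
        try:
            # ...line 18 of the hunk continues inside this try block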
Updated file content (added lines marked with +):

         genai.configure(api_key=GEMINI_API_KEY)
         genai_client_configured = True
     except Exception as e:
+        st.error(f"Fatal Error: Failed to configure Google Generative AI. Check API Key. Details: {e}")
         st.stop() # Stop execution if configuration fails
 else:
     st.error("⚠️ Gemini API Key not found. Please configure `GEMINI_API_KEY` in Streamlit secrets or environment variables.")
     st.stop() # Stop execution if no API key

+# Initialize models (Consider more powerful model for agentic reasoning if needed)
+# Using 1.5 Pro for text (agentic) and 1.5 Flash for vision might be a good balance
+TEXT_MODEL_NAME = 'gemini-1.5-pro-latest' # Model for agentic text reasoning
+VISION_MODEL_NAME = 'gemini-1.5-flash' # Model for image analysis

 if genai_client_configured:
     try:
+        text_model = genai.GenerativeModel(TEXT_MODEL_NAME)
         vision_model = genai.GenerativeModel(VISION_MODEL_NAME)
+        st.success(f"Successfully initialized models: Text ({TEXT_MODEL_NAME}), Vision ({VISION_MODEL_NAME})", icon="✅")
     except Exception as e:
+        st.error(f"Fatal Error: Failed to initialize Gemini models. Text: {TEXT_MODEL_NAME}, Vision: {VISION_MODEL_NAME}. Details: {e}")
         st.stop()
 else:
     st.error("AI Models could not be initialized due to configuration issues.")
     st.stop()


 # --- Core AI Interaction Functions ---

+# AGENTIC prompt for Text Analysis
+AGENTIC_TEXT_ANALYSIS_PROMPT_TEMPLATE = """
+**Simulated Clinical Reasoning Agent Task:**

+**Role:** AI assistant simulating an agentic clinical reasoning process to support a healthcare professional by structuring information, generating possibilities, and suggesting investigation pathways based *strictly* on the provided text. **This is NOT a diagnosis.**
+
+**Input Data:** Unstructured clinical information (e.g., symptoms, history, basic findings).
+
+**Simulated Agentic Steps (Perform sequentially):**
+
+1. **Information Extraction & Structuring:**
+* Key Demographics (Age, Sex if provided).
+* Primary Symptoms/Signs.
+* Relevant Medical History.
+* Pertinent Negatives (if mentioned).
+
+2. **Differential Considerations Generation:**
+* Based *only* on Step 1, list **potential differential considerations** (possible conditions).
+* **Use cautious language:** "could be consistent with," "warrants consideration," "less likely but possible." **AVOID definitive statements.**
+* Briefly justify each consideration based on findings.
+
+3. **Information Gap Analysis:**
+* Identify critical missing information (e.g., lab results, imaging, exam specifics, duration/onset).

+4. **Suggested Next Steps for Investigation (for Clinician):**
+* Propose logical next steps a **healthcare professional might consider**.
+* Categorize (e.g., Further History, Exam Points, Labs, Imaging).
+* Frame as *suggestions* (e.g., "Consider ordering...", "Assessment of X may be informative").
+
+5. **Mandatory Disclaimer:** Conclude with: "This AI-generated analysis is for informational support only. It is **NOT** a diagnosis and cannot replace the judgment of a qualified healthcare professional."
+
+**Input Clinical Information:**
 ---
 {text_input}
 ---

+**Agentic Analysis:**
 """

+# Standard prompt for Image Analysis
 IMAGE_ANALYSIS_PROMPT_TEMPLATE = """
 **Medical Image Analysis Request:**

 **Context:** Analyze the provided medical image. User may provide additional context or questions.
 **Task:**
+1. **Describe Visible Structures:** Briefly describe main anatomical structures.
+2. **Identify Potential Anomalies:** Point out areas that *appear* abnormal or deviate from typical presentation (e.g., "potential opacity," "altered signal intensity," "possible asymmetry"). Use cautious, descriptive language.
+3. **Correlate with User Prompt (if provided):** Address specific user questions based *only* on visual information.
+4. **Limitations:** State that image quality, view, and lack of clinical context limit analysis.
+5. **Disclaimer:** Explicitly state this is AI visual analysis, not radiological interpretation or diagnosis, requiring review by a qualified professional with clinical context.

 **User's Additional Context/Question (if any):**
 ---
 [lines 100-102 unchanged]
 **Image Analysis:**
 """

+def run_agentic_text_analysis(text_input: str) -> Tuple[Optional[str], Optional[str]]:
     """
+    Sends clinical text to the Gemini text model for simulated agentic analysis.

     Args:
+        text_input: The clinical text provided by the user.

     Returns:
+        Tuple: (analysis_text, error_message)
     """
+    if not text_input or not text_input.strip():
         return None, "Input text cannot be empty."
     try:
+        prompt = AGENTIC_TEXT_ANALYSIS_PROMPT_TEMPLATE.format(text_input=text_input)
+        # Using the designated text model
+        response = text_model.generate_content(prompt)
+
         if response.parts:
             return response.text, None
         elif response.prompt_feedback.block_reason:
+            return None, f"Analysis blocked by safety filters: {response.prompt_feedback.block_reason.name}. Review input."
         else:
+            candidate = response.candidates[0] if response.candidates else None
+            if candidate and candidate.finish_reason != "STOP":
+                return None, f"Analysis stopped prematurely. Reason: {candidate.finish_reason.name}."
+            else:
+                return None, "Received an empty or unexpected response from the AI model."

     except Exception as e:
+        st.error(f"Error during agentic text analysis: {e}", icon="🚨")
+        return None, f"Error communicating with the AI model for text analysis. Details: {e}"

 def analyze_medical_image(image_file: Any, user_prompt: str = "") -> Tuple[Optional[str], Optional[str]]:
     """
     Sends a medical image (and optional prompt) to the Gemini Vision model for analysis.

     Args:
+        image_file: Uploaded image file object from Streamlit.
+        user_prompt: Optional text context/questions from the user.

     Returns:
+        Tuple: (analysis_text, error_message)
     """
     if not image_file:
         return None, "Image file cannot be empty."
     try:
         try:
             image = Image.open(image_file)
             if image.mode != 'RGB':
                 image = image.convert('RGB')
         except Exception as img_e:
             return None, f"Error opening or processing image file: {img_e}"

         prompt_text = IMAGE_ANALYSIS_PROMPT_TEMPLATE.format(user_prompt=user_prompt if user_prompt else "N/A")
         model_input = [prompt_text, image]
+        # Using the designated vision model
         response = vision_model.generate_content(model_input)

         if response.parts:
             return response.text, None
         elif response.prompt_feedback.block_reason:
+            return None, f"Image analysis blocked by safety filters: {response.prompt_feedback.block_reason.name}. May relate to sensitive content policies."
         else:
+            candidate = response.candidates[0] if response.candidates else None
+            if candidate and candidate.finish_reason != "STOP":
+                return None, f"Image analysis stopped prematurely. Reason: {candidate.finish_reason.name}."
+            else:
+                return None, "Received an empty or unexpected response from the AI model for image analysis."

     except Exception as e:
+        st.error(f"Error during image analysis: {e}", icon="🖼️")
+        return None, f"Error communicating with the AI model for image analysis. Details: {e}"


 # --- Streamlit User Interface ---

 def main():
+    st.set_page_config(
+        page_title="AI Clinical Support Demonstrator",
+        layout="wide",
+        initial_sidebar_state="expanded"
+    )

+    st.title("🤖 AI Clinical Support Demonstrator")
+    st.caption(f"Agentic Text Analysis ({TEXT_MODEL_NAME}) & Image Analysis ({VISION_MODEL_NAME})")
+    st.markdown("---")

     # --- CRITICAL DISCLAIMER ---
     st.warning(
         """
+        **🔴 IMPORTANT SAFETY & USE DISCLAIMER 🔴**
+        * This tool **DEMONSTRATES** AI capabilities. It **DOES NOT** provide medical advice or diagnosis.
+        * **Agentic Text Analysis:** Simulates reasoning on text input. Output is illustrative, not diagnostic.
+        * **Image Analysis:** Provides observations on images. Output is **NOT** a radiological interpretation.
+        * AI analysis lacks full clinical context, may be inaccurate, and **CANNOT** replace professional judgment.
+        * **ALWAYS consult qualified healthcare professionals** for diagnosis and treatment.
+        * **PRIVACY:** Do **NOT** upload identifiable patient information (PHI) without explicit consent and adherence to all privacy laws (e.g., HIPAA).
+        """,
+        icon="⚠️"
     )
+    st.markdown("---")
+

+    st.sidebar.header("Analysis Options")
     input_method = st.sidebar.radio(
+        "Select Analysis Type:",
+        ("Agentic Text Analysis", "Medical Image Analysis"),
+        key="input_method_radio",
+        help="Choose 'Agentic Text Analysis' for reasoning simulation on clinical text, or 'Medical Image Analysis' for visual observations on images."
     )
     st.sidebar.markdown("---") # Visual separator

     col1, col2 = st.columns(2)

     with col1:
+        st.header("Input Data")
+        analysis_result = None # Initialize results variables
+        error_message = None
+        output_header = "Analysis Results" # Default header for the output column
+
+        # --- Agentic Text Analysis Input ---
+        if input_method == "Agentic Text Analysis":
+            st.subheader("Clinical Text for Agentic Analysis")
             text_input = st.text_area(
+                "Paste de-identified clinical information (symptoms, history, findings):",
+                height=350, # Slightly larger text area
+                placeholder="Example: 68yo male, sudden SOB & pleuritic chest pain post-flight. HR 110, SpO2 92% RA. No known cardiac hx...",
+                key="text_input_area"
             )
+            analyze_button = st.button("▶️ Run Agentic Text Analysis", key="analyze_text_button", type="primary")

+            if analyze_button:
+                if text_input:
+                    with st.spinner("🧠 Simulating agentic reasoning..."):
+                        analysis_result, error_message = run_agentic_text_analysis(text_input)
+                    output_header = "Simulated Agentic Analysis Output"
+                else:
+                    st.warning("Please enter clinical text to analyze.", icon="☝️")

+        # --- Medical Image Analysis Input ---
+        elif input_method == "Medical Image Analysis":
+            st.subheader("Medical Image for Analysis")
             image_file = st.file_uploader(
+                "Upload a de-identified medical image (e.g., X-ray, CT slice). Supported: PNG, JPG, JPEG.",
                 type=["png", "jpg", "jpeg"],
                 key="image_uploader"
             )
             user_image_prompt = st.text_input(
+                "Optional: Add context or specific question for image analysis:",
+                placeholder="Example: 'Describe findings in the lung fields' or 'Any visible fractures?'",
                 key="image_prompt_input"
             )
+            analyze_button = st.button("🖼️ Analyze Medical Image", key="analyze_image_button", type="primary")
+
+            if analyze_button:
+                if image_file:
+                    st.image(image_file, caption="Uploaded Image Preview", use_column_width=True)
+                    with st.spinner("👁️ Analyzing image..."):
+                        analysis_result, error_message = analyze_medical_image(image_file, user_image_prompt)
+                    output_header = "Medical Image Analysis Output"
+                else:
+                    st.warning("Please upload an image file to analyze.", icon="☝️")
+
+    # --- Output Column ---
+    with col2:
+        st.header(output_header)
+        # Check if a button was pressed in this run (using session state keys is more robust for complex apps,
+        # but checking the result variables works here as they are reset on each run unless persisted).
+        button_pressed = st.session_state.get('analyze_text_button', False) or st.session_state.get('analyze_image_button', False)
+
+        if button_pressed: # Only show results if a button was pressed in this run cycle
+            if error_message:
+                st.error(f"Analysis Failed: {error_message}", icon="❌")
+            elif analysis_result:
+                st.markdown(analysis_result) # Display the successful result
+            # Removed the "unknown error" case here, as the functions should return either result or error message
+        else:
+            st.info("Analysis results will appear here after providing input and clicking the corresponding analysis button.")


+    # --- Sidebar Explanations ---
+    st.sidebar.markdown("---")
+    st.sidebar.header("About The Prompts")
+    with st.sidebar.expander("View Agentic Text Prompt Structure"):
+        st.markdown(f"```plaintext\n{AGENTIC_TEXT_ANALYSIS_PROMPT_TEMPLATE.split('---')[0]} ... [Input Text] ...\n```")
+        st.caption("Guides the AI through structured reasoning steps for text.")
+    with st.sidebar.expander("View Image Analysis Prompt Structure"):
+        st.markdown(f"```plaintext\n{IMAGE_ANALYSIS_PROMPT_TEMPLATE.split('---')[0]} ... [User Prompt] ...\n```")
+        st.caption("Guides the AI to describe visual features and potential anomalies in images.")
+
     st.sidebar.markdown("---")
+    st.sidebar.error(
+        "**Ethical Use Reminder:** AI in medicine requires extreme caution. This tool is for demonstration and education, not clinical practice. Verify all information and rely on professional expertise."
+    )

 if __name__ == "__main__":
     main()
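Two follow-up notes on the new code, offered as suggestions rather than as part of the commit. In current google-generativeai releases, candidate.finish_reason is typically an enum rather than a string, so the check `candidate.finish_reason != "STOP"` tends to hold even when the candidate finished normally; comparing the enum's .name, which the adjacent error message already uses, would be the more robust test. A minimal sketch of that variant:

    # Suggested variant, assuming finish_reason is an enum exposing .name
    # (the f-string on the following line of the committed code already assumes this)
    if candidate and candidate.finish_reason.name != "STOP":
        return None, f"Analysis stopped prematurely. Reason: {candidate.finish_reason.name}."

Separately, newer Streamlit releases deprecate use_column_width in st.image in favor of use_container_width. To try the app locally, install streamlit, google-generativeai, and pillow, provide GEMINI_API_KEY via .streamlit/secrets.toml or an environment variable, and run `streamlit run app.py`.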