Update app.py
app.py
CHANGED
@@ -1,274 +1,207 @@
 import streamlit as st
 import google.generativeai as genai
 import os
-from PIL import Image
-import io
 from typing import Optional, Tuple, Any  # For type hinting

 # --- Configuration and Initialization ---

-# Securely load API key
-# Prioritize Streamlit secrets, fall back to environment variable for flexibility
 GEMINI_API_KEY = st.secrets.get("GEMINI_API_KEY", os.environ.get("GEMINI_API_KEY"))

-# Configure Gemini Client
 genai_client_configured = False
 if GEMINI_API_KEY:
     try:
         genai.configure(api_key=GEMINI_API_KEY)
         genai_client_configured = True
     except Exception as e:
-        st.error(f"Failed to configure Google Generative AI: {e}")
-        st.stop()
 else:
     st.error("⚠️ Gemini API Key not found. Please configure `GEMINI_API_KEY` in Streamlit secrets or environment variables.")
-    st.stop()

-# Initialize models (
-VISION_MODEL_NAME = 'gemini-1.5-flash' # Or 'gemini-pro-vision' if preferred

 if genai_client_configured:
     try:
-        model = genai.GenerativeModel(
-        vision_model = genai.GenerativeModel(
     except Exception as e:
-        st.error(f"Failed to initialize Gemini
         st.stop()
 else:
-    # but it's good defensive programming.
-    st.error("AI Models could not be initialized due to configuration issues.")
     st.stop()

-# --- Core AI

-**Task:**
-1. **Identify Key Findings:** Extract significant symptoms, signs, or data points.
-2. **Potential Considerations:** Based *only* on the provided text, list potential underlying conditions or areas of concern that *might* warrant further investigation by a qualified healthcare professional. Use cautious language (e.g., "suggests potential for," "could be consistent with," "warrants investigation into").
-3. **Risk Factors (if applicable):** Mention any potential risk factors identifiable from the text.
-4. **Information Gaps:** Highlight any missing information that would be crucial for a clinical assessment.
-5. **Disclaimer:** Explicitly state that this analysis is AI-generated, not a diagnosis, and cannot replace professional medical evaluation.

-**Input
----
-{text_input}
----

-3. **Correlate with User Prompt (if provided):** If the user asked a specific question, address it based *only* on the visual information.
-4. **Limitations:** State that image quality, view, and lack of clinical context limit the analysis.
-5. **Disclaimer:** Explicitly state this is an AI-based visual analysis, not a radiological interpretation or diagnosis, and requires review by a qualified radiologist or physician alongside clinical information.

 ---
-{
 ---

 """

-def
     """

     Args:
-        text_input: The

     Returns:
         A tuple containing:
-        - The analysis text (str) if successful, None otherwise.
         - An error message (str) if an error occurred, None otherwise.
     """
-    if not text_input:
         return None, "Input text cannot be empty."
     try:
-        prompt =
         response = model.generate_content(prompt)
-        # Add safety check for response structure if needed (e.g., check for blocked content)
-        if response.parts:
-            return response.text, None
-        elif response.prompt_feedback.block_reason:
-            return None, f"Analysis blocked due to: {response.prompt_feedback.block_reason.name}. Please revise input."
-        else:
-            return None, "Received an empty or unexpected response from the AI."
-    except Exception as e:
-        st.error(f"An error occurred during text analysis: {e}") # Log for debugging
-        return None, f"Error communicating with the AI model. Details: {e}"

-def analyze_medical_image(image_file: Any, user_prompt: str = "") -> Tuple[Optional[str], Optional[str]]:
-    """
-    Sends a medical image (and optional prompt) to the Gemini Vision model for analysis.
-
-    Args:
-        image_file: The uploaded image file object from Streamlit.
-        user_prompt: Optional text context or specific questions from the user.
-
-    Returns:
-        A tuple containing:
-        - The analysis text (str) if successful, None otherwise.
-        - An error message (str) if an error occurred, None otherwise.
-    """
-    if not image_file:
-        return None, "Image file cannot be empty."
-    try:
-        # Ensure image is opened correctly, handle potential errors
-        try:
-            image = Image.open(image_file)
-            # Optional: Convert image to a supported format if needed, e.g., RGB
-            if image.mode != 'RGB':
-                image = image.convert('RGB')
-        except Exception as img_e:
-            return None, f"Error opening or processing image file: {img_e}"

-        prompt_text = IMAGE_ANALYSIS_PROMPT_TEMPLATE.format(user_prompt=user_prompt if user_prompt else "N/A")

-        # Prepare content for the vision model
-        model_input = [prompt_text, image]

-        response = vision_model.generate_content(model_input)

         if response.parts:
             return response.text, None
         elif response.prompt_feedback.block_reason:
         else:

     except Exception as e:
-        st.error(f"

 # --- Streamlit User Interface ---

 def main():
-    st.set_page_config(

-    st.

     # --- CRITICAL DISCLAIMER ---
     st.warning(
         """
-        * This tool
-        """
     )

-    st.
     )

-    )
-    user_image_prompt = st.text_input(
-        "Optional: Add context or specific questions for the image analysis:",
-        placeholder="Example: 'Check for abnormalities in the left lung apex' or 'Patient context: Follow-up scan after treatment'",
-        key="image_prompt_input"
-    )
-    analyze_button = st.button("Analyze Image", key="analyze_image_button", type="primary")

-    if analyze_button and image_file:
-        # Display uploaded image immediately for confirmation
-        st.image(image_file, caption="Uploaded Image Preview", use_column_width=True)
-        with st.spinner("🖼️ Analyzing Image... Please wait."):
-            analysis_result, error_message = analyze_medical_image(image_file, user_image_prompt)

-        if error_message:
-            with col2:
-                st.error(f"Analysis Failed: {error_message}")
-        elif analysis_result:
-            with col2:
-                st.header("Image Analysis Results")
-                st.markdown(analysis_result)
-        else:
-            with col2:
-                st.error("An unknown error occurred during analysis.")

-    elif analyze_button and not image_file:
-        st.warning("Please upload an image file to analyze.")

-    # Display placeholder in result column if nothing submitted yet
-    # Check if 'analysis_result' or 'error_message' exists in session state if needed for persistence,
-    # but for this simpler flow, checking button presses is often sufficient.
-    # A simple way is to check if the button was pressed in the current run.
-    button_pressed = st.session_state.get('analyze_text_button', False) or st.session_state.get('analyze_image_button', False)
-    if not button_pressed:
-        with col2:
-            st.info("Analysis results will appear here once input is submitted.")

-    st.sidebar.markdown("---")
-    st.sidebar.info("Remember: This AI tool is an assistant, not a substitute for professional medical expertise.")

 if __name__ == "__main__":
     main()

 import streamlit as st
 import google.generativeai as genai
 import os
+# from PIL import Image # Removing image focus for this agentic text example
+import io
 from typing import Optional, Tuple, Any  # For type hinting

 # --- Configuration and Initialization ---

+# Securely load API key (Prioritize Secrets, then Env Vars)
 GEMINI_API_KEY = st.secrets.get("GEMINI_API_KEY", os.environ.get("GEMINI_API_KEY"))

+# Configure Gemini Client
 genai_client_configured = False
 if GEMINI_API_KEY:
     try:
         genai.configure(api_key=GEMINI_API_KEY)
         genai_client_configured = True
     except Exception as e:
+        st.error(f"Fatal Error: Failed to configure Google Generative AI. Check API Key and permissions. Details: {e}")
+        st.stop()
 else:
     st.error("⚠️ Gemini API Key not found. Please configure `GEMINI_API_KEY` in Streamlit secrets or environment variables.")
+    st.stop()

+# Initialize models (Using specific models for consistency)
+# Using a potentially more powerful model like 1.5 Pro might be beneficial for complex reasoning
+MODEL_NAME = 'gemini-1.5-pro-latest'  # Or 'gemini-1.5-flash' if speed is critical

 if genai_client_configured:
     try:
+        model = genai.GenerativeModel(MODEL_NAME)
+        # vision_model = genai.GenerativeModel('gemini-pro-vision')  # Keep commented unless adding image capability back
     except Exception as e:
+        st.error(f"Fatal Error: Failed to initialize Gemini model ({MODEL_NAME}). Details: {e}")
         st.stop()
 else:
+    st.error("AI Model could not be initialized due to configuration issues.")
     st.stop()

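The configuration block above resolves the key from `st.secrets` first and falls back to the `GEMINI_API_KEY` environment variable, so the same file works on a hosted Space and on a local machine. If more repeatable output is wanted for the structured analysis, the model could also be built with an explicit generation config; this is a minimal sketch under that assumption, and the temperature and token cap are illustrative values, not part of this commit:

```python
import google.generativeai as genai

# Hypothetical tuning, not in this commit: a low temperature keeps the
# structured report phrasing more conservative and repeatable.
assumed_config = genai.types.GenerationConfig(
    temperature=0.2,         # illustrative value
    max_output_tokens=2048,  # illustrative cap for the report
)
model = genai.GenerativeModel(MODEL_NAME, generation_config=assumed_config)
```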
+# --- Core Agentic AI Simulation Function ---

+# This prompt instructs the LLM to simulate a structured, agent-like reasoning process
+# focused on differential diagnosis support.
+AGENTIC_ANALYSIS_PROMPT_TEMPLATE = """
+**Simulated Clinical Reasoning Agent Task:**

+**Role:** You are an AI assistant simulating an agentic clinical reasoning process to support a healthcare professional. Your goal is NOT to diagnose, but to structure information, generate possibilities, and suggest logical next steps based *strictly* on the provided information.

+**Input Data:** You will receive unstructured clinical information (e.g., symptoms, history, basic findings). Assume this is the *only* information available unless stated otherwise.

+**Simulated Agentic Steps (Perform these sequentially in your response):**
+
+1. **Information Extraction & Structuring:**
+    * Identify and list the key patient demographics (age, sex, if provided).
+    * List the primary symptoms and signs presented.
+    * Summarize relevant medical history points.
+    * Note any explicitly mentioned negative findings (pertinent negatives).
+
+2. **Differential Considerations Generation:**
+    * Based *only* on the structured information from Step 1, generate a list of **potential differential considerations** (possible conditions that *could* explain the findings).
+    * **Use cautious and probabilistic language:** "could be consistent with," "warrants consideration," "less likely but possible," "should be ruled out." **AVOID definitive statements.**
+    * Briefly state the primary rationale linking each consideration to the key findings.

+3. **Information Gap Analysis:**
+    * Identify critical pieces of information typically needed for assessment that are missing from the input (e.g., specific lab results, imaging details, physical exam specifics, duration/onset details).

+4. **Suggested Next Steps for Investigation (for the Clinician):**
+    * Propose logical next steps a **healthcare professional might consider** to narrow down the possibilities or gather missing information.
+    * Categorize suggestions (e.g., Further History Taking, Physical Examination Points, Laboratory Tests, Imaging Studies).
+    * Frame these as *suggestions* for the clinician's judgment (e.g., "Consider ordering...", "Assessment of X may be informative", "Further questioning about Y could clarify...").

+5. **Mandatory Disclaimer:** Conclude with: "This AI-generated analysis is for informational support only. It is **NOT** a diagnosis and cannot replace the judgment of a qualified healthcare professional who must consider the full clinical context, conduct necessary examinations, and interpret investigations."
+
+**Input Clinical Information:**
 ---
+{text_input}
 ---

+**Agentic Analysis:**
 """

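Since `{text_input}` is the template's only placeholder, plain `str.format` is sufficient; any literal braces added to the template later would need to be doubled (`{{`, `}}`) to survive formatting. A quick, purely illustrative check of the interpolation:

```python
sample = "55-year-old female, 3 days of productive cough, fever, no chest pain."
prompt = AGENTIC_ANALYSIS_PROMPT_TEMPLATE.format(text_input=sample)
assert "{text_input}" not in prompt  # placeholder fully substituted
assert sample in prompt              # user text lands between the --- markers
```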
+def run_agentic_reasoning(text_input: str) -> Tuple[Optional[str], Optional[str]]:
     """
+    Simulates an agentic reasoning process on clinical text using the Gemini model.

     Args:
+        text_input: The clinical information provided by the user.

     Returns:
         A tuple containing:
+        - The structured analysis text (str) if successful, None otherwise.
         - An error message (str) if an error occurred, None otherwise.
     """
+    if not text_input or not text_input.strip():
         return None, "Input text cannot be empty."
     try:
+        prompt = AGENTIC_ANALYSIS_PROMPT_TEMPLATE.format(text_input=text_input)
+        # Consider adding safety settings if dealing with potentially sensitive outputs
+        # safety_settings = [...]
+        # response = model.generate_content(prompt, safety_settings=safety_settings)
         response = model.generate_content(prompt)

+        # Check for blocked content or other issues
         if response.parts:
             return response.text, None
         elif response.prompt_feedback.block_reason:
+            return None, f"Analysis blocked by safety filters: {response.prompt_feedback.block_reason.name}. Please review input for potentially harmful content or adjust safety settings if appropriate."
         else:
+            # Handle cases where the response might be empty but not explicitly blocked
+            candidate = response.candidates[0] if response.candidates else None
+            if candidate and candidate.finish_reason.name != "STOP":
+                return None, f"Analysis stopped prematurely. Reason: {candidate.finish_reason.name}. The input might be too complex or ambiguous."
+            else:
+                return None, "Received an empty or unexpected response from the AI model. The model may not have been able to process the request."

     except Exception as e:
+        st.error(f"Critical Error during AI Analysis: {e}", icon="🚨")  # Log for debugging
+        # Provide a user-friendly error message
+        return None, f"An error occurred while communicating with the AI model. Please try again later or check the input. Details: {e}"

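The commented-out `safety_settings` hook above can be filled in with the library's category/threshold pairs if legitimate clinical text keeps tripping the default filters. The thresholds below are assumptions to be weighed against your own safety policy, not a recommendation from this commit:

```python
# Hypothetical policy: relax only the categories that commonly misfire on
# de-identified clinical descriptions; review before enabling.
safety_settings = [
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"},
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
]
response = model.generate_content(prompt, safety_settings=safety_settings)
```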
 # --- Streamlit User Interface ---

 def main():
+    st.set_page_config(
+        page_title="Agentic AI Clinical Reasoning Support",
+        layout="wide",
+        initial_sidebar_state="expanded"
+    )

+    # --- Header ---
+    st.title("🤖 Agentic AI: Clinical Reasoning Support Tool")
+    st.caption(f"Powered by Google Gemini ({MODEL_NAME})")
+    st.markdown("---")

     # --- CRITICAL DISCLAIMER ---
     st.warning(
         """
+        **🔴 EXTREMELY IMPORTANT DISCLAIMER 🔴**
+        * This tool **SIMULATES** an AI reasoning process. It **DOES NOT DIAGNOSE** diseases or provide medical advice.
+        * Outputs are based **SOLELY** on the text input and the AI's internal knowledge, which may be incomplete or contain inaccuracies. **It lacks real-world clinical context.**
+        * **NEVER** use this tool for actual patient diagnosis, treatment decisions, or clinical management. It is intended for **educational and conceptual purposes ONLY**, potentially aiding clinicians in organizing thoughts or exploring possibilities.
+        * **ALWAYS rely on the expertise and judgment of qualified healthcare professionals.**
+        * **PRIVACY ALERT:** Do **NOT** enter identifiable patient information (PHI) unless you comply with all legal and ethical requirements (e.g., HIPAA, GDPR, patient consent). You are responsible for the data you input.
+        """,
+        icon="⚠️"
     )
+    st.markdown("---")
+
+    # --- Input Area ---
+    st.header("Clinical Information Input")
+    st.markdown("Enter de-identified clinical information below (e.g., symptoms, brief history, key findings). The AI will attempt a structured analysis.")

+    text_input = st.text_area(
+        "Enter Clinical Data:",
+        height=300,
+        placeholder="Example: A 68-year-old male presents with sudden onset shortness of breath and pleuritic chest pain. History of recent long-haul flight. Vitals show tachycardia (HR 110) and mild hypoxia (SpO2 92% on room air). No significant cardiac history mentioned...",
+        key="clinical_text_input"
     )

+    # --- Action Button ---
+    analyze_button = st.button("▶️ Run Agentic Analysis", key="analyze_button", type="primary", help="Click to start the simulated reasoning process.")
+
+    st.markdown("---")  # Separator before results
+
+    # --- Output Area ---
+    st.header("Analysis Results")
+
+    if analyze_button:
+        if text_input:
+            with st.spinner("🧠 Simulating agentic reasoning... Please wait."):
+                analysis_result, error_message = run_agentic_reasoning(text_input)
+
+            if error_message:
+                st.error(f"Analysis Failed: {error_message}", icon="❌")
+            elif analysis_result:
+                st.markdown("**Simulated Agent Analysis Output:**")
+                st.markdown(analysis_result)  # Use markdown for better formatting
+            else:
+                st.error("An unexpected issue occurred. No analysis was returned.", icon="❓")
+        else:
+            st.warning("Please enter clinical information in the text area above before analyzing.", icon="☝️")
+    else:
+        st.info("Analysis results will appear here after you enter information and click 'Run Agentic Analysis'.")
+
+    # --- Sidebar for Explanation/Prompt ---
+    st.sidebar.header("About This Tool")
+    st.sidebar.info(
+        "This application demonstrates how an AI model (Gemini) can be prompted to simulate "
+        "a structured, agent-like approach to analyzing clinical information. It focuses on "
+        "differential considerations and suggesting investigation pathways for **clinician review**."
+    )
+    with st.sidebar.expander("View the Agentic Prompt Structure"):
+        st.markdown(f"```plaintext\n{AGENTIC_ANALYSIS_PROMPT_TEMPLATE.split('---')[0]} ... [Input Text] ...\n```")
+        st.caption("The prompt guides the AI to break down the task into logical steps.")
+
+    st.sidebar.header("Ethical Considerations")
+    st.sidebar.error(
+        "**Crucial Reminder:** This AI is a tool, not a clinician. It has limitations and biases. "
+        "Clinical decisions require human expertise, empathy, and comprehensive patient assessment. "
+        "Misuse can lead to harm."
+    )

 if __name__ == "__main__":
     main()
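To try the change locally, the usual Streamlit workflow should apply: install `streamlit` and `google-generativeai`, put `GEMINI_API_KEY` in `.streamlit/secrets.toml` or export it as an environment variable, and run `streamlit run app.py`. On a hosted Space, the key belongs in the repository's secrets so that `st.secrets.get("GEMINI_API_KEY", ...)` resolves it at startup.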