Update agent.py
agent.py
CHANGED
@@ -1,11 +1,11 @@
-import re
-import json
 import requests
-import
 import operator
 from functools import lru_cache
-from typing import Any, Dict, List, Optional, TypedDict, Annotated

 from langchain_groq import ChatGroq
 from langchain_community.tools.tavily_search import TavilySearchResults
@@ -15,490 +15,333 @@ from langchain_core.tools import tool
 from langgraph.prebuilt import ToolExecutor
 from langgraph.graph import StateGraph, END

-# ---
 UMLS_API_KEY = os.environ.get("UMLS_API_KEY")
 GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
 TAVILY_API_KEY = os.environ.get("TAVILY_API_KEY")

 AGENT_MODEL_NAME = "llama3-70b-8192"
 AGENT_TEMPERATURE = 0.1
 MAX_SEARCH_RESULTS = 3

 UMLS_AUTH_ENDPOINT = "https://utslogin.nlm.nih.gov/cas/v1/api-key"
 RXNORM_API_BASE = "https://rxnav.nlm.nih.gov/REST"
 OPENFDA_API_BASE = "https://api.fda.gov/drug/label.json"

-class ClinicalPrompts:
-    SYSTEM_PROMPT = (
-        """
-        You are SynapseAI, an expert AI clinical assistant in an interactive consultation.
-        Analyze patient data, provide differential diagnoses, suggest management plans,
-        and identify risks according to current standards of care.
-
-        1. Process information sequentially; use full conversation history.
-        2. Ask for clarification if data is insufficient; do not guess.
-        3. When ready, output a complete JSON assessment as specified.
-        4. Before prescribing, run drug-interaction checks and report results.
-        5. Flag urgent red flags immediately.
-        6. Use tools logically; await results when needed.
-        7. Query clinical guidelines via tavily_search_results and cite them.
-        8. Be concise, accurate, and use standard terminology.
-        """
-    )
-
-# --- Helper Functions ---
 @lru_cache(maxsize=256)
 def get_rxcui(drug_name: str) -> Optional[str]:
-    if not drug_name:
-        return
-        # Fallback lookup
-        params = {"name": name}
-        resp = requests.get(f"{RXNORM_API_BASE}/drugs.json", params=params, timeout=10)
-        resp.raise_for_status()
-        data = resp.json()
-        groups = data.get("drugGroup", {}).get("conceptGroup", [])
-        for grp in groups:
-            if grp.get("tty") in ["SBD", "SCD", "GPCK", "BPCK", "IN", "MIN", "PIN"]:
-                props = grp.get("conceptProperties", [])
-                if props:
-                    return props[0].get("rxcui")
-    except Exception:
-        traceback.print_exc()
-    return None

 @lru_cache(maxsize=128)
-def get_openfda_label(
-    terms = []
-    if rxcui:
-        terms.append(f'spl_rxnorm_code:"{rxcui}" OR openfda.rxcui:"{rxcui}"')
-    if drug_name:
-        name = drug_name.lower()
-        terms.append(f'(openfda.brand_name:"{name}" OR openfda.generic_name:"{name}")')
-    query = " OR ".join(terms)
-    params = {"search": query, "limit": 1}
     try:
     return None

-    """Return snippets where any term appears in texts."""
-    snippets = []
-    lowers = [t.lower() for t in terms if t]
-    for txt in texts or []:
-        if not isinstance(txt, str):
-            continue
-        low_txt = txt.lower()
-        for term in lowers:
-            idx = low_txt.find(term)
-            if idx >= 0:
-                start = max(0, idx - 50)
-                end = min(len(txt), idx + len(term) + 100)
-                snippet = txt[start:end]
-                snippet = re.sub(
-                    f"({re.escape(term)})",
-                    r"**\1**",
-                    snippet,
-                    count=1,
-                    flags=re.IGNORECASE,
-                )
-                snippets.append(f"...{snippet}...")
-                break
-    return snippets

-def parse_bp(bp_str: str) -> Optional[tuple[int, int]]:
-    """Parse blood pressure string 'systolic/diastolic'."""
-    match = re.match(r"(\d{1,3})\s*/\s*(\d{1,3})", bp_str or "")
-    if match:
-        return int(match.group(1)), int(match.group(2))
-    return None

-def check_red_flags(patient_data: Dict) -> List[str]:
-    """Identify critical red flags from patient data."""
     flags = []
-    if not patient_data:
-    symptoms = [s.lower() for s in patient_data.get("hpi", {}).get("symptoms", [])]
     vitals = patient_data.get("vitals", {})
-    history = patient_data.get("pmh", {}).get("conditions", "")

-    return "No patient data provided."
-    sections = []
-    for key, val in data.items():
-        title = key.replace("_", " ").title()
-        if isinstance(val, dict) and any(val.values()):
-            lines = [f"**{title}:**"]
-            for subk, subv in val.items():
-                if subv:
-                    lines.append(f"- {subk.replace('_', ' ').title()}: {subv}")
-            sections.append("\n".join(lines))
-        elif isinstance(val, list) and val:
-            sections.append(f"**{title}:** {', '.join(map(str, val))}")
-        elif val:
-            sections.append(f"**{title}:** {val}")
-    return "\n\n".join(sections)

-# --- Tool Schemas & Definitions ---
-class LabOrderInput(BaseModel):
-    test_name: str = Field(...)
-    reason: str = Field(...)
-    priority: str = Field("Routine")

-class PrescriptionInput(BaseModel):
-    medication_name: str = Field(...)
-    dosage: str = Field(...)
-    route: str = Field(...)
-    frequency: str = Field(...)
-    duration: str = Field("As directed")
-    reason: str = Field(...)

-class InteractionCheckInput(BaseModel):
-    potential_prescription: str = Field(...)
-    current_medications: Optional[List[str]] = Field(None)
-    allergies: Optional[List[str]] = Field(None)

-class FlagRiskInput(BaseModel):
-    risk_description: str = Field(...)
-    urgency: str = Field("High")

 @tool("order_lab_test", args_schema=LabOrderInput)
 def order_lab_test(test_name: str, reason: str, priority: str = "Routine") -> str:
-        "status": "success",
-        "message": f"Lab Ordered: {test_name} ({priority})",
-        "details": f"Reason: {reason}"
-    }
-    return json.dumps(result)

 @tool("prescribe_medication", args_schema=PrescriptionInput)
-def prescribe_medication(
-    medication_name:
-    dosage: str,
-    route: str,
-    frequency: str,
-    duration: str,
-    reason: str
-) -> str:
-    result = {
-        "status": "success",
-        "message": f"Prescription Prepared: {medication_name} {dosage} {route} {frequency}",
-        "details": f"Duration: {duration}. Reason: {reason}"
-    }
-    return json.dumps(result)

 @tool("check_drug_interactions", args_schema=InteractionCheckInput)
-def check_drug_interactions(
-    else:
-        warnings.append(f"INFO: Skipped interaction check for '{potential_prescription}'.")
-    status = "warning" if warnings else "clear"
-    message = (
-        f"Interaction/Allergy check for '{potential_prescription}': {len(warnings)} issue(s)."
-        if warnings else
-        f"No major issues for '{potential_prescription}'."
-    )
-    return json.dumps({"status": status, "message": message, "warnings": warnings})

 @tool("flag_risk", args_schema=FlagRiskInput)
 def flag_risk(risk_description: str, urgency: str) -> str:
-        "message": f"Risk '{risk_description}' flagged with {urgency} urgency."
-    })

-# Initialize search tool and tool list
 search_tool = TavilySearchResults(max_results=MAX_SEARCH_RESULTS, name="tavily_search_results")
 all_tools = [order_lab_test, prescribe_medication, check_drug_interactions, flag_risk, search_tool]

-# --- LangGraph Setup ---
-class AgentState(TypedDict):
-    messages: Annotated[List[Any], operator.add]
-    patient_data: Optional[Dict]
-    summary: Optional[str]
-    interaction_warnings: Optional[List[str]]

-# LLM and executor
 llm = ChatGroq(temperature=AGENT_TEMPERATURE, model=AGENT_MODEL_NAME)
 model_with_tools = llm.bind_tools(all_tools)
 tool_executor = ToolExecutor(all_tools)

-    responses = tool_executor.batch(valid_calls, return_exceptions=True)
-    for call, resp in zip(valid_calls, responses):
-        if isinstance(resp, Exception):
-            traceback.print_exc()
-            content = json.dumps({"status": "error", "message": str(resp)})
-        else:
-            content = str(resp)
-            if call['name'] == 'check_drug_interactions':
-                data = json.loads(content)
-                if data.get('warnings'):
-                    warnings.extend(data['warnings'])
-        results.append(ToolMessage(content=content, tool_call_id=call['id'], name=call['name']))
-    except Exception as e:
-        traceback.print_exc()
-        content = json.dumps({"status": "error", "message": str(e)})
-        for c in valid_calls:
-            results.append(ToolMessage(content=content, tool_call_id=c['id'], name=c['name']))
-    return {"messages": results, "interaction_warnings": warnings or None}

-def reflection_node(state: AgentState) -> Dict:
-    """Review interaction warnings and adjust plan if needed."""
-    warnings = state.get('interaction_warnings')
-    if not warnings:
-        return {"messages": [], "interaction_warnings": None}
-    # Find the AI message that triggered the warnings
-    trigger_id = None
     for msg in reversed(state['messages']):
-        if isinstance(msg, ToolMessage) and msg.name ==
-            break
-        return {"messages": [AIMessage(content=resp.content)], "interaction_warnings": None}
-    except Exception as e:
-        traceback.print_exc()
-        return {"messages": [AIMessage(content=f"Reflection error: {e}")], "interaction_warnings": None}

 def should_continue(state: AgentState) -> str:
-    if not isinstance(
-    if getattr(
-        return 'continue_tools'
-    return 'end_conversation_turn'

 def after_tools_router(state: AgentState) -> str:

 class ClinicalAgent:
     def __init__(self):
-            'tools', after_tools_router,
-            {'reflect_on_warnings': 'reflection', 'continue_to_agent': 'agent'}
-        )
-        graph.add_edge('reflection', 'agent')
-        self.app = graph.compile()

     def invoke_turn(self, state: Dict) -> Dict:
         try:
-            return
         except Exception as e:
-            traceback.print_exc()
-            return {
-                'messages': state.get('messages', []) + [err],
-                'patient_data': state.get('patient_data'),
-                'summary': state.get('summary'),
-                'interaction_warnings': None
-            }

# agent.py
import requests
import json
import re
import os
import operator
import traceback
from functools import lru_cache

from langchain_groq import ChatGroq
from langchain_community.tools.tavily_search import TavilySearchResults

from langgraph.prebuilt import ToolExecutor
from langgraph.graph import StateGraph, END

from typing import Optional, List, Dict, Any, TypedDict, Annotated

# --- Environment Variable Loading ---
# Keys are primarily used here, but checked in app.py for UI feedback
UMLS_API_KEY = os.environ.get("UMLS_API_KEY")
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
TAVILY_API_KEY = os.environ.get("TAVILY_API_KEY")

# --- Configuration & Constants ---
AGENT_MODEL_NAME = "llama3-70b-8192"
AGENT_TEMPERATURE = 0.1
MAX_SEARCH_RESULTS = 3

class ClinicalPrompts:
    # The comprehensive system prompt defining agent behavior
    SYSTEM_PROMPT = """
    You are SynapseAI, an expert AI clinical assistant engaged in an interactive consultation.
    Your goal is to support healthcare professionals by analyzing patient data, providing differential diagnoses, suggesting evidence-based management plans, and identifying risks according to current standards of care.

    **Core Directives for this Conversation:**
    1. **Analyze Sequentially:** Process information turn-by-turn. Base your responses on the *entire* conversation history.
    2. **Seek Clarity:** If the provided information is insufficient or ambiguous for a safe assessment, CLEARLY STATE what specific additional information or clarification is needed. Do NOT guess or make unsafe assumptions.
    3. **Structured Assessment (When Ready):** When you have sufficient information and have performed necessary checks (like interactions, guideline searches), provide a comprehensive assessment using the following JSON structure. Output this JSON structure as the primary content of your response when you are providing the full analysis. Do NOT output incomplete JSON. If you need to ask a question or perform a tool call first, do that instead of outputting this structure.
    ```json
    {
      "assessment": "Concise summary of the patient's presentation and key findings based on the conversation.",
      "differential_diagnosis": [
        {"diagnosis": "Primary Diagnosis", "likelihood": "High/Medium/Low", "rationale": "Supporting evidence from conversation..."},
        {"diagnosis": "Alternative Diagnosis 1", "likelihood": "Medium/Low", "rationale": "Supporting/Refuting evidence..."},
        {"diagnosis": "Alternative Diagnosis 2", "likelihood": "Low", "rationale": "Why it's less likely but considered..."}
      ],
      "risk_assessment": {
        "identified_red_flags": ["List any triggered red flags based on input and analysis"],
        "immediate_concerns": ["Specific urgent issues requiring attention (e.g., sepsis risk, ACS rule-out)"],
        "potential_complications": ["Possible future issues based on presentation"]
      },
      "recommended_plan": {
        "investigations": ["List specific lab tests or imaging required. Use 'order_lab_test' tool."],
        "therapeutics": ["Suggest specific treatments or prescriptions. Use 'prescribe_medication' tool. MUST check interactions first using 'check_drug_interactions'."],
        "consultations": ["Recommend specialist consultations if needed."],
        "patient_education": ["Key points for patient communication."]
      },
      "rationale_summary": "Justification for assessment/plan. **Crucially, if relevant (e.g., ACS, sepsis, common infections), use 'tavily_search_results' to find and cite current clinical practice guidelines (e.g., 'latest ACC/AHA chest pain guidelines 202X', 'Surviving Sepsis Campaign guidelines') supporting your recommendations.** Include summary of guideline findings here.",
      "interaction_check_summary": "Summary of findings from 'check_drug_interactions' if performed."
    }
    ```
    4. **Safety First - Interactions:** BEFORE suggesting a new prescription via `prescribe_medication`, you MUST FIRST use `check_drug_interactions` in a preceding or concurrent tool call. Report the findings from the interaction check. If significant interactions exist, modify the plan or state the contraindication clearly.
    5. **Safety First - Red Flags:** Use the `flag_risk` tool IMMEDIATELY if critical red flags requiring urgent action are identified at any point in the conversation.
    6. **Tool Use:** Employ tools (`order_lab_test`, `prescribe_medication`, `check_drug_interactions`, `flag_risk`, `tavily_search_results`) logically within the conversational flow. Wait for tool results before proceeding if the result is needed for the next step (e.g., wait for interaction check before confirming prescription in the structured JSON).
    7. **Evidence & Guidelines:** Actively use `tavily_search_results` not just for general knowledge, but specifically to query for and incorporate **current clinical practice guidelines** relevant to the patient's presentation (e.g., chest pain, shortness of breath, suspected infection). Summarize findings in the `rationale_summary` when providing the structured output.
    8. **Conciseness & Flow:** Be medically accurate and concise. Use standard terminology. Respond naturally in conversation (asking questions, acknowledging info) until ready for the full structured JSON output.
    """

# --- API Constants & Helper Functions ---
UMLS_AUTH_ENDPOINT = "https://utslogin.nlm.nih.gov/cas/v1/api-key"
RXNORM_API_BASE = "https://rxnav.nlm.nih.gov/REST"
OPENFDA_API_BASE = "https://api.fda.gov/drug/label.json"

@lru_cache(maxsize=256)
def get_rxcui(drug_name: str) -> Optional[str]:
    """Uses RxNorm API to find the RxCUI for a given drug name."""
    if not drug_name or not isinstance(drug_name, str):
        return None
    drug_name = drug_name.strip()
    if not drug_name:
        return None
    print(f"RxNorm Lookup for: '{drug_name}'")
    try:
        # Try direct lookup first
        params = {"name": drug_name, "search": 1}
        response = requests.get(f"{RXNORM_API_BASE}/rxcui.json", params=params, timeout=10)
        response.raise_for_status()
        data = response.json()
        if data and "idGroup" in data and "rxnormId" in data["idGroup"]:
            rxcui = data["idGroup"]["rxnormId"][0]
            print(f"  Found RxCUI: {rxcui} for '{drug_name}'")
            return rxcui
        else:
            # Fallback to /drugs search
            params = {"name": drug_name}
            response = requests.get(f"{RXNORM_API_BASE}/drugs.json", params=params, timeout=10)
            response.raise_for_status()
            data = response.json()
            if data and "drugGroup" in data and "conceptGroup" in data["drugGroup"]:
                for group in data["drugGroup"]["conceptGroup"]:
                    if group.get("tty") in ["SBD", "SCD", "GPCK", "BPCK", "IN", "MIN", "PIN"]:
                        if "conceptProperties" in group and group["conceptProperties"]:
                            rxcui = group["conceptProperties"][0].get("rxcui")
                            if rxcui:
                                print(f"  Found RxCUI (via /drugs): {rxcui} for '{drug_name}'")
                                return rxcui
        print(f"  RxCUI not found for '{drug_name}'.")
        return None
    except requests.exceptions.RequestException as e:
        print(f"  Error fetching RxCUI for '{drug_name}': {e}")
        return None
    except json.JSONDecodeError as e:
        print(f"  Error decoding RxNorm JSON response for '{drug_name}': {e}")
        return None
    except Exception as e:
        print(f"  Unexpected error in get_rxcui for '{drug_name}': {e}")
        return None
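
# Example (illustrative): get_rxcui("lisinopril") first queries
#   {RXNORM_API_BASE}/rxcui.json?name=lisinopril&search=1
# and falls back to /drugs.json; it returns the first matching RxCUI string,
# or None when neither lookup (or the HTTP call itself) succeeds.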

@lru_cache(maxsize=128)
def get_openfda_label(rxcui: Optional[str] = None, drug_name: Optional[str] = None) -> Optional[dict]:
    """Fetches drug label information from OpenFDA using RxCUI or drug name."""
    if not rxcui and not drug_name:
        return None
    print(f"OpenFDA Label Lookup for: RXCUI={rxcui}, Name={drug_name}")
    search_terms = []
    if rxcui:
        search_terms.append(f'spl_rxnorm_code:"{rxcui}" OR openfda.rxcui:"{rxcui}"')
    if drug_name:
        search_terms.append(f'(openfda.brand_name:"{drug_name.lower()}" OR openfda.generic_name:"{drug_name.lower()}")')
    search_query = " OR ".join(search_terms)
    params = {"search": search_query, "limit": 1}
    try:
        response = requests.get(OPENFDA_API_BASE, params=params, timeout=15)
        response.raise_for_status()
        data = response.json()
        if data and "results" in data and data["results"]:
            print(f"  Found OpenFDA label for query: {search_query}")
            return data["results"][0]
        print(f"  No OpenFDA label found for query: {search_query}")
        return None
    except requests.exceptions.RequestException as e:
        print(f"  Error fetching OpenFDA label: {e}")
        return None
    except json.JSONDecodeError as e:
        print(f"  Error decoding OpenFDA JSON response: {e}")
        return None
    except Exception as e:
        print(f"  Unexpected error in get_openfda_label: {e}")
        return None
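
# Example (illustrative): get_openfda_label(drug_name="lisinopril") searches OpenFDA by
# brand/generic name (or by RxCUI when one is passed) and returns the first label record,
# a dict whose sections ("contraindications", "warnings", "drug_interactions", ...) are
# lists of strings consumed by check_drug_interactions below.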

def search_text_list(text_list: Optional[List[str]], search_terms: List[str]) -> List[str]:
    """Case-insensitive search for any search_term within a list of text strings. Returns snippets."""
    found_snippets = []
    if not text_list or not search_terms:
        return found_snippets
    search_terms_lower = [str(term).lower() for term in search_terms if term]
    for text_item in text_list:
        if not isinstance(text_item, str):
            continue
        text_item_lower = text_item.lower()
        for term in search_terms_lower:
            if term in text_item_lower:
                start_index = text_item_lower.find(term)
                snippet_start = max(0, start_index - 50)
                snippet_end = min(len(text_item), start_index + len(term) + 100)
                snippet = text_item[snippet_start:snippet_end]
                snippet = re.sub(f"({re.escape(term)})", r"**\1**", snippet, count=1, flags=re.IGNORECASE)  # Highlight match
                found_snippets.append(f"...{snippet}...")
                break  # Only report first match per text item
    return found_snippets
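
# Example (illustrative): search_text_list(["May increase lithium levels."], ["lithium"])
# returns something like ["...May increase **lithium** levels...."] - a snippet keeping up to
# ~50 characters of leading and ~100 characters of trailing context with the first match bolded.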

# --- Clinical Helper Functions ---
def parse_bp(bp_string: str) -> Optional[tuple[int, int]]:
    """Parses BP string like '120/80' into (systolic, diastolic) integers."""
    if not isinstance(bp_string, str):
        return None
    match = re.match(r"(\d{1,3})\s*/\s*(\d{1,3})", bp_string.strip())
    if match:
        return int(match.group(1)), int(match.group(2))
    return None
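
# Example (illustrative): parse_bp("128/82") -> (128, 82); parse_bp("not recorded") -> None.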

def check_red_flags(patient_data: dict) -> List[str]:
    """Checks patient data against predefined red flags."""
    flags = []
    if not patient_data:
        return flags
    symptoms = patient_data.get("hpi", {}).get("symptoms", [])
    vitals = patient_data.get("vitals", {})
    history = patient_data.get("pmh", {}).get("conditions", "")
    symptoms_lower = [str(s).lower() for s in symptoms if isinstance(s, str)]

    # Symptom Flags
    if "chest pain" in symptoms_lower: flags.append("Red Flag: Chest Pain reported.")
    if "shortness of breath" in symptoms_lower: flags.append("Red Flag: Shortness of Breath reported.")
    if "severe headache" in symptoms_lower: flags.append("Red Flag: Severe Headache reported.")
    if "sudden vision loss" in symptoms_lower: flags.append("Red Flag: Sudden Vision Loss reported.")
    if "weakness on one side" in symptoms_lower: flags.append("Red Flag: Unilateral Weakness reported (potential stroke).")
    if "hemoptysis" in symptoms_lower: flags.append("Red Flag: Hemoptysis (coughing up blood).")
    if "syncope" in symptoms_lower: flags.append("Red Flag: Syncope (fainting).")

    # Vital Sign Flags
    if vitals:
        temp = vitals.get("temp_c"); hr = vitals.get("hr_bpm"); rr = vitals.get("rr_rpm")
        spo2 = vitals.get("spo2_percent"); bp_str = vitals.get("bp_mmhg")
        if temp is not None and temp >= 38.5: flags.append(f"Red Flag: Fever ({temp}°C).")
        if hr is not None and hr >= 120: flags.append(f"Red Flag: Tachycardia ({hr} bpm).")
        if hr is not None and hr <= 50: flags.append(f"Red Flag: Bradycardia ({hr} bpm).")
        if rr is not None and rr >= 24: flags.append(f"Red Flag: Tachypnea ({rr} rpm).")
        if spo2 is not None and spo2 <= 92: flags.append(f"Red Flag: Hypoxia ({spo2}%).")
        if bp_str:
            bp = parse_bp(bp_str)
            if bp:
                if bp[0] >= 180 or bp[1] >= 110: flags.append(f"Red Flag: Hypertensive Urgency/Emergency (BP: {bp_str} mmHg).")
                if bp[0] <= 90 or bp[1] <= 60: flags.append(f"Red Flag: Hypotension (BP: {bp_str} mmHg).")

    # History Flags
    if history and isinstance(history, str):
        history_lower = history.lower()
        if "history of mi" in history_lower and "chest pain" in symptoms_lower: flags.append("Red Flag: History of MI with current Chest Pain.")
        if "history of dvt/pe" in history_lower and "shortness of breath" in symptoms_lower: flags.append("Red Flag: History of DVT/PE with current Shortness of Breath.")

    return list(set(flags))  # Unique flags
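
# Example (illustrative): {"hpi": {"symptoms": ["Chest Pain"]}, "vitals": {"hr_bpm": 130, "spo2_percent": 90}}
# yields flags for Chest Pain, Tachycardia (130 bpm) and Hypoxia (90%); ordering is not
# deterministic because duplicates are removed via set().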

def format_patient_data_for_prompt(data: dict) -> str:
    """Formats the patient dictionary into a readable string for the LLM."""
    if not data:
        return "No patient data provided."
    prompt_str = ""
    for key, value in data.items():
        section_title = key.replace('_', ' ').title()
        if isinstance(value, dict) and value:
            has_content = any(sub_value for sub_value in value.values())
            if has_content:
                prompt_str += f"**{section_title}:**\n"
                for sub_key, sub_value in value.items():
                    if sub_value:
                        prompt_str += f"  - {sub_key.replace('_', ' ').title()}: {sub_value}\n"
        elif isinstance(value, list) and value:
            prompt_str += f"**{section_title}:** {', '.join(map(str, value))}\n"
        elif value and not isinstance(value, dict):
            prompt_str += f"**{section_title}:** {value}\n"
    return prompt_str.strip()


# --- Tool Definitions ---
class LabOrderInput(BaseModel):
    test_name: str = Field(...)
    reason: str = Field(...)
    priority: str = Field("Routine")

class PrescriptionInput(BaseModel):
    medication_name: str = Field(...)
    dosage: str = Field(...)
    route: str = Field(...)
    frequency: str = Field(...)
    duration: str = Field("As directed")
    reason: str = Field(...)

class InteractionCheckInput(BaseModel):
    potential_prescription: str = Field(...)
    current_medications: Optional[List[str]] = Field(None)
    allergies: Optional[List[str]] = Field(None)

class FlagRiskInput(BaseModel):
    risk_description: str = Field(...)
    urgency: str = Field("High")
@tool("order_lab_test", args_schema=LabOrderInput)
|
195 |
def order_lab_test(test_name: str, reason: str, priority: str = "Routine") -> str:
|
196 |
+
print(f"Executing order_lab_test: {test_name}, Reason: {reason}, Priority: {priority}"); return json.dumps({"status": "success", "message": f"Lab Ordered: {test_name} ({priority})", "details": f"Reason: {reason}"})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
197 |
@tool("prescribe_medication", args_schema=PrescriptionInput)
|
198 |
+
def prescribe_medication(medication_name: str, dosage: str, route: str, frequency: str, duration: str, reason: str) -> str:
|
199 |
+
print(f"Executing prescribe_medication: {medication_name} {dosage}..."); return json.dumps({"status": "success", "message": f"Prescription Prepared: {medication_name} {dosage} {route} {frequency}", "details": f"Duration: {duration}. Reason: {reason}"})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
200 |
@tool("check_drug_interactions", args_schema=InteractionCheckInput)
|
201 |
+
def check_drug_interactions(potential_prescription: str, current_medications: Optional[List[str]] = None, allergies: Optional[List[str]] = None) -> str:
|
202 |
+
print(f"\n--- Executing REAL check_drug_interactions ---"); print(f"Checking potential prescription: '{potential_prescription}'"); warnings = []; potential_med_lower = potential_prescription.lower().strip();
|
203 |
+
current_meds_list = current_medications or []; allergies_list = allergies or []; current_med_names_lower = [];
|
204 |
+
for med in current_meds_list: match = re.match(r"^\s*([a-zA-Z\-]+)", str(med));
|
205 |
+
if match: current_med_names_lower.append(match.group(1).lower());
|
206 |
+
allergies_lower = [str(a).lower().strip() for a in allergies_list if a]; print(f" Against Current Meds (names): {current_med_names_lower}"); print(f" Against Allergies: {allergies_lower}");
|
207 |
+
print(f" Step 1: Normalizing '{potential_prescription}'..."); potential_rxcui = get_rxcui(potential_prescription); potential_label = get_openfda_label(rxcui=potential_rxcui, drug_name=potential_prescription);
|
208 |
+
if not potential_rxcui and not potential_label: warnings.append(f"INFO: Could not reliably identify '{potential_prescription}'. Checks may be incomplete.");
|
209 |
+
print(" Step 2: Performing Allergy Check...");
|
210 |
+
for allergy in allergies_lower:
|
211 |
+
if allergy == potential_med_lower: warnings.append(f"CRITICAL ALLERGY (Name Match): Patient allergic to '{allergy}'. Potential prescription is '{potential_prescription}'.");
|
212 |
+
elif allergy in ["penicillin", "pcns"] and potential_med_lower in ["amoxicillin", "ampicillin", "augmentin", "piperacillin"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to Penicillin. High risk with '{potential_prescription}'.");
|
213 |
+
elif allergy == "sulfa" and potential_med_lower in ["sulfamethoxazole", "bactrim", "sulfasalazine"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to Sulfa. High risk with '{potential_prescription}'.");
|
214 |
+
elif allergy in ["nsaids", "aspirin"] and potential_med_lower in ["ibuprofen", "naproxen", "ketorolac", "diclofenac"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to NSAIDs/Aspirin. Risk with '{potential_prescription}'.");
|
215 |
+
if potential_label: contraindications = potential_label.get("contraindications"); warnings_section = potential_label.get("warnings_and_cautions") or potential_label.get("warnings");
|
216 |
+
if contraindications: allergy_mentions_ci = search_text_list(contraindications, allergies_lower);
|
217 |
+
if allergy_mentions_ci: warnings.append(f"ALLERGY RISK (Contraindication Found): Label for '{potential_prescription}' mentions contraindication potentially related to patient allergies: {'; '.join(allergy_mentions_ci)}");
|
218 |
+
if warnings_section: allergy_mentions_warn = search_text_list(warnings_section, allergies_lower);
|
219 |
+
if allergy_mentions_warn: warnings.append(f"ALLERGY RISK (Warning Found): Label for '{potential_prescription}' mentions warnings potentially related to patient allergies: {'; '.join(allergy_mentions_warn)}");
|
220 |
+
print(" Step 3: Performing Drug-Drug Interaction Check...");
|
221 |
+
if potential_rxcui or potential_label:
|
222 |
+
for current_med_name in current_med_names_lower:
|
223 |
+
if not current_med_name or current_med_name == potential_med_lower: continue; print(f" Checking interaction between '{potential_prescription}' and '{current_med_name}'..."); current_rxcui = get_rxcui(current_med_name); current_label = get_openfda_label(rxcui=current_rxcui, drug_name=current_med_name); search_terms_for_current = [current_med_name];
|
224 |
+
if current_rxcui: search_terms_for_current.append(current_rxcui); search_terms_for_potential = [potential_med_lower];
|
225 |
+
if potential_rxcui: search_terms_for_potential.append(potential_rxcui); interaction_found_flag = False;
|
226 |
+
if potential_label and potential_label.get("drug_interactions"): interaction_mentions = search_text_list(potential_label.get("drug_interactions"), search_terms_for_current);
|
227 |
+
if interaction_mentions: warnings.append(f"Potential Interaction ({potential_prescription.capitalize()} Label): Mentions '{current_med_name.capitalize()}'. Snippets: {'; '.join(interaction_mentions)}"); interaction_found_flag = True;
|
228 |
+
if current_label and current_label.get("drug_interactions") and not interaction_found_flag: interaction_mentions = search_text_list(current_label.get("drug_interactions"), search_terms_for_potential);
|
229 |
+
if interaction_mentions: warnings.append(f"Potential Interaction ({current_med_name.capitalize()} Label): Mentions '{potential_prescription.capitalize()}'. Snippets: {'; '.join(interaction_mentions)}");
|
230 |
+
else: warnings.append(f"INFO: Drug-drug interaction check skipped for '{potential_prescription}' as it could not be identified via RxNorm/OpenFDA.");
|
231 |
+
final_warnings = list(set(warnings)); status = "warning" if any("CRITICAL" in w or "Interaction" in w or "RISK" in w for w in final_warnings) else "clear";
|
232 |
+
if not final_warnings: status = "clear"; message = f"Interaction/Allergy check for '{potential_prescription}': {len(final_warnings)} potential issue(s) identified using RxNorm/OpenFDA." if final_warnings else f"No major interactions or allergy issues identified for '{potential_prescription}' based on RxNorm/OpenFDA lookup."; print(f"--- Interaction Check Complete ---");
|
233 |
+
return json.dumps({"status": status, "message": message, "warnings": final_warnings})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
234 |
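
# Example (illustrative): check_drug_interactions("ibuprofen",
#   current_medications=["Lisinopril 10mg daily"], allergies=["Aspirin"])
# returns a JSON string with status "warning" and a POTENTIAL CROSS-ALLERGY entry from the
# NSAIDs/Aspirin rule; any drug-drug warnings depend on what the live RxNorm/OpenFDA
# lookups return at call time.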
@tool("flag_risk", args_schema=FlagRiskInput)
|
235 |
def flag_risk(risk_description: str, urgency: str) -> str:
|
236 |
+
print(f"Executing flag_risk: {risk_description}, Urgency: {urgency}"); # UI part in app.py
|
237 |
+
return json.dumps({"status": "flagged", "message": f"Risk '{risk_description}' flagged with {urgency} urgency."})
|
|
|
|
|
|
|
|
|
|
|
238 |
search_tool = TavilySearchResults(max_results=MAX_SEARCH_RESULTS, name="tavily_search_results")
|
239 |
all_tools = [order_lab_test, prescribe_medication, check_drug_interactions, flag_risk, search_tool]
|
240 |
|
241 |
+

# --- LangGraph State & Nodes ---
class AgentState(TypedDict):
    messages: Annotated[list[Any], operator.add]
    patient_data: Optional[dict]
    summary: Optional[str]
    interaction_warnings: Optional[List[str]]

llm = ChatGroq(temperature=AGENT_TEMPERATURE, model=AGENT_MODEL_NAME)
model_with_tools = llm.bind_tools(all_tools)
tool_executor = ToolExecutor(all_tools)
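
# Turn flow (as wired in ClinicalAgent.__init__ below): agent -> tools when the LLM emits
# tool calls, tools -> reflection when an interaction check produced warnings (otherwise
# back to agent), reflection -> agent, and agent -> END once no further tool calls are made.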

def agent_node(state: AgentState):
    print("\n---AGENT NODE---")
    current_messages = state['messages']
    if not current_messages or not isinstance(current_messages[0], SystemMessage):
        print("Prepending System Prompt.")
        current_messages = [SystemMessage(content=ClinicalPrompts.SYSTEM_PROMPT)] + current_messages
    print(f"Invoking LLM with {len(current_messages)} messages.")
    try:
        response = model_with_tools.invoke(current_messages)
        print(f"Agent Raw Response Type: {type(response)}")
        if hasattr(response, 'tool_calls') and response.tool_calls:
            print(f"Agent Response Tool Calls: {response.tool_calls}")
        else:
            print("Agent Response: No tool calls.")
    except Exception as e:
        print(f"ERROR in agent_node: {e}")
        traceback.print_exc()
        error_message = AIMessage(content=f"Error: {e}")
        return {"messages": [error_message]}
    return {"messages": [response]}  # Only return messages

def tool_node(state: AgentState):
    print("\n---TOOL NODE---")
    tool_messages = []
    last_message = state['messages'][-1]
    interaction_warnings_found = []
    if not isinstance(last_message, AIMessage) or not getattr(last_message, 'tool_calls', None):
        print("Warning: Tool node called unexpectedly.")
        return {"messages": [], "interaction_warnings": None}
    tool_calls = last_message.tool_calls
    print(f"Tool calls received: {json.dumps(tool_calls, indent=2)}")

    # Index requested prescriptions and interaction checks by medication name
    prescriptions_requested = {}
    interaction_checks_requested = {}
    for call in tool_calls:
        tool_name = call.get('name')
        tool_args = call.get('args', {})
        if tool_name == 'prescribe_medication':
            med_name = tool_args.get('medication_name', '').lower()
            if med_name: prescriptions_requested[med_name] = call
        elif tool_name == 'check_drug_interactions':
            potential_med = tool_args.get('potential_prescription', '').lower()
            if potential_med: interaction_checks_requested[potential_med] = call

    # Block any prescription that lacks a matching interaction check in this batch
    valid_tool_calls_for_execution = []
    blocked_ids = set()
    for med_name, prescribe_call in prescriptions_requested.items():
        if med_name not in interaction_checks_requested:
            print(f"**SAFETY VIOLATION (Agent): Prescribe '{med_name}' blocked - no interaction check requested.**")
            error_msg = ToolMessage(content=json.dumps({"status": "error", "message": f"Interaction check needed for '{med_name}'."}), tool_call_id=prescribe_call['id'], name=prescribe_call['name'])
            tool_messages.append(error_msg)
            blocked_ids.add(prescribe_call['id'])
    valid_tool_calls_for_execution = [call for call in tool_calls if call['id'] not in blocked_ids]

    # Augment interaction checks with the patient's current meds and allergies
    patient_data = state.get("patient_data", {})
    patient_meds_full = patient_data.get("medications", {}).get("current", [])
    patient_allergies = patient_data.get("allergies", [])
    for call in valid_tool_calls_for_execution:
        if call['name'] == 'check_drug_interactions':
            if 'args' not in call: call['args'] = {}
            call['args']['current_medications'] = patient_meds_full
            call['args']['allergies'] = patient_allergies
            print(f"Augmented interaction check args for call ID {call['id']}")

    if valid_tool_calls_for_execution:
        print(f"Attempting execution: {[c['name'] for c in valid_tool_calls_for_execution]}")
        try:
            responses = tool_executor.batch(valid_tool_calls_for_execution, return_exceptions=True)
            for call, resp in zip(valid_tool_calls_for_execution, responses):
                tool_call_id = call['id']
                tool_name = call['name']
                if isinstance(resp, Exception):
                    error_type = type(resp).__name__
                    error_str = str(resp)
                    print(f"ERROR executing tool '{tool_name}': {error_type} - {error_str}")
                    traceback.print_exc()
                    error_content = json.dumps({"status": "error", "message": f"Failed: {error_type} - {error_str}"})
                    tool_messages.append(ToolMessage(content=error_content, tool_call_id=tool_call_id, name=tool_name))
                    # ... Specific error check ...
                else:
                    print(f"Tool '{tool_name}' executed.")
                    content_str = str(resp)
                    tool_messages.append(ToolMessage(content=content_str, tool_call_id=tool_call_id, name=tool_name))
                    if tool_name == "check_drug_interactions":  # Extract warnings
                        try:
                            result_data = json.loads(content_str)
                            if result_data.get("status") == "warning" and result_data.get("warnings"):
                                print(f"  Interaction check returned warnings: {result_data['warnings']}")
                                interaction_warnings_found.extend(result_data["warnings"])
                        except Exception as e:
                            print(f"  Error processing interaction check result: {e}")
        except Exception as e:  # Outer exception handling...
            print(f"CRITICAL TOOL NODE ERROR: {e}")
            traceback.print_exc()
            error_content = json.dumps({"status": "error", "message": f"Internal error: {e}"})
            processed_ids = {msg.tool_call_id for msg in tool_messages}
            for call in valid_tool_calls_for_execution:
                if call['id'] not in processed_ids:
                    tool_messages.append(ToolMessage(content=error_content, tool_call_id=call['id'], name=call['name']))

    print(f"Returning {len(tool_messages)} tool messages. Warnings: {bool(interaction_warnings_found)}")
    return {"messages": tool_messages, "interaction_warnings": interaction_warnings_found or None}  # Return messages AND warnings

def reflection_node(state: AgentState):
    print("\n---REFLECTION NODE---")
    interaction_warnings = state.get("interaction_warnings")
    if not interaction_warnings:
        print("Warning: Reflection node called without warnings.")
        return {"messages": [], "interaction_warnings": None}
    print(f"Reviewing interaction warnings: {interaction_warnings}")

    # Find the AI message whose tool calls produced the interaction warnings
    triggering_ai_message = None
    relevant_tool_call_ids = set()
    for msg in reversed(state['messages']):
        if isinstance(msg, ToolMessage) and msg.name == "check_drug_interactions":
            relevant_tool_call_ids.add(msg.tool_call_id)
        if isinstance(msg, AIMessage) and msg.tool_calls:
            if any(tc['id'] in relevant_tool_call_ids for tc in msg.tool_calls):
                triggering_ai_message = msg
                break
    if not triggering_ai_message:
        print("Error: Could not find triggering AI message for reflection.")
        return {"messages": [AIMessage(content="Internal Error: Reflection context missing.")], "interaction_warnings": None}

    original_plan_proposal_context = triggering_ai_message.content
    reflection_prompt_text = f"""You are SynapseAI, performing a critical safety review...
Previous Context:\n{original_plan_proposal_context}\n---\nInteraction Warnings:\n```json\n{json.dumps(interaction_warnings, indent=2)}\n```\n**CRITICAL REFLECTION STEP:** Analyze warnings, decide if revision is needed, respond ONLY about therapeutics revision based on these warnings."""
    reflection_messages = [SystemMessage(content="Perform focused safety review based on interaction warnings."), HumanMessage(content=reflection_prompt_text)]
    print("Invoking LLM for reflection...")
    try:
        reflection_response = llm.invoke(reflection_messages)
        print(f"Reflection Response: {reflection_response.content}")
        final_ai_message = AIMessage(content=reflection_response.content)
    except Exception as e:
        print(f"ERROR during reflection: {e}")
        traceback.print_exc()
        final_ai_message = AIMessage(content=f"Error during safety reflection: {e}")
    return {"messages": [final_ai_message], "interaction_warnings": None}  # Return reflection response, clear warnings


# --- Graph Routing Logic ---
def should_continue(state: AgentState) -> str:
    print("\n---ROUTING DECISION (Agent Output)---")
    last_message = state['messages'][-1] if state['messages'] else None
    if not isinstance(last_message, AIMessage):
        return "end_conversation_turn"
    if "Sorry, an internal error occurred" in last_message.content:
        return "end_conversation_turn"
    if getattr(last_message, 'tool_calls', None):
        return "continue_tools"
    else:
        return "end_conversation_turn"

def after_tools_router(state: AgentState) -> str:
    print("\n---ROUTING DECISION (After Tools)---")
    if state.get("interaction_warnings"):
        print("Routing: Warnings found -> Reflection")
        return "reflect_on_warnings"
    else:
        print("Routing: No warnings -> Agent")
        return "continue_to_agent"

# --- ClinicalAgent Class ---
class ClinicalAgent:
    def __init__(self):
        workflow = StateGraph(AgentState)
        workflow.add_node("agent", agent_node)
        workflow.add_node("tools", tool_node)
        workflow.add_node("reflection", reflection_node)
        workflow.set_entry_point("agent")
        workflow.add_conditional_edges("agent", should_continue, {"continue_tools": "tools", "end_conversation_turn": END})
        workflow.add_conditional_edges("tools", after_tools_router, {"reflect_on_warnings": "reflection", "continue_to_agent": "agent"})
        workflow.add_edge("reflection", "agent")
        self.graph_app = workflow.compile()
        print("ClinicalAgent initialized and LangGraph compiled.")

    def invoke_turn(self, state: Dict) -> Dict:
        """Invokes the LangGraph app for one turn."""
        print(f"Invoking graph with state keys: {state.keys()}")
        try:
            final_state = self.graph_app.invoke(state, {"recursion_limit": 15})
            final_state.setdefault('summary', state.get('summary'))  # Ensure keys exist
            final_state.setdefault('interaction_warnings', None)
            return final_state
        except Exception as e:
            print(f"CRITICAL ERROR during graph invocation: {type(e).__name__} - {e}")
            traceback.print_exc()
            error_msg = AIMessage(content=f"Sorry, a critical error occurred during processing: {e}")
            return {"messages": state.get('messages', []) + [error_msg], "patient_data": state.get('patient_data'), "summary": state.get('summary'), "interaction_warnings": None}
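
# Example usage (illustrative sketch; app.py, referenced in the comments above, owns the real UI wiring):
#   agent = ClinicalAgent()
#   result = agent.invoke_turn({
#       "messages": [HumanMessage(content="55M, crushing chest pain for 1 hour...")],
#       "patient_data": {"hpi": {"symptoms": ["chest pain"]}, "vitals": {}, "allergies": []},
#       "summary": None,
#       "interaction_warnings": None,
#   })
#   print(result["messages"][-1].content)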