# agent.py
import requests
import json
import re
import os
import operator
import traceback
from functools import lru_cache
from langchain_groq import ChatGroq
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage, ToolMessage
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import tool
from langgraph.prebuilt import ToolExecutor
from langgraph.graph import StateGraph, END
from typing import Optional, List, Dict, Any, TypedDict, Annotated
# --- Environment Variable Loading ---
# Keys are primarily used here, but checked in app.py for UI feedback
UMLS_API_KEY = os.environ.get("UMLS_API_KEY")
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
TAVILY_API_KEY = os.environ.get("TAVILY_API_KEY")
# --- Configuration & Constants ---
AGENT_MODEL_NAME = "llama3-70b-8192"
AGENT_TEMPERATURE = 0.1
MAX_SEARCH_RESULTS = 3
class ClinicalPrompts:
# The comprehensive system prompt defining agent behavior
SYSTEM_PROMPT = """
You are SynapseAI, an expert AI clinical assistant engaged in an interactive consultation.
Your goal is to support healthcare professionals by analyzing patient data, providing differential diagnoses, suggesting evidence-based management plans, and identifying risks according to current standards of care.
**Core Directives for this Conversation:**
1. **Analyze Sequentially:** Process information turn-by-turn. Base your responses on the *entire* conversation history.
2. **Seek Clarity:** If the provided information is insufficient or ambiguous for a safe assessment, CLEARLY STATE what specific additional information or clarification is needed. Do NOT guess or make unsafe assumptions.
3. **Structured Assessment (When Ready):** When you have sufficient information and have performed necessary checks (like interactions, guideline searches), provide a comprehensive assessment using the following JSON structure. Output this JSON structure as the primary content of your response when you are providing the full analysis. Do NOT output incomplete JSON. If you need to ask a question or perform a tool call first, do that instead of outputting this structure.
```json
{
"assessment": "Concise summary of the patient's presentation and key findings based on the conversation.",
"differential_diagnosis": [
{"diagnosis": "Primary Diagnosis", "likelihood": "High/Medium/Low", "rationale": "Supporting evidence from conversation..."},
{"diagnosis": "Alternative Diagnosis 1", "likelihood": "Medium/Low", "rationale": "Supporting/Refuting evidence..."},
{"diagnosis": "Alternative Diagnosis 2", "likelihood": "Low", "rationale": "Why it's less likely but considered..."}
],
"risk_assessment": {
"identified_red_flags": ["List any triggered red flags based on input and analysis"],
"immediate_concerns": ["Specific urgent issues requiring attention (e.g., sepsis risk, ACS rule-out)"],
"potential_complications": ["Possible future issues based on presentation"]
},
"recommended_plan": {
"investigations": ["List specific lab tests or imaging required. Use 'order_lab_test' tool."],
"therapeutics": ["Suggest specific treatments or prescriptions. Use 'prescribe_medication' tool. MUST check interactions first using 'check_drug_interactions'."],
"consultations": ["Recommend specialist consultations if needed."],
"patient_education": ["Key points for patient communication."]
},
"rationale_summary": "Justification for assessment/plan. **Crucially, if relevant (e.g., ACS, sepsis, common infections), use 'tavily_search_results' to find and cite current clinical practice guidelines (e.g., 'latest ACC/AHA chest pain guidelines 202X', 'Surviving Sepsis Campaign guidelines') supporting your recommendations.** Include summary of guideline findings here.",
"interaction_check_summary": "Summary of findings from 'check_drug_interactions' if performed."
}
```
4. **Safety First - Interactions:** BEFORE suggesting a new prescription via `prescribe_medication`, you MUST FIRST use `check_drug_interactions` in a preceding or concurrent tool call. Report the findings from the interaction check. If significant interactions exist, modify the plan or state the contraindication clearly.
5. **Safety First - Red Flags:** Use the `flag_risk` tool IMMEDIATELY if critical red flags requiring urgent action are identified at any point in the conversation.
6. **Tool Use:** Employ tools (`order_lab_test`, `prescribe_medication`, `check_drug_interactions`, `flag_risk`, `tavily_search_results`) logically within the conversational flow. Wait for tool results before proceeding if the result is needed for the next step (e.g., wait for interaction check before confirming prescription in the structured JSON).
7. **Evidence & Guidelines:** Actively use `tavily_search_results` not just for general knowledge, but specifically to query for and incorporate **current clinical practice guidelines** relevant to the patient's presentation (e.g., chest pain, shortness of breath, suspected infection). Summarize findings in the `rationale_summary` when providing the structured output.
8. **Conciseness & Flow:** Be medically accurate and concise. Use standard terminology. Respond naturally in conversation (asking questions, acknowledging info) until ready for the full structured JSON output.
"""
# --- API Constants & Helper Functions ---
UMLS_AUTH_ENDPOINT = "https://utslogin.nlm.nih.gov/cas/v1/api-key"
RXNORM_API_BASE = "https://rxnav.nlm.nih.gov/REST"
OPENFDA_API_BASE = "https://api.fda.gov/drug/label.json"
@lru_cache(maxsize=256)
def get_rxcui(drug_name: str) -> Optional[str]:
"""Uses RxNorm API to find the RxCUI for a given drug name."""
    if not drug_name or not isinstance(drug_name, str): return None
    drug_name = drug_name.strip()
    if not drug_name: return None
    print(f"RxNorm Lookup for: '{drug_name}'")
    try:
        # Try direct lookup first
        params = {"name": drug_name, "search": 1}
        response = requests.get(f"{RXNORM_API_BASE}/rxcui.json", params=params, timeout=10)
        response.raise_for_status()
        data = response.json()
        if data and "idGroup" in data and "rxnormId" in data["idGroup"]:
            rxcui = data["idGroup"]["rxnormId"][0]
            print(f" Found RxCUI: {rxcui} for '{drug_name}'")
            return rxcui
        # Fallback to /drugs search
        params = {"name": drug_name}
        response = requests.get(f"{RXNORM_API_BASE}/drugs.json", params=params, timeout=10)
        response.raise_for_status()
        data = response.json()
        if data and "drugGroup" in data and "conceptGroup" in data["drugGroup"]:
            for group in data["drugGroup"]["conceptGroup"]:
                if group.get("tty") in ["SBD", "SCD", "GPCK", "BPCK", "IN", "MIN", "PIN"] and group.get("conceptProperties"):
                    rxcui = group["conceptProperties"][0].get("rxcui")
                    if rxcui:
                        print(f" Found RxCUI (via /drugs): {rxcui} for '{drug_name}'")
                        return rxcui
        print(f" RxCUI not found for '{drug_name}'.")
        return None
    except requests.exceptions.RequestException as e: print(f" Error fetching RxCUI for '{drug_name}': {e}"); return None
    except json.JSONDecodeError as e: print(f" Error decoding RxNorm JSON response for '{drug_name}': {e}"); return None
    except Exception as e: print(f" Unexpected error in get_rxcui for '{drug_name}': {e}"); return None
@lru_cache(maxsize=128)
def get_openfda_label(rxcui: Optional[str] = None, drug_name: Optional[str] = None) -> Optional[dict]:
"""Fetches drug label information from OpenFDA using RxCUI or drug name."""
    if not rxcui and not drug_name: return None
    print(f"OpenFDA Label Lookup for: RXCUI={rxcui}, Name={drug_name}")
    search_terms = []
    if rxcui: search_terms.append(f'spl_rxnorm_code:"{rxcui}" OR openfda.rxcui:"{rxcui}"')
    if drug_name: search_terms.append(f'(openfda.brand_name:"{drug_name.lower()}" OR openfda.generic_name:"{drug_name.lower()}")')
    search_query = " OR ".join(search_terms)
    params = {"search": search_query, "limit": 1}
    try:
        response = requests.get(OPENFDA_API_BASE, params=params, timeout=15)
        response.raise_for_status()
        data = response.json()
        if data and "results" in data and data["results"]:
            print(f" Found OpenFDA label for query: {search_query}")
            return data["results"][0]
        print(f" No OpenFDA label found for query: {search_query}")
        return None
    except requests.exceptions.RequestException as e: print(f" Error fetching OpenFDA label: {e}"); return None
    except json.JSONDecodeError as e: print(f" Error decoding OpenFDA JSON response: {e}"); return None
    except Exception as e: print(f" Unexpected error in get_openfda_label: {e}"); return None
def search_text_list(text_list: Optional[List[str]], search_terms: List[str]) -> List[str]:
""" Case-insensitive search for any search_term within a list of text strings. Returns snippets. """
    found_snippets = []
    if not text_list or not search_terms: return found_snippets
    search_terms_lower = [str(term).lower() for term in search_terms if term]
    for text_item in text_list:
        if not isinstance(text_item, str): continue
        text_item_lower = text_item.lower()
        for term in search_terms_lower:
            if term in text_item_lower:
                start_index = text_item_lower.find(term)
                snippet_start = max(0, start_index - 50)
                snippet_end = min(len(text_item), start_index + len(term) + 100)
                snippet = text_item[snippet_start:snippet_end]
                snippet = re.sub(f"({re.escape(term)})", r"**\1**", snippet, count=1, flags=re.IGNORECASE)  # Highlight match
                found_snippets.append(f"...{snippet}...")
                break  # Only report first match per text item
    return found_snippets
# --- Clinical Helper Functions ---
def parse_bp(bp_string: str) -> Optional[tuple[int, int]]:
"""Parses BP string like '120/80' into (systolic, diastolic) integers."""
if not isinstance(bp_string, str): return None
match = re.match(r"(\d{1,3})\s*/\s*(\d{1,3})", bp_string.strip())
if match: return int(match.group(1)), int(match.group(2))
return None
def check_red_flags(patient_data: dict) -> List[str]:
"""Checks patient data against predefined red flags."""
flags = []
if not patient_data: return flags
symptoms = patient_data.get("hpi", {}).get("symptoms", [])
vitals = patient_data.get("vitals", {})
history = patient_data.get("pmh", {}).get("conditions", "")
symptoms_lower = [str(s).lower() for s in symptoms if isinstance(s, str)]
# Symptom Flags
if "chest pain" in symptoms_lower: flags.append("Red Flag: Chest Pain reported.")
if "shortness of breath" in symptoms_lower: flags.append("Red Flag: Shortness of Breath reported.")
if "severe headache" in symptoms_lower: flags.append("Red Flag: Severe Headache reported.")
if "sudden vision loss" in symptoms_lower: flags.append("Red Flag: Sudden Vision Loss reported.")
if "weakness on one side" in symptoms_lower: flags.append("Red Flag: Unilateral Weakness reported (potential stroke).")
if "hemoptysis" in symptoms_lower: flags.append("Red Flag: Hemoptysis (coughing up blood).")
if "syncope" in symptoms_lower: flags.append("Red Flag: Syncope (fainting).")
# Vital Sign Flags
if vitals:
temp = vitals.get("temp_c"); hr = vitals.get("hr_bpm"); rr = vitals.get("rr_rpm")
spo2 = vitals.get("spo2_percent"); bp_str = vitals.get("bp_mmhg")
if temp is not None and temp >= 38.5: flags.append(f"Red Flag: Fever ({temp}°C).")
if hr is not None and hr >= 120: flags.append(f"Red Flag: Tachycardia ({hr} bpm).")
if hr is not None and hr <= 50: flags.append(f"Red Flag: Bradycardia ({hr} bpm).")
if rr is not None and rr >= 24: flags.append(f"Red Flag: Tachypnea ({rr} rpm).")
if spo2 is not None and spo2 <= 92: flags.append(f"Red Flag: Hypoxia ({spo2}%).")
if bp_str:
bp = parse_bp(bp_str)
if bp:
if bp[0] >= 180 or bp[1] >= 110: flags.append(f"Red Flag: Hypertensive Urgency/Emergency (BP: {bp_str} mmHg).")
if bp[0] <= 90 or bp[1] <= 60: flags.append(f"Red Flag: Hypotension (BP: {bp_str} mmHg).")
# History Flags
if history and isinstance(history, str):
history_lower = history.lower()
if "history of mi" in history_lower and "chest pain" in symptoms_lower: flags.append("Red Flag: History of MI with current Chest Pain.")
if "history of dvt/pe" in history_lower and "shortness of breath" in symptoms_lower: flags.append("Red Flag: History of DVT/PE with current Shortness of Breath.")
return list(set(flags)) # Unique flags
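# Illustrative example (comments only, not executed): the nested patient_data shape below
# mirrors the lookups inside check_red_flags(); the specific values are hypothetical.
#   check_red_flags({
#       "hpi": {"symptoms": ["Chest pain", "Nausea"]},
#       "vitals": {"hr_bpm": 128, "bp_mmhg": "182/104", "temp_c": 37.2, "spo2_percent": 96},
#       "pmh": {"conditions": "History of MI, hypertension"},
#   })
#   would return (order not guaranteed, duplicates removed):
#   ["Red Flag: Chest Pain reported.", "Red Flag: Tachycardia (128 bpm).",
#    "Red Flag: Hypertensive Urgency/Emergency (BP: 182/104 mmHg).",
#    "Red Flag: History of MI with current Chest Pain."]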
def format_patient_data_for_prompt(data: dict) -> str:
"""Formats the patient dictionary into a readable string for the LLM."""
    if not data: return "No patient data provided."
    prompt_str = ""
    for key, value in data.items():
        section_title = key.replace('_', ' ').title()
        if isinstance(value, dict) and value:
            has_content = any(sub_value for sub_value in value.values())
            if has_content:
                prompt_str += f"**{section_title}:**\n"
                for sub_key, sub_value in value.items():
                    if sub_value: prompt_str += f" - {sub_key.replace('_', ' ').title()}: {sub_value}\n"
        elif isinstance(value, list) and value:
            prompt_str += f"**{section_title}:** {', '.join(map(str, value))}\n"
        elif value and not isinstance(value, dict):
            prompt_str += f"**{section_title}:** {value}\n"
    return prompt_str.strip()
# --- Tool Definitions ---
class LabOrderInput(BaseModel):
    test_name: str = Field(...)
    reason: str = Field(...)
    priority: str = Field("Routine")
class PrescriptionInput(BaseModel):
    medication_name: str = Field(...)
    dosage: str = Field(...)
    route: str = Field(...)
    frequency: str = Field(...)
    duration: str = Field("As directed")
    reason: str = Field(...)
class InteractionCheckInput(BaseModel):
    potential_prescription: str = Field(...)
    current_medications: Optional[List[str]] = Field(None)
    allergies: Optional[List[str]] = Field(None)
class FlagRiskInput(BaseModel):
    risk_description: str = Field(...)
    urgency: str = Field("High")
@tool("order_lab_test", args_schema=LabOrderInput)
def order_lab_test(test_name: str, reason: str, priority: str = "Routine") -> str:
print(f"Executing order_lab_test: {test_name}, Reason: {reason}, Priority: {priority}"); return json.dumps({"status": "success", "message": f"Lab Ordered: {test_name} ({priority})", "details": f"Reason: {reason}"})
@tool("prescribe_medication", args_schema=PrescriptionInput)
def prescribe_medication(medication_name: str, dosage: str, route: str, frequency: str, duration: str, reason: str) -> str:
print(f"Executing prescribe_medication: {medication_name} {dosage}..."); return json.dumps({"status": "success", "message": f"Prescription Prepared: {medication_name} {dosage} {route} {frequency}", "details": f"Duration: {duration}. Reason: {reason}"})
@tool("check_drug_interactions", args_schema=InteractionCheckInput)
def check_drug_interactions(potential_prescription: str, current_medications: Optional[List[str]] = None, allergies: Optional[List[str]] = None) -> str:
print(f"\n--- Executing REAL check_drug_interactions ---"); print(f"Checking potential prescription: '{potential_prescription}'"); warnings = []; potential_med_lower = potential_prescription.lower().strip();
current_meds_list = current_medications or []; allergies_list = allergies or []; current_med_names_lower = [];
for med in current_meds_list: match = re.match(r"^\s*([a-zA-Z\-]+)", str(med));
if match: current_med_names_lower.append(match.group(1).lower());
allergies_lower = [str(a).lower().strip() for a in allergies_list if a]; print(f" Against Current Meds (names): {current_med_names_lower}"); print(f" Against Allergies: {allergies_lower}");
print(f" Step 1: Normalizing '{potential_prescription}'..."); potential_rxcui = get_rxcui(potential_prescription); potential_label = get_openfda_label(rxcui=potential_rxcui, drug_name=potential_prescription);
if not potential_rxcui and not potential_label: warnings.append(f"INFO: Could not reliably identify '{potential_prescription}'. Checks may be incomplete.");
print(" Step 2: Performing Allergy Check...");
for allergy in allergies_lower:
if allergy == potential_med_lower: warnings.append(f"CRITICAL ALLERGY (Name Match): Patient allergic to '{allergy}'. Potential prescription is '{potential_prescription}'.");
elif allergy in ["penicillin", "pcns"] and potential_med_lower in ["amoxicillin", "ampicillin", "augmentin", "piperacillin"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to Penicillin. High risk with '{potential_prescription}'.");
elif allergy == "sulfa" and potential_med_lower in ["sulfamethoxazole", "bactrim", "sulfasalazine"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to Sulfa. High risk with '{potential_prescription}'.");
elif allergy in ["nsaids", "aspirin"] and potential_med_lower in ["ibuprofen", "naproxen", "ketorolac", "diclofenac"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to NSAIDs/Aspirin. Risk with '{potential_prescription}'.");
if potential_label: contraindications = potential_label.get("contraindications"); warnings_section = potential_label.get("warnings_and_cautions") or potential_label.get("warnings");
if contraindications: allergy_mentions_ci = search_text_list(contraindications, allergies_lower);
if allergy_mentions_ci: warnings.append(f"ALLERGY RISK (Contraindication Found): Label for '{potential_prescription}' mentions contraindication potentially related to patient allergies: {'; '.join(allergy_mentions_ci)}");
if warnings_section: allergy_mentions_warn = search_text_list(warnings_section, allergies_lower);
if allergy_mentions_warn: warnings.append(f"ALLERGY RISK (Warning Found): Label for '{potential_prescription}' mentions warnings potentially related to patient allergies: {'; '.join(allergy_mentions_warn)}");
print(" Step 3: Performing Drug-Drug Interaction Check...");
if potential_rxcui or potential_label:
for current_med_name in current_med_names_lower:
if not current_med_name or current_med_name == potential_med_lower: continue; print(f" Checking interaction between '{potential_prescription}' and '{current_med_name}'..."); current_rxcui = get_rxcui(current_med_name); current_label = get_openfda_label(rxcui=current_rxcui, drug_name=current_med_name); search_terms_for_current = [current_med_name];
if current_rxcui: search_terms_for_current.append(current_rxcui); search_terms_for_potential = [potential_med_lower];
if potential_rxcui: search_terms_for_potential.append(potential_rxcui); interaction_found_flag = False;
if potential_label and potential_label.get("drug_interactions"): interaction_mentions = search_text_list(potential_label.get("drug_interactions"), search_terms_for_current);
if interaction_mentions: warnings.append(f"Potential Interaction ({potential_prescription.capitalize()} Label): Mentions '{current_med_name.capitalize()}'. Snippets: {'; '.join(interaction_mentions)}"); interaction_found_flag = True;
if current_label and current_label.get("drug_interactions") and not interaction_found_flag: interaction_mentions = search_text_list(current_label.get("drug_interactions"), search_terms_for_potential);
if interaction_mentions: warnings.append(f"Potential Interaction ({current_med_name.capitalize()} Label): Mentions '{potential_prescription.capitalize()}'. Snippets: {'; '.join(interaction_mentions)}");
else: warnings.append(f"INFO: Drug-drug interaction check skipped for '{potential_prescription}' as it could not be identified via RxNorm/OpenFDA.");
final_warnings = list(set(warnings)); status = "warning" if any("CRITICAL" in w or "Interaction" in w or "RISK" in w for w in final_warnings) else "clear";
if not final_warnings: status = "clear"; message = f"Interaction/Allergy check for '{potential_prescription}': {len(final_warnings)} potential issue(s) identified using RxNorm/OpenFDA." if final_warnings else f"No major interactions or allergy issues identified for '{potential_prescription}' based on RxNorm/OpenFDA lookup."; print(f"--- Interaction Check Complete ---");
return json.dumps({"status": status, "message": message, "warnings": final_warnings})
@tool("flag_risk", args_schema=FlagRiskInput)
def flag_risk(risk_description: str, urgency: str) -> str:
print(f"Executing flag_risk: {risk_description}, Urgency: {urgency}"); # UI part in app.py
return json.dumps({"status": "flagged", "message": f"Risk '{risk_description}' flagged with {urgency} urgency."})
search_tool = TavilySearchResults(max_results=MAX_SEARCH_RESULTS, name="tavily_search_results")
all_tools = [order_lab_test, prescribe_medication, check_drug_interactions, flag_risk, search_tool]
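# Illustrative usage note (sketch only; nothing in this module calls the tools directly):
# each @tool-decorated function is a LangChain tool and can be exercised with .invoke()
# and a dict matching its args_schema, e.g.
#   order_lab_test.invoke({"test_name": "Troponin I", "reason": "Rule out ACS", "priority": "STAT"})
#   check_drug_interactions.invoke({"potential_prescription": "ibuprofen",
#                                   "current_medications": ["Lisinopril 10mg daily"],
#                                   "allergies": ["NSAIDs"]})
# Both return JSON strings, which is what the tool node below parses.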
# --- LangGraph State & Nodes ---
class AgentState(TypedDict):
    messages: Annotated[list[Any], operator.add]
    patient_data: Optional[dict]
    summary: Optional[str]
    interaction_warnings: Optional[List[str]]
llm = ChatGroq(temperature=AGENT_TEMPERATURE, model=AGENT_MODEL_NAME)
model_with_tools = llm.bind_tools(all_tools)
tool_executor = ToolExecutor(all_tools)
def agent_node(state: AgentState):
print("\n---AGENT NODE---"); current_messages = state['messages'];
if not current_messages or not isinstance(current_messages[0], SystemMessage): print("Prepending System Prompt."); current_messages = [SystemMessage(content=ClinicalPrompts.SYSTEM_PROMPT)] + current_messages;
print(f"Invoking LLM with {len(current_messages)} messages.");
try: response = model_with_tools.invoke(current_messages); print(f"Agent Raw Response Type: {type(response)}");
if hasattr(response, 'tool_calls') and response.tool_calls: print(f"Agent Response Tool Calls: {response.tool_calls}"); else: print("Agent Response: No tool calls.");
except Exception as e: print(f"ERROR in agent_node: {e}"); traceback.print_exc(); error_message = AIMessage(content=f"Error: {e}"); return {"messages": [error_message]};
return {"messages": [response]} # Only return messages
def tool_node(state: AgentState):
print("\n---TOOL NODE---"); tool_messages = []; last_message = state['messages'][-1]; interaction_warnings_found = [];
if not isinstance(last_message, AIMessage) or not getattr(last_message, 'tool_calls', None): print("Warning: Tool node called unexpectedly."); return {"messages": [], "interaction_warnings": None};
tool_calls = last_message.tool_calls; print(f"Tool calls received: {json.dumps(tool_calls, indent=2)}"); prescriptions_requested = {}; interaction_checks_requested = {};
for call in tool_calls: tool_name = call.get('name'); tool_args = call.get('args', {});
if tool_name == 'prescribe_medication': med_name = tool_args.get('medication_name', '').lower();
if med_name: prescriptions_requested[med_name] = call;
elif tool_name == 'check_drug_interactions': potential_med = tool_args.get('potential_prescription', '').lower();
if potential_med: interaction_checks_requested[potential_med] = call;
valid_tool_calls_for_execution = []; blocked_ids = set();
for med_name, prescribe_call in prescriptions_requested.items():
if med_name not in interaction_checks_requested: print(f"**SAFETY VIOLATION (Agent): Prescribe '{med_name}' blocked - no interaction check requested.**"); error_msg = ToolMessage(content=json.dumps({"status": "error", "message": f"Interaction check needed for '{med_name}'."}), tool_call_id=prescribe_call['id'], name=prescribe_call['name']); tool_messages.append(error_msg); blocked_ids.add(prescribe_call['id']);
valid_tool_calls_for_execution = [call for call in tool_calls if call['id'] not in blocked_ids];
patient_data = state.get("patient_data", {}); patient_meds_full = patient_data.get("medications", {}).get("current", []); patient_allergies = patient_data.get("allergies", []);
for call in valid_tool_calls_for_execution:
if call['name'] == 'check_drug_interactions':
if 'args' not in call: call['args'] = {}; call['args']['current_medications'] = patient_meds_full; call['args']['allergies'] = patient_allergies; print(f"Augmented interaction check args for call ID {call['id']}");
if valid_tool_calls_for_execution: print(f"Attempting execution: {[c['name'] for c in valid_tool_calls_for_execution]}");
try: responses = tool_executor.batch(valid_tool_calls_for_execution, return_exceptions=True);
for call, resp in zip(valid_tool_calls_for_execution, responses): tool_call_id = call['id']; tool_name = call['name'];
if isinstance(resp, Exception): error_type = type(resp).__name__; error_str = str(resp); print(f"ERROR executing tool '{tool_name}': {error_type} - {error_str}"); traceback.print_exc(); error_content = json.dumps({"status": "error", "message": f"Failed: {error_type} - {error_str}"}); tool_messages.append(ToolMessage(content=error_content, tool_call_id=tool_call_id, name=tool_name));
# ... Specific error check ...
else:
print(f"Tool '{tool_name}' executed."); content_str = str(resp); tool_messages.append(ToolMessage(content=content_str, tool_call_id=tool_call_id, name=tool_name));
if tool_name == "check_drug_interactions": # Extract warnings
try: result_data = json.loads(content_str);
if result_data.get("status") == "warning" and result_data.get("warnings"): print(f" Interaction check returned warnings: {result_data['warnings']}"); interaction_warnings_found.extend(result_data["warnings"]);
except Exception as e: print(f" Error processing interaction check result: {e}");
except Exception as e: # Outer exception handling...
print(f"CRITICAL TOOL NODE ERROR: {e}"); traceback.print_exc(); error_content = json.dumps({"status": "error", "message": f"Internal error: {e}"}); processed_ids = {msg.tool_call_id for msg in tool_messages}; [tool_messages.append(ToolMessage(content=error_content, tool_call_id=call['id'], name=call['name'])) for call in valid_tool_calls_for_execution if call['id'] not in processed_ids];
print(f"Returning {len(tool_messages)} tool messages. Warnings: {bool(interaction_warnings_found)}")
return {"messages": tool_messages, "interaction_warnings": interaction_warnings_found or None} # Return messages AND warnings
def reflection_node(state: AgentState):
print("\n---REFLECTION NODE---")
interaction_warnings = state.get("interaction_warnings")
if not interaction_warnings: print("Warning: Reflection node called without warnings."); return {"messages": [], "interaction_warnings": None};
print(f"Reviewing interaction warnings: {interaction_warnings}"); triggering_ai_message = None; relevant_tool_call_ids = set();
for msg in reversed(state['messages']):
if isinstance(msg, ToolMessage) and msg.name == "check_drug_interactions": relevant_tool_call_ids.add(msg.tool_call_id);
if isinstance(msg, AIMessage) and msg.tool_calls:
if any(tc['id'] in relevant_tool_call_ids for tc in msg.tool_calls): triggering_ai_message = msg; break;
if not triggering_ai_message: print("Error: Could not find triggering AI message for reflection."); return {"messages": [AIMessage(content="Internal Error: Reflection context missing.")], "interaction_warnings": None};
original_plan_proposal_context = triggering_ai_message.content;
reflection_prompt_text = f"""You are SynapseAI, performing a critical safety review...
Previous Context:\n{original_plan_proposal_context}\n---\nInteraction Warnings:\n```json\n{json.dumps(interaction_warnings, indent=2)}\n```\n**CRITICAL REFLECTION STEP:** Analyze warnings, decide if revision is needed, respond ONLY about therapeutics revision based on these warnings."""
reflection_messages = [SystemMessage(content="Perform focused safety review based on interaction warnings."), HumanMessage(content=reflection_prompt_text)];
print("Invoking LLM for reflection...");
try: reflection_response = llm.invoke(reflection_messages); print(f"Reflection Response: {reflection_response.content}"); final_ai_message = AIMessage(content=reflection_response.content);
except Exception as e: print(f"ERROR during reflection: {e}"); traceback.print_exc(); final_ai_message = AIMessage(content=f"Error during safety reflection: {e}");
return {"messages": [final_ai_message], "interaction_warnings": None} # Return reflection response, clear warnings
# --- Graph Routing Logic ---
def should_continue(state: AgentState) -> str:
print("\n---ROUTING DECISION (Agent Output)---"); last_message = state['messages'][-1] if state['messages'] else None;
if not isinstance(last_message, AIMessage): return "end_conversation_turn";
if "Sorry, an internal error occurred" in last_message.content: return "end_conversation_turn";
    if getattr(last_message, 'tool_calls', None):
        return "continue_tools"
    return "end_conversation_turn"
def after_tools_router(state: AgentState) -> str:
print("\n---ROUTING DECISION (After Tools)---");
if state.get("interaction_warnings"): print("Routing: Warnings found -> Reflection"); return "reflect_on_warnings";
else: print("Routing: No warnings -> Agent"); return "continue_to_agent";
# --- ClinicalAgent Class ---
class ClinicalAgent:
def __init__(self):
workflow = StateGraph(AgentState)
workflow.add_node("agent", agent_node)
workflow.add_node("tools", tool_node)
workflow.add_node("reflection", reflection_node)
workflow.set_entry_point("agent")
workflow.add_conditional_edges("agent", should_continue, {"continue_tools": "tools", "end_conversation_turn": END})
workflow.add_conditional_edges("tools", after_tools_router, {"reflect_on_warnings": "reflection", "continue_to_agent": "agent"})
workflow.add_edge("reflection", "agent")
self.graph_app = workflow.compile()
print("ClinicalAgent initialized and LangGraph compiled.")
def invoke_turn(self, state: Dict) -> Dict:
"""Invokes the LangGraph app for one turn."""
print(f"Invoking graph with state keys: {state.keys()}")
try:
final_state = self.graph_app.invoke(state, {"recursion_limit": 15})
final_state.setdefault('summary', state.get('summary')) # Ensure keys exist
final_state.setdefault('interaction_warnings', None)
return final_state
except Exception as e:
print(f"CRITICAL ERROR during graph invocation: {type(e).__name__} - {e}"); traceback.print_exc();
error_msg = AIMessage(content=f"Sorry, a critical error occurred during processing: {e}");
return {"messages": state.get('messages', []) + [error_msg], "patient_data": state.get('patient_data'), "summary": state.get('summary'), "interaction_warnings": None} |