mgbam committed on
Commit
5fb0df4
·
verified ·
1 Parent(s): caaaced

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +198 -432
agent.py CHANGED
@@ -25,473 +25,239 @@ GROQ_API_KEY = os.getenv("GROQ_API_KEY")
25
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
26
 
27
  if not all([UMLS_API_KEY, GROQ_API_KEY, TAVILY_API_KEY]):
28
- logger.error("Missing one or more required API keys: UMLS_API_KEY, GROQ_API_KEY, TAVILY_API_KEY")
29
- raise RuntimeError("Missing required API keys")
30
 
31
  # ── Agent Configuration ──────────────────────────────────────────────
32
- AGENT_MODEL_NAME = "llama3-70b-8192"
33
- AGENT_TEMPERATURE = 0.1
34
- MAX_SEARCH_RESULTS = 3
35
-
36
  class ClinicalPrompts:
37
  SYSTEM_PROMPT = """
38
  You are SynapseAI, an expert AI clinical assistant engaged in an interactive consultation...
39
  [SYSTEM PROMPT CONTENT HERE]
40
  """
41
 
42
- # ── Helper: Message Wrapper ─────────────────────────────────────────────
43
- def wrap_message(msg: Any) -> AIMessage:
44
- """
45
- Ensures that the given message is an AIMessage.
46
- If it is a dict, it extracts the 'content' field (or serializes the dict).
47
- Otherwise, it converts the message to a string.
48
- """
49
- if isinstance(msg, AIMessage):
50
- return msg
51
- elif isinstance(msg, dict):
52
- return AIMessage(content=msg.get("content", json.dumps(msg)))
53
- else:
54
- return AIMessage(content=str(msg))
55
-
56
- # ── Helper Functions ─────────────────────────────────────────────────────
57
- UMLS_AUTH_ENDPOINT = "https://utslogin.nlm.nih.gov/cas/v1/api-key"
58
- RXNORM_API_BASE = "https://rxnav.nlm.nih.gov/REST"
59
- OPENFDA_API_BASE = "https://api.fda.gov/drug/label.json"
60
-
61
- @lru_cache(maxsize=256)
62
- def get_rxcui(drug_name: str) -> Optional[str]:
63
- """Lookup RxNorm CUI for a given drug name."""
64
- drug_name = (drug_name or "").strip()
65
- if not drug_name:
66
- return None
67
- logger.info(f"Looking up RxCUI for '{drug_name}'")
68
- try:
69
- params = {"name": drug_name, "search": 1}
70
- r = requests.get(f"{RXNORM_API_BASE}/rxcui.json", params=params, timeout=10)
71
- r.raise_for_status()
72
- ids = r.json().get("idGroup", {}).get("rxnormId")
73
- if ids:
74
- logger.info(f"Found RxCUI {ids[0]} for '{drug_name}'")
75
- return ids[0]
76
- r = requests.get(f"{RXNORM_API_BASE}/drugs.json", params={"name": drug_name}, timeout=10)
77
- r.raise_for_status()
78
- for grp in r.json().get("drugGroup", {}).get("conceptGroup", []):
79
- props = grp.get("conceptProperties")
80
- if props:
81
- logger.info(f"Found RxCUI {props[0]['rxcui']} via /drugs for '{drug_name}'")
82
- return props[0]["rxcui"]
83
- except Exception:
84
- logger.exception(f"Error fetching RxCUI for '{drug_name}'")
85
- return None
86
-
87
- @lru_cache(maxsize=128)
88
- def get_openfda_label(rxcui: Optional[str] = None, drug_name: Optional[str] = None) -> Optional[Dict[str, Any]]:
89
- """Fetch the OpenFDA label for a drug by RxCUI or name."""
90
- if not (rxcui or drug_name):
91
- return None
92
- terms = []
93
- if rxcui:
94
- terms.append(f'spl_rxnorm_code:"{rxcui}" OR openfda.rxcui:"{rxcui}"')
95
- if drug_name:
96
- dn = drug_name.lower()
97
- terms.append(f'(openfda.brand_name:"{dn}" OR openfda.generic_name:"{dn}")')
98
- query = " OR ".join(terms)
99
- logger.info(f"Looking up OpenFDA label with query: {query}")
100
- try:
101
- r = requests.get(OPENFDA_API_BASE, params={"search": query, "limit": 1}, timeout=15)
102
- r.raise_for_status()
103
- results = r.json().get("results", [])
104
- if results:
105
- return results[0]
106
- except Exception:
107
- logger.exception("Error fetching OpenFDA label")
108
- return None
109
-
110
- def search_text_list(texts: List[str], terms: List[str]) -> List[str]:
111
- """Return highlighted snippets from a list of texts containing any of the search terms."""
112
- snippets = []
113
- lowers = [t.lower() for t in terms if t]
114
- for text in texts or []:
115
- tl = text.lower()
116
- for term in lowers:
117
- if term in tl:
118
- i = tl.find(term)
119
- start = max(0, i - 50)
120
- end = min(len(text), i + len(term) + 100)
121
- snippet = text[start:end]
122
- snippet = re.sub(f"({re.escape(term)})", r"**\1**", snippet, flags=re.IGNORECASE)
123
- snippets.append(f"...{snippet}...")
124
- break
125
- return snippets
126
-
127
- def parse_bp(bp: str) -> Optional[tuple[int, int]]:
128
- """Parse 'SYS/DIA' blood pressure string into a (sys, dia) tuple."""
129
- if m := re.match(r"(\d{1,3})\s*/\s*(\d{1,3})", (bp or "").strip()):
130
- return int(m.group(1)), int(m.group(2))
131
- return None
132
-
133
- def check_red_flags(patient_data: Dict[str, Any]) -> List[str]:
134
- """Identify immediate red flags from patient_data."""
135
- flags: List[str] = []
136
- hpi = patient_data.get("hpi", {})
137
- vitals = patient_data.get("vitals", {})
138
- syms = [s.lower() for s in hpi.get("symptoms", []) if isinstance(s, str)]
139
- mapping = {
140
- "chest pain": "Chest pain reported",
141
- "shortness of breath": "Shortness of breath reported",
142
- "severe headache": "Severe headache reported",
143
- "syncope": "Syncope reported",
144
- "hemoptysis": "Hemoptysis reported"
145
- }
146
- for term, desc in mapping.items():
147
- if term in syms:
148
- flags.append(f"Red Flag: {desc}.")
149
- temp = vitals.get("temp_c")
150
- hr = vitals.get("hr_bpm")
151
- rr = vitals.get("rr_rpm")
152
- spo2 = vitals.get("spo2_percent")
153
- bp = parse_bp(vitals.get("bp_mmhg", ""))
154
- if temp is not None and temp >= 38.5:
155
- flags.append(f"Red Flag: Fever ({temp}°C).")
156
- if hr is not None:
157
- if hr >= 120:
158
- flags.append(f"Red Flag: Tachycardia ({hr} bpm).")
159
- if hr <= 50:
160
- flags.append(f"Red Flag: Bradycardia ({hr} bpm).")
161
- if rr is not None and rr >= 24:
162
- flags.append(f"Red Flag: Tachypnea ({rr} rpm).")
163
- if spo2 is not None and spo2 <= 92:
164
- flags.append(f"Red Flag: Hypoxia ({spo2}%).")
165
- if bp:
166
- sys, dia = bp
167
- if sys >= 180 or dia >= 110:
168
- flags.append(f"Red Flag: Hypertensive urgency/emergency ({sys}/{dia} mmHg).")
169
- if sys <= 90 or dia <= 60:
170
- flags.append(f"Red Flag: Hypotension ({sys}/{dia} mmHg).")
171
- return list(dict.fromkeys(flags))
172
-
173
- def format_patient_data_for_prompt(data: Dict[str, Any]) -> str:
174
- """Format patient_data dict into a markdown-like prompt section."""
175
- if not data:
176
- return "No patient data provided."
177
- lines: List[str] = []
178
- for section, value in data.items():
179
- title = section.replace("_", " ").title()
180
- if isinstance(value, dict) and any(value.values()):
181
- lines.append(f"**{title}:**")
182
- for k, v in value.items():
183
- if v:
184
- lines.append(f"- {k.replace('_',' ').title()}: {v}")
185
- elif isinstance(value, list) and value:
186
- lines.append(f"**{title}:** {', '.join(map(str, value))}")
187
- elif value:
188
- lines.append(f"**{title}:** {value}")
189
- return "\n".join(lines)
190
-
191
- # ── Tool Input Schemas ─────────────────────────────────────────────────────
192
- class LabOrderInput(BaseModel):
193
- test_name: str = Field(...)
194
- reason: str = Field(...)
195
- priority: str = Field("Routine")
196
-
197
- class PrescriptionInput(BaseModel):
198
- medication_name: str = Field(...)
199
- dosage: str = Field(...)
200
- route: str = Field(...)
201
- frequency: str = Field(...)
202
- duration: str = Field("As directed")
203
- reason: str = Field(...)
204
-
205
- class InteractionCheckInput(BaseModel):
206
- potential_prescription: str
207
- current_medications: Optional[List[str]] = Field(None)
208
- allergies: Optional[List[str]] = Field(None)
209
-
210
- class FlagRiskInput(BaseModel):
211
- risk_description: str = Field(...)
212
- urgency: str = Field("High")
213
-
214
- # ── Tool Implementations ───────────────────────────────────────────────────
215
- @tool("order_lab_test", args_schema=LabOrderInput)
216
- def order_lab_test(test_name: str, reason: str, priority: str = "Routine") -> str:
217
- """
218
- Place an order for a laboratory test.
219
- """
220
- logger.info(f"Ordering lab test: {test_name}, reason: {reason}, priority: {priority}")
221
- return json.dumps({
222
- "status": "success",
223
- "message": f"Lab Ordered: {test_name} ({priority})",
224
- "details": f"Reason: {reason}"
225
- })
226
-
227
- @tool("prescribe_medication", args_schema=PrescriptionInput)
228
- def prescribe_medication(
229
- medication_name: str,
230
- dosage: str,
231
- route: str,
232
- frequency: str,
233
- duration: str,
234
- reason: str
235
- ) -> str:
236
- """
237
- Prepare a medication prescription.
238
- """
239
- logger.info(f"Preparing prescription: {medication_name} {dosage}, route: {route}, freq: {frequency}")
240
- return json.dumps({
241
- "status": "success",
242
- "message": f"Prescription Prepared: {medication_name} {dosage} {route} {frequency}",
243
- "details": f"Duration: {duration}. Reason: {reason}"
244
- })
245
 
246
- @tool("check_drug_interactions", args_schema=InteractionCheckInput)
247
- def check_drug_interactions(
248
- potential_prescription: str,
249
- current_medications: Optional[List[str]] = None,
250
- allergies: Optional[List[str]] = None
251
- ) -> str:
252
- """
253
- Check for drug–drug interactions and allergy risks.
254
- """
255
- logger.info(f"Checking interactions for: {potential_prescription}")
256
- warnings: List[str] = []
257
- pm = [m.lower().strip() for m in (current_medications or []) if m]
258
- al = [a.lower().strip() for a in (allergies or []) if a]
259
- if potential_prescription.lower().strip() in al:
260
- warnings.append(f"CRITICAL ALLERGY: Patient allergic to '{potential_prescription}'.")
261
- rxcui = get_rxcui(potential_prescription)
262
- label = get_openfda_label(rxcui=rxcui, drug_name=potential_prescription)
263
- if not (rxcui or label):
264
- warnings.append(f"INFO: Could not identify '{potential_prescription}'. Checks may be incomplete.")
265
- for section in ("contraindications", "warnings_and_cautions", "warnings"):
266
- items = label.get(section) if label else None
267
- if isinstance(items, list):
268
- snippets = search_text_list(items, al)
269
- if snippets:
270
- warnings.append(f"ALLERGY RISK ({section}): {'; '.join(snippets)}")
271
- for med in pm:
272
- mrxcui = get_rxcui(med)
273
- mlabel = get_openfda_label(rxcui=mrxcui, drug_name=med)
274
- for sec in ("drug_interactions",):
275
- for src_label, src_name in ((label, potential_prescription), (mlabel, med)):
276
- items = src_label.get(sec) if src_label else None
277
- if isinstance(items, list):
278
- snippets = search_text_list(items, [med if src_name == potential_prescription else potential_prescription])
279
- if snippets:
280
- warnings.append(f"Interaction ({src_name} label): {'; '.join(snippets)}")
281
- status = "warning" if warnings else "clear"
282
- message = (
283
- f"{len(warnings)} issue(s) found for '{potential_prescription}'."
284
- if warnings else
285
- f"No major interactions or allergy issues identified for '{potential_prescription}'."
286
- )
287
- return json.dumps({"status": status, "message": message, "warnings": warnings})
288
-
289
- @tool("flag_risk", args_schema=FlagRiskInput)
290
- def flag_risk(risk_description: str, urgency: str = "High") -> str:
291
- """
292
- Flag a clinical risk with given urgency.
293
- """
294
- logger.info(f"Flagging risk: {risk_description} (urgency={urgency})")
295
- return json.dumps({
296
- "status": "flagged",
297
- "message": f"Risk '{risk_description}' flagged with {urgency} urgency."
298
- })
299
-
300
- # Include the Tavily search tool
301
- search_tool = TavilySearchResults(max_results=MAX_SEARCH_RESULTS, name="tavily_search_results")
302
- all_tools = [order_lab_test, prescribe_medication, check_drug_interactions, flag_risk, search_tool]
303
-
304
- # ── LLM & Tool Executor ───────────────────────────────────────────────────
305
- llm = ChatGroq(temperature=AGENT_TEMPERATURE, model=AGENT_MODEL_NAME)
306
- model_with_tools = llm.bind_tools(all_tools)
307
- tool_executor = ToolExecutor(all_tools)
308
-
309
- # ── State Definition ─────────────────────────────────────────────────────
310
  class AgentState(TypedDict):
311
  messages: List[Any]
312
  patient_data: Optional[Dict[str, Any]]
313
  summary: Optional[str]
314
  interaction_warnings: Optional[List[str]]
315
- done: Optional[bool]
316
- iterations: Optional[int]
317
 
318
- # Helper to propagate state fields between nodes
319
  def propagate_state(new: Dict[str, Any], old: Dict[str, Any]) -> Dict[str, Any]:
320
- for key in ["iterations", "done", "patient_data", "summary", "interaction_warnings"]:
321
- if key in old and key not in new:
322
- new[key] = old[key]
323
- return new
324
 
325
- # ── Graph Nodes ─────────────────────────────────────────────────────────
326
  def agent_node(state: AgentState) -> Dict[str, Any]:
 
 
 
 
327
  if state.get("done", False):
328
  return state
329
- msgs = state.get("messages", [])
330
- if not msgs or not isinstance(msgs[0], SystemMessage):
331
- msgs = [SystemMessage(content=ClinicalPrompts.SYSTEM_PROMPT)] + msgs
332
- logger.info(f"Invoking LLM with {len(msgs)} messages")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
333
  try:
334
- response = model_with_tools.invoke(msgs)
335
- response = wrap_message(response)
336
- new_state = {"messages": [response]}
337
- return propagate_state(new_state, state)
 
 
 
 
 
 
 
338
  except Exception as e:
339
- logger.exception("Error in agent_node")
340
- new_state = {"messages": [wrap_message(AIMessage(content=f"Error: {e}"))]}
341
- return propagate_state(new_state, state)
 
 
 
 
 
 
 
 
342
 
343
  def tool_node(state: AgentState) -> Dict[str, Any]:
344
- if state.get("done", False):
 
 
 
 
 
345
  return state
346
- messages_list = state.get("messages", [])
347
- if not messages_list:
348
- logger.warning("tool_node invoked with no messages")
349
- new_state = {"messages": []}
350
- return propagate_state(new_state, state)
351
- last = wrap_message(messages_list[-1])
352
- # Check for pending tool_calls using dict.get if necessary.
353
- tool_calls = last.tool_calls if hasattr(last, "tool_calls") else last.__dict__.get("tool_calls")
354
- if not (isinstance(last, AIMessage) and tool_calls):
355
- logger.warning("tool_node invoked without pending tool_calls")
356
- new_state = {"messages": []}
357
- return propagate_state(new_state, state)
358
- calls = tool_calls
359
- blocked_ids = set()
360
- for call in calls:
361
- if call["name"] == "prescribe_medication":
362
- med = call["args"].get("medication_name", "").lower()
363
- if not any(
364
- c["name"] == "check_drug_interactions" and
365
- c["args"].get("potential_prescription", "").lower() == med
366
- for c in calls
367
- ):
368
- logger.warning(f"Blocking prescribe_medication for '{med}' without interaction check")
369
- blocked_ids.add(call["id"])
370
- to_execute = [c for c in calls if c["id"] not in blocked_ids]
371
- pd = state.get("patient_data", {})
372
- for call in to_execute:
373
- if call["name"] == "check_drug_interactions":
374
- call["args"].setdefault("current_medications", pd.get("medications", {}).get("current", []))
375
- call["args"].setdefault("allergies", pd.get("allergies", []))
376
- messages: List[ToolMessage] = []
377
- warnings: List[str] = []
378
- try:
379
- responses = tool_executor.batch(to_execute, return_exceptions=True)
380
- for call, resp in zip(to_execute, responses):
381
- if isinstance(resp, Exception):
382
- logger.exception(f"Error executing tool {call['name']}")
383
- content = json.dumps({"status": "error", "message": str(resp)})
384
- else:
385
- content = str(resp)
386
- if call["name"] == "check_drug_interactions":
387
- data = json.loads(content)
388
- if data.get("status") == "warning":
389
- warnings.extend(data.get("warnings", []))
390
- messages.append(ToolMessage(content=content, tool_call_id=call["id"], name=call["name"]))
391
- except Exception as e:
392
- logger.exception("Critical error in tool_node")
393
- for call in to_execute:
394
- messages.append(ToolMessage(
395
- content=json.dumps({"status": "error", "message": str(e)}),
396
- tool_call_id=call["id"],
397
- name=call["name"]
398
- ))
399
- new_state = {"messages": messages, "interaction_warnings": warnings or None}
400
- return propagate_state(new_state, state)
401
-
402
  def reflection_node(state: AgentState) -> Dict[str, Any]:
403
- if state.get("done", False):
 
 
404
  return state
405
- warns = state.get("interaction_warnings")
406
- if not warns:
407
- logger.warning("reflection_node called without warnings")
408
- new_state = {"messages": []}
409
- return propagate_state(new_state, state)
410
- triggering = None
411
- for msg in reversed(state.get("messages", [])):
412
- wrapped = wrap_message(msg)
413
- if isinstance(wrapped, AIMessage) and wrapped.__dict__.get("tool_calls"):
414
- triggering = wrapped
415
- break
416
- if not triggering:
417
- new_state = {"messages": [AIMessage(content="Internal Error: reflection context missing.")]}
418
- return propagate_state(new_state, state)
419
- prompt = (
420
- "You are SynapseAI, performing a focused safety review of the following plan:\n\n"
421
- f"{triggering.content}\n\n"
422
- "Highlight any issues based on these warnings:\n" +
423
- "\n".join(f"- {w}" for w in warns)
424
- )
425
  try:
426
- resp = llm.invoke([SystemMessage(content="Safety reflection"), HumanMessage(content=prompt)])
427
- new_state = {"messages": [wrap_message(resp)]}
428
- return propagate_state(new_state, state)
 
 
 
 
 
 
 
429
  except Exception as e:
430
- logger.exception("Error during reflection")
431
- new_state = {"messages": [AIMessage(content=f"Error during reflection: {e}")]}
432
- return propagate_state(new_state, state)
433
-
434
- # ── Routing Functions ────────────────────────────────────────────────────
435
- def should_continue(state: AgentState) -> str:
436
- state.setdefault("iterations", 0)
437
- state["iterations"] += 1
438
- logger.info(f"Iteration count: {state['iterations']}")
439
- if state["iterations"] >= 4:
440
- state.setdefault("messages", []).append(AIMessage(content="Final output: consultation complete."))
441
- state["done"] = True
442
- return "end_conversation_turn"
443
- if not state.get("messages"):
444
- state["done"] = True
445
- return "end_conversation_turn"
446
- last = wrap_message(state["messages"][-1])
447
- if not isinstance(last, AIMessage):
448
- state["done"] = True
449
- return "end_conversation_turn"
450
- if last.__dict__.get("tool_calls"):
451
- return "continue_tools"
452
- if "consultation complete" in last.content.lower():
453
- state["done"] = True
454
- return "end_conversation_turn"
455
- state["done"] = False
456
- return "agent"
457
-
458
- def after_tools_router(state: AgentState) -> str:
459
  if state.get("interaction_warnings"):
460
  return "reflection"
461
- return "end_conversation_turn"
 
 
 
 
 
 
462
 
463
- # ── ClinicalAgent ─────────────────────────────────────────────────────────
464
  class ClinicalAgent:
465
  def __init__(self):
466
- logger.info("Building ClinicalAgent workflow")
467
- wf = StateGraph(AgentState)
468
- wf.add_node("agent", agent_node)
469
- wf.add_node("tools", tool_node)
470
- wf.add_node("reflection", reflection_node)
471
- wf.set_entry_point("agent")
472
- wf.add_conditional_edges("agent", should_continue, {
473
- "continue_tools": "tools",
474
- "end_conversation_turn": END
475
- })
476
- wf.add_conditional_edges("tools", after_tools_router, {
477
- "reflection": "reflection",
478
- "end_conversation_turn": END
479
- })
480
- # Removed edge from reflection to agent to break cycle.
481
- self.graph_app = wf.compile()
482
- logger.info("ClinicalAgent ready")
483
-
484
- def invoke_turn(self, state: Dict[str, Any]) -> Dict[str, Any]:
 
 
 
 
 
 
 
 
 
485
  try:
486
- result = self.graph_app.invoke(state, {"recursion_limit": 100})
487
- result.setdefault("summary", state.get("summary"))
488
- result.setdefault("interaction_warnings", None)
489
- return result
490
  except Exception as e:
491
- logger.exception("Error during graph invocation")
492
  return {
493
- "messages": state.get("messages", []) + [AIMessage(content=f"Error: {e}")],
494
- "patient_data": state.get("patient_data"),
495
- "summary": state.get("summary"),
496
- "interaction_warnings": None
497
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
26
 
27
  if not all([UMLS_API_KEY, GROQ_API_KEY, TAVILY_API_KEY]):
28
+ logger.error("Missing required API keys")
29
+ raise RuntimeError("Missing API keys")
30
 
31
  # ── Agent Configuration ──────────────────────────────────────────────
 
 
 
 
32
  class ClinicalPrompts:
33
  SYSTEM_PROMPT = """
34
  You are SynapseAI, an expert AI clinical assistant engaged in an interactive consultation...
35
  [SYSTEM PROMPT CONTENT HERE]
36
  """
37
 
38
+ MAX_ITERATIONS = 4
39
+ AGENT_MODEL_NAME = "llama3-70b-8192"
40
+ AGENT_TEMPERATURE = 0.1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
+ # ── State Definition ─────────────────────────────────────────────────
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  class AgentState(TypedDict):
44
  messages: List[Any]
45
  patient_data: Optional[Dict[str, Any]]
46
  summary: Optional[str]
47
  interaction_warnings: Optional[List[str]]
48
+ done: bool
49
+ iterations: int
50
 
 
51
  def propagate_state(new: Dict[str, Any], old: Dict[str, Any]) -> Dict[str, Any]:
52
+ """Merge new state changes with existing state"""
53
+ return {**old, **new}
 
 
54
 
55
+ # ── Core Agent Node ──────────────────────────────────────────────────
56
  def agent_node(state: AgentState) -> Dict[str, Any]:
57
+ """Main agent node with iteration tracking"""
58
+ state = dict(state) # Create mutable copy
59
+
60
+ # Check termination conditions
61
  if state.get("done", False):
62
  return state
63
+
64
+ # Update iteration count
65
+ iterations = state.get("iterations", 0) + 1
66
+ state["iterations"] = iterations
67
+
68
+ # Enforce iteration limit
69
+ if iterations >= MAX_ITERATIONS:
70
+ return {
71
+ "messages": [AIMessage(content="Consultation concluded. Maximum iterations reached.")],
72
+ "done": True,
73
+ **state
74
+ }
75
+
76
+ # Prepare message history
77
+ messages = state.get("messages", [])
78
+ if not messages or not isinstance(messages[0], SystemMessage):
79
+ messages = [SystemMessage(content=ClinicalPrompts.SYSTEM_PROMPT)] + messages
80
+
81
  try:
82
+ # Generate response
83
+ llm_response = ChatGroq(
84
+ temperature=AGENT_TEMPERATURE,
85
+ model=AGENT_MODEL_NAME
86
+ ).invoke(messages)
87
+
88
+ return propagate_state({
89
+ "messages": [llm_response],
90
+ "done": "consultation complete" in llm_response.content.lower()
91
+ }, state)
92
+
93
  except Exception as e:
94
+ logger.error(f"Agent error: {str(e)}")
95
+ return propagate_state({
96
+ "messages": [AIMessage(content=f"System Error: {str(e)}")],
97
+ "done": True
98
+ }, state)
99
+
100
+ # ── Tool Handling Nodes ──────────────────────────────────────────────
101
+ tool_executor = ToolExecutor([
102
+ TavilySearchResults(max_results=3),
103
+ # Include other tools here...
104
+ ])
105
 
106
  def tool_node(state: AgentState) -> Dict[str, Any]:
107
+ """Execute tool calls from last agent message"""
108
+ state = dict(state)
109
+ messages = state["messages"]
110
+ last_message = messages[-1]
111
+
112
+ if not isinstance(last_message, AIMessage) or not last_message.tool_calls:
113
  return state
114
+
115
+ tool_calls = last_message.tool_calls
116
+ outputs = []
117
+
118
+ for tool_call in tool_calls:
119
+ try:
120
+ output = tool_executor.invoke(tool_call)
121
+ outputs.append(
122
+ ToolMessage(
123
+ content=json.dumps(output),
124
+ tool_call_id=tool_call["id"],
125
+ name=tool_call["name"]
126
+ )
127
+ )
128
+ except Exception as e:
129
+ logger.error(f"Tool error: {str(e)}")
130
+ outputs.append(
131
+ ToolMessage(
132
+ content=json.dumps({"error": str(e)}),
133
+ tool_call_id=tool_call["id"],
134
+ name=tool_call["name"]
135
+ )
136
+ )
137
+
138
+ return propagate_state({
139
+ "messages": outputs,
140
+ "interaction_warnings": detect_interaction_warnings(outputs)
141
+ }, state)
142
+
143
+ def detect_interaction_warnings(tool_messages: List[ToolMessage]) -> List[str]:
144
+ """Parse tool outputs for interaction warnings"""
145
+ warnings = []
146
+ for msg in tool_messages:
147
+ try:
148
+ content = json.loads(msg.content)
149
+ if content.get("status") == "warning":
150
+ warnings.extend(content.get("warnings", []))
151
+ except json.JSONDecodeError:
152
+ continue
153
+ return warnings
154
+
155
+ # ── Safety Reflection Node ───────────────────────────────────────────
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
  def reflection_node(state: AgentState) -> Dict[str, Any]:
157
+ """Analyze potential safety issues"""
158
+ warnings = state.get("interaction_warnings", [])
159
+ if not warnings:
160
  return state
161
+
162
+ prompt = f"""Analyze these clinical warnings:
163
+ {chr(10).join(warnings)}
164
+
165
+ Provide concise safety recommendations:"""
166
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
  try:
168
+ reflection = ChatGroq(
169
+ temperature=0.0, # Strict safety mode
170
+ model=AGENT_MODEL_NAME
171
+ ).invoke([HumanMessage(content=prompt)])
172
+
173
+ return propagate_state({
174
+ "messages": [reflection],
175
+ "summary": f"Safety Review:\n{reflection.content}"
176
+ }, state)
177
+
178
  except Exception as e:
179
+ logger.error(f"Reflection error: {str(e)}")
180
+ return propagate_state({
181
+ "messages": [AIMessage(content=f"Safety review unavailable: {str(e)}")],
182
+ "summary": "Failed safety review"
183
+ }, state)
184
+
185
+ # ── State Routing Logic ──────────────────────────────────────────────
186
+ def route_state(state: AgentState) -> str:
187
+ """Determine next node in workflow"""
188
+ if state.get("done", False):
189
+ return "end"
190
+
191
+ messages = state.get("messages", [])
192
+
193
+ # Prioritize safety reflection
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
  if state.get("interaction_warnings"):
195
  return "reflection"
196
+
197
+ # Check for tool calls
198
+ if messages and isinstance(messages[-1], AIMessage):
199
+ if messages[-1].tool_calls:
200
+ return "tools"
201
+
202
+ return "agent"
203
 
204
+ # ── Workflow Construction ────────────────────────────────────────────
205
  class ClinicalAgent:
206
  def __init__(self):
207
+ self.workflow = StateGraph(AgentState)
208
+
209
+ # Define nodes
210
+ self.workflow.add_node("agent", agent_node)
211
+ self.workflow.add_node("tools", tool_node)
212
+ self.workflow.add_node("reflection", reflection_node)
213
+
214
+ # Configure edges
215
+ self.workflow.set_entry_point("agent")
216
+
217
+ self.workflow.add_conditional_edges(
218
+ "agent",
219
+ lambda state: "tools" if state.get("messages")[-1].tool_calls else "end",
220
+ {"tools": "tools", "end": END}
221
+ )
222
+
223
+ self.workflow.add_conditional_edges(
224
+ "tools",
225
+ lambda state: "reflection" if state.get("interaction_warnings") else "agent",
226
+ {"reflection": "reflection", "agent": "agent"}
227
+ )
228
+
229
+ self.workflow.add_edge("reflection", "agent")
230
+
231
+ self.app = self.workflow.compile()
232
+
233
+ def consult(self, initial_state: Dict) -> Dict:
234
+ """Execute full consultation workflow"""
235
  try:
236
+ return self.app.invoke(
237
+ initial_state,
238
+ {"recursion_limit": MAX_ITERATIONS + 2}
239
+ )
240
  except Exception as e:
241
+ logger.error(f"Consultation failed: {str(e)}")
242
  return {
243
+ "error": str(e),
244
+ "trace": traceback.format_exc(),
245
+ "done": True
 
246
  }
247
+
248
+ # ── Example Usage ────────────────────────────────────────────────────
249
+ if __name__ == "__main__":
250
+ agent = ClinicalAgent()
251
+
252
+ initial_state = {
253
+ "messages": [HumanMessage(content="Patient presents with chest pain")],
254
+ "patient_data": {
255
+ "age": 45,
256
+ "vitals": {"bp": "150/95", "hr": 110}
257
+ },
258
+ "done": False,
259
+ "iterations": 0
260
+ }
261
+
262
+ result = agent.consult(initial_state)
263
+ print("Final State:", json.dumps(result, indent=2))