# -*- coding: utf-8 -*-
import streamlit as st
import requests
import json
import re
import os
import operator
import traceback
from functools import lru_cache
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage, ToolMessage
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import tool
from langgraph.prebuilt import ToolExecutor
from langgraph.graph import StateGraph, END
from typing import Optional, List, Dict, Any, TypedDict, Annotated
# --- Environment Variable Loading & Validation ---
load_dotenv()
UMLS_API_KEY = os.environ.get("UMLS_API_KEY")
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
TAVILY_API_KEY = os.environ.get("TAVILY_API_KEY")
missing_keys = []
if not UMLS_API_KEY: missing_keys.append("UMLS_API_KEY")
if not GROQ_API_KEY: missing_keys.append("GROQ_API_KEY")
if not TAVILY_API_KEY: missing_keys.append("TAVILY_API_KEY")
if missing_keys: st.error(f"Missing API Key(s): {', '.join(missing_keys)}."); st.stop()
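# The three keys above are read from the environment (or a local .env file loaded by python-dotenv), e.g.:
#   UMLS_API_KEY=...
#   GROQ_API_KEY=...
#   TAVILY_API_KEY=...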
# --- Configuration & Constants ---
class ClinicalAppSettings:
    APP_TITLE = "SynapseAI (UMLS/FDA Integrated)"
    PAGE_LAYOUT = "wide"
    MODEL_NAME = "llama3-70b-8192"
    TEMPERATURE = 0.1
    MAX_SEARCH_RESULTS = 3
class ClinicalPrompts: SYSTEM_PROMPT = """
You are SynapseAI, an expert AI clinical assistant engaged in an interactive consultation... [SYSTEM PROMPT REMAINS THE SAME - OMITTED FOR BREVITY]
"""
# --- API Helper Functions (get_rxcui, get_openfda_label, search_text_list) ---
# ... (Keep these functions exactly as they were) ...
UMLS_AUTH_ENDPOINT = "https://utslogin.nlm.nih.gov/cas/v1/api-key"; RXNORM_API_BASE = "https://rxnav.nlm.nih.gov/REST"; OPENFDA_API_BASE = "https://api.fda.gov/drug/label.json"
@lru_cache(maxsize=256)
def get_rxcui(drug_name: str) -> Optional[str]:
    if not drug_name or not isinstance(drug_name, str): return None
    drug_name = drug_name.strip()
    if not drug_name: return None
    print(f"RxNorm Lookup for: '{drug_name}'")
try: # Try direct lookup first
params = {"name": drug_name, "search": 1}; response = requests.get(f"{RXNORM_API_BASE}/rxcui.json", params=params, timeout=10); response.raise_for_status(); data = response.json();
if data and "idGroup" in data and "rxnormId" in data["idGroup"]: rxcui = data["idGroup"]["rxnormId"][0]; print(f" Found RxCUI: {rxcui} for '{drug_name}'"); return rxcui
else: # Fallback to /drugs search
params = {"name": drug_name}; response = requests.get(f"{RXNORM_API_BASE}/drugs.json", params=params, timeout=10); response.raise_for_status(); data = response.json();
if data and "drugGroup" in data and "conceptGroup" in data["drugGroup"]:
for group in data["drugGroup"]["conceptGroup"]:
                    if group.get("tty") in ["SBD", "SCD", "GPCK", "BPCK", "IN", "MIN", "PIN"]:
                        if "conceptProperties" in group and group["conceptProperties"]:
                            rxcui = group["conceptProperties"][0].get("rxcui")
                            if rxcui: print(f"  Found RxCUI (via /drugs): {rxcui} for '{drug_name}'"); return rxcui
print(f" RxCUI not found for '{drug_name}'."); return None
except requests.exceptions.RequestException as e: print(f" Error fetching RxCUI for '{drug_name}': {e}"); return None
except json.JSONDecodeError as e: print(f" Error decoding RxNorm JSON response for '{drug_name}': {e}"); return None
except Exception as e: print(f" Unexpected error in get_rxcui for '{drug_name}': {e}"); return None
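# Illustrative usage (not executed here; calls the live RxNorm API, so it needs network access):
#   get_rxcui("metformin")  -> an RxCUI string such as "6809", or None if the name cannot be resolved.
# Results are memoized by lru_cache, so repeated lookups of the same name avoid extra HTTP calls.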
@lru_cache(maxsize=128)
def get_openfda_label(rxcui: Optional[str] = None, drug_name: Optional[str] = None) -> Optional[dict]:
    if not rxcui and not drug_name: return None
    print(f"OpenFDA Label Lookup for: RXCUI={rxcui}, Name={drug_name}")
    search_terms = []
if rxcui: search_terms.append(f'spl_rxnorm_code:"{rxcui}" OR openfda.rxcui:"{rxcui}"')
if drug_name: search_terms.append(f'(openfda.brand_name:"{drug_name.lower()}" OR openfda.generic_name:"{drug_name.lower()}")')
search_query = " OR ".join(search_terms); params = {"search": search_query, "limit": 1};
try:
response = requests.get(OPENFDA_API_BASE, params=params, timeout=15); response.raise_for_status(); data = response.json();
if data and "results" in data and data["results"]: print(f" Found OpenFDA label for query: {search_query}"); return data["results"][0]
print(f" No OpenFDA label found for query: {search_query}"); return None
except requests.exceptions.RequestException as e: print(f" Error fetching OpenFDA label: {e}"); return None
except json.JSONDecodeError as e: print(f" Error decoding OpenFDA JSON response: {e}"); return None
except Exception as e: print(f" Unexpected error in get_openfda_label: {e}"); return None
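# Illustrative usage (not executed here; queries the live openFDA drug-label endpoint):
#   label = get_openfda_label(rxcui="6809", drug_name="metformin")
#   if label: interactions = label.get("drug_interactions")  # label sections come back as lists of strings
# Either identifier is sufficient; both are OR'ed together into a single openFDA search query (limit=1).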
def search_text_list(text_list: Optional[List[str]], search_terms: List[str]) -> List[str]:
    found_snippets = []
    if not text_list or not search_terms: return found_snippets
    search_terms_lower = [str(term).lower() for term in search_terms if term]
for text_item in text_list:
        if not isinstance(text_item, str): continue
        text_item_lower = text_item.lower()
for term in search_terms_lower:
if term in text_item_lower:
start_index = text_item_lower.find(term); snippet_start = max(0, start_index - 50); snippet_end = min(len(text_item), start_index + len(term) + 100); snippet = text_item[snippet_start:snippet_end];
# Highlight first match for clarity
snippet = re.sub(f"({re.escape(term)})", r"**\1**", snippet, count=1, flags=re.IGNORECASE)
found_snippets.append(f"...{snippet}...")
break # Only report first match per text item
return found_snippets
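# Illustrative usage (pure string matching, no network calls):
#   search_text_list(["Concomitant use with warfarin may increase bleeding risk."], ["warfarin"])
#   -> ['...Concomitant use with **warfarin** may increase bleeding risk....']
# Each snippet is the matching text item trimmed to roughly 50 chars before and 100 chars after the
# first hit, with that first hit bolded; only the first matching term per text item is reported.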
# --- Other Helper Functions ---
def parse_bp(bp_string: str) -> Optional[tuple[int, int]]:
    if not isinstance(bp_string, str): return None
    match = re.match(r"(\d{1,3})\s*/\s*(\d{1,3})", bp_string.strip())
    if match: return int(match.group(1)), int(match.group(2))
    return None
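# Illustrative usage (pure function):
#   parse_bp("155/90")        -> (155, 90)
#   parse_bp("155 / 90 mmHg") -> (155, 90)   # trailing text after the numbers is ignored
#   parse_bp("unknown")       -> None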
# CORRECTED check_red_flags function
def check_red_flags(patient_data: dict) -> List[str]:
"""Checks patient data against predefined red flags."""
flags = []
if not patient_data: return flags
symptoms = patient_data.get("hpi", {}).get("symptoms", [])
vitals = patient_data.get("vitals", {})
history = patient_data.get("pmh", {}).get("conditions", "")
symptoms_lower = [str(s).lower() for s in symptoms if isinstance(s, str)]
# Symptom Flags (CORRECTED - Separate lines)
if "chest pain" in symptoms_lower:
flags.append("Red Flag: Chest Pain reported.")
if "shortness of breath" in symptoms_lower:
flags.append("Red Flag: Shortness of Breath reported.")
if "severe headache" in symptoms_lower:
flags.append("Red Flag: Severe Headache reported.")
if "sudden vision loss" in symptoms_lower:
flags.append("Red Flag: Sudden Vision Loss reported.")
if "weakness on one side" in symptoms_lower:
flags.append("Red Flag: Unilateral Weakness reported (potential stroke).")
if "hemoptysis" in symptoms_lower:
flags.append("Red Flag: Hemoptysis (coughing up blood).")
if "syncope" in symptoms_lower:
flags.append("Red Flag: Syncope (fainting).")
# Vital Sign Flags
if vitals:
temp = vitals.get("temp_c"); hr = vitals.get("hr_bpm"); rr = vitals.get("rr_rpm")
spo2 = vitals.get("spo2_percent"); bp_str = vitals.get("bp_mmhg")
        if temp is not None and temp >= 38.5: flags.append(f"Red Flag: Fever ({temp}°C).")
if hr is not None and hr >= 120: flags.append(f"Red Flag: Tachycardia ({hr} bpm).")
if hr is not None and hr <= 50: flags.append(f"Red Flag: Bradycardia ({hr} bpm).")
if rr is not None and rr >= 24: flags.append(f"Red Flag: Tachypnea ({rr} rpm).")
if spo2 is not None and spo2 <= 92: flags.append(f"Red Flag: Hypoxia ({spo2}%).")
if bp_str:
bp = parse_bp(bp_str)
if bp:
if bp[0] >= 180 or bp[1] >= 110: flags.append(f"Red Flag: Hypertensive Urgency/Emergency (BP: {bp_str} mmHg).")
if bp[0] <= 90 or bp[1] <= 60: flags.append(f"Red Flag: Hypotension (BP: {bp_str} mmHg).")
# History Flags
if history and isinstance(history, str):
history_lower = history.lower()
if "history of mi" in history_lower and "chest pain" in symptoms_lower:
flags.append("Red Flag: History of MI with current Chest Pain.")
if "history of dvt/pe" in history_lower and "shortness of breath" in symptoms_lower:
flags.append("Red Flag: History of DVT/PE with current Shortness of Breath.")
return list(set(flags)) # Unique flags
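# Illustrative usage (pure function; order of the returned flags is not guaranteed because of the set()):
#   check_red_flags({"hpi": {"symptoms": ["Chest Pain"]}, "vitals": {"hr_bpm": 130}, "pmh": {"conditions": "History of MI"}})
#   -> ["Red Flag: Chest Pain reported.", "Red Flag: Tachycardia (130 bpm).", "Red Flag: History of MI with current Chest Pain."]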
def format_patient_data_for_prompt(data: dict) -> str:
# ... (Keep this function exactly as it was) ...
    if not data: return "No patient data provided."
    prompt_str = ""
    for key, value in data.items():
        section_title = key.replace('_', ' ').title()
        if isinstance(value, dict) and value:
            has_content = any(sub_value for sub_value in value.values())
            if has_content:
                prompt_str += f"**{section_title}:**\n"
                for sub_key, sub_value in value.items():
                    if sub_value: prompt_str += f"  - {sub_key.replace('_', ' ').title()}: {sub_value}\n"
        elif isinstance(value, list) and value:
            prompt_str += f"**{section_title}:** {', '.join(map(str, value))}\n"
        elif value and not isinstance(value, dict):
            prompt_str += f"**{section_title}:** {value}\n"
    return prompt_str.strip()
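# Illustrative usage (pure function):
#   format_patient_data_for_prompt({"demographics": {"age": 55, "sex": "Male"}, "allergies": ["penicillin"]})
#   -> "**Demographics:**\n  - Age: 55\n  - Sex: Male\n**Allergies:** penicillin"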
# --- Tool Definitions ---
class LabOrderInput(BaseModel): test_name: str = Field(...); reason: str = Field(...); priority: str = Field("Routine")
class PrescriptionInput(BaseModel): medication_name: str = Field(...); dosage: str = Field(...); route: str = Field(...); frequency: str = Field(...); duration: str = Field("As directed"); reason: str = Field(...)
class InteractionCheckInput(BaseModel): potential_prescription: str = Field(...); current_medications: Optional[List[str]] = Field(None); allergies: Optional[List[str]] = Field(None)
class FlagRiskInput(BaseModel): risk_description: str = Field(...); urgency: str = Field("High")
@tool("order_lab_test", args_schema=LabOrderInput)
def order_lab_test(test_name: str, reason: str, priority: str = "Routine") -> str:
print(f"Executing order_lab_test: {test_name}, Reason: {reason}, Priority: {priority}"); return json.dumps({"status": "success", "message": f"Lab Ordered: {test_name} ({priority})", "details": f"Reason: {reason}"})
@tool("prescribe_medication", args_schema=PrescriptionInput)
def prescribe_medication(medication_name: str, dosage: str, route: str, frequency: str, duration: str, reason: str) -> str:
print(f"Executing prescribe_medication: {medication_name} {dosage}..."); return json.dumps({"status": "success", "message": f"Prescription Prepared: {medication_name} {dosage} {route} {frequency}", "details": f"Duration: {duration}. Reason: {reason}"})
@tool("check_drug_interactions", args_schema=InteractionCheckInput)
def check_drug_interactions(potential_prescription: str, current_medications: Optional[List[str]] = None, allergies: Optional[List[str]] = None) -> str:
# ... (Keep the FULL implementation of the NEW check_drug_interactions using API helpers) ...
print(f"\n--- Executing REAL check_drug_interactions ---"); print(f"Checking potential prescription: '{potential_prescription}'"); warnings = []; potential_med_lower = potential_prescription.lower().strip();
current_meds_list = current_medications or []; allergies_list = allergies or []; current_med_names_lower = [];
for med in current_meds_list: match = re.match(r"^\s*([a-zA-Z\-]+)", str(med));
if match: current_med_names_lower.append(match.group(1).lower());
allergies_lower = [str(a).lower().strip() for a in allergies_list if a]; print(f" Against Current Meds (names): {current_med_names_lower}"); print(f" Against Allergies: {allergies_lower}");
print(f" Step 1: Normalizing '{potential_prescription}'..."); potential_rxcui = get_rxcui(potential_prescription); potential_label = get_openfda_label(rxcui=potential_rxcui, drug_name=potential_prescription);
if not potential_rxcui and not potential_label: warnings.append(f"INFO: Could not reliably identify '{potential_prescription}'. Checks may be incomplete.");
print(" Step 2: Performing Allergy Check...");
for allergy in allergies_lower:
if allergy == potential_med_lower: warnings.append(f"CRITICAL ALLERGY (Name Match): Patient allergic to '{allergy}'. Potential prescription is '{potential_prescription}'.");
elif allergy in ["penicillin", "pcns"] and potential_med_lower in ["amoxicillin", "ampicillin", "augmentin", "piperacillin"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to Penicillin. High risk with '{potential_prescription}'.");
elif allergy == "sulfa" and potential_med_lower in ["sulfamethoxazole", "bactrim", "sulfasalazine"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to Sulfa. High risk with '{potential_prescription}'.");
elif allergy in ["nsaids", "aspirin"] and potential_med_lower in ["ibuprofen", "naproxen", "ketorolac", "diclofenac"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to NSAIDs/Aspirin. Risk with '{potential_prescription}'.");
if potential_label: contraindications = potential_label.get("contraindications"); warnings_section = potential_label.get("warnings_and_cautions") or potential_label.get("warnings");
if contraindications: allergy_mentions_ci = search_text_list(contraindications, allergies_lower);
if allergy_mentions_ci: warnings.append(f"ALLERGY RISK (Contraindication Found): Label for '{potential_prescription}' mentions contraindication potentially related to patient allergies: {'; '.join(allergy_mentions_ci)}");
if warnings_section: allergy_mentions_warn = search_text_list(warnings_section, allergies_lower);
if allergy_mentions_warn: warnings.append(f"ALLERGY RISK (Warning Found): Label for '{potential_prescription}' mentions warnings potentially related to patient allergies: {'; '.join(allergy_mentions_warn)}");
print(" Step 3: Performing Drug-Drug Interaction Check...");
if potential_rxcui or potential_label:
for current_med_name in current_med_names_lower:
if not current_med_name or current_med_name == potential_med_lower: continue; print(f" Checking interaction between '{potential_prescription}' and '{current_med_name}'..."); current_rxcui = get_rxcui(current_med_name); current_label = get_openfda_label(rxcui=current_rxcui, drug_name=current_med_name); search_terms_for_current = [current_med_name];
if current_rxcui: search_terms_for_current.append(current_rxcui); search_terms_for_potential = [potential_med_lower];
if potential_rxcui: search_terms_for_potential.append(potential_rxcui); interaction_found_flag = False;
if potential_label and potential_label.get("drug_interactions"): interaction_mentions = search_text_list(potential_label.get("drug_interactions"), search_terms_for_current);
if interaction_mentions: warnings.append(f"Potential Interaction ({potential_prescription.capitalize()} Label): Mentions '{current_med_name.capitalize()}'. Snippets: {'; '.join(interaction_mentions)}"); interaction_found_flag = True;
if current_label and current_label.get("drug_interactions") and not interaction_found_flag: interaction_mentions = search_text_list(current_label.get("drug_interactions"), search_terms_for_potential);
if interaction_mentions: warnings.append(f"Potential Interaction ({current_med_name.capitalize()} Label): Mentions '{potential_prescription.capitalize()}'. Snippets: {'; '.join(interaction_mentions)}");
else: warnings.append(f"INFO: Drug-drug interaction check skipped for '{potential_prescription}' as it could not be identified via RxNorm/OpenFDA.");
final_warnings = list(set(warnings)); status = "warning" if any("CRITICAL" in w or "Interaction" in w or "RISK" in w for w in final_warnings) else "clear";
if not final_warnings: status = "clear"; message = f"Interaction/Allergy check for '{potential_prescription}': {len(final_warnings)} potential issue(s) identified using RxNorm/OpenFDA." if final_warnings else f"No major interactions or allergy issues identified for '{potential_prescription}' based on RxNorm/OpenFDA lookup."; print(f"--- Interaction Check Complete for '{potential_prescription}' ---");
return json.dumps({"status": status, "message": message, "warnings": final_warnings})
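# Illustrative direct invocation (normally triggered by the agent as a tool call; hits the live RxNorm/openFDA APIs):
#   raw = check_drug_interactions.invoke({"potential_prescription": "Aspirin",
#                                         "current_medications": ["Lisinopril 10mg daily"],
#                                         "allergies": ["penicillin"]})
#   json.loads(raw)  -> {"status": "warning" or "clear", "message": "...", "warnings": [...]}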
@tool("flag_risk", args_schema=FlagRiskInput)
def flag_risk(risk_description: str, urgency: str) -> str:
print(f"Executing flag_risk: {risk_description}, Urgency: {urgency}"); st.error(f"π¨ **{urgency.upper()} RISK FLAGGED by AI:** {risk_description}", icon="π¨"); return json.dumps({"status": "flagged", "message": f"Risk '{risk_description}' flagged with {urgency} urgency."})
search_tool = TavilySearchResults(max_results=ClinicalAppSettings.MAX_SEARCH_RESULTS, name="tavily_search_results")
# --- LangGraph Setup ---
class AgentState(TypedDict): messages: Annotated[list[Any], operator.add]; patient_data: Optional[dict]
tools = [order_lab_test, prescribe_medication, check_drug_interactions, flag_risk, search_tool]
tool_executor = ToolExecutor(tools)
model = ChatGroq(temperature=ClinicalAppSettings.TEMPERATURE, model=ClinicalAppSettings.MODEL_NAME)
model_with_tools = model.bind_tools(tools)
# --- Graph Nodes (agent_node, tool_node) ---
# ... (Keep these functions exactly as they were) ...
def agent_node(state: AgentState):
print("\n---AGENT NODE---"); current_messages = state['messages'];
if not current_messages or not isinstance(current_messages[0], SystemMessage): print("Prepending System Prompt."); current_messages = [SystemMessage(content=ClinicalPrompts.SYSTEM_PROMPT)] + current_messages;
print(f"Invoking LLM with {len(current_messages)} messages.");
    try:
        response = model_with_tools.invoke(current_messages); print(f"Agent Raw Response Type: {type(response)}")
        if hasattr(response, 'tool_calls') and response.tool_calls: print(f"Agent Response Tool Calls: {response.tool_calls}")
        else: print("Agent Response: No tool calls.")
    except Exception as e:
        print(f"ERROR in agent_node: {e}"); traceback.print_exc()
        return {"messages": [AIMessage(content=f"Error: {e}")]}
    return {"messages": [response]}
def tool_node(state: AgentState):
print("\n---TOOL NODE---"); tool_messages = []; last_message = state['messages'][-1];
if not isinstance(last_message, AIMessage) or not getattr(last_message, 'tool_calls', None): print("Warning: Tool node called unexpectedly."); return {"messages": []};
tool_calls = last_message.tool_calls; print(f"Tool calls received: {json.dumps(tool_calls, indent=2)}"); prescriptions_requested = {}; interaction_checks_requested = {};
    for call in tool_calls:
        tool_name = call.get('name'); tool_args = call.get('args', {})
        if tool_name == 'prescribe_medication':
            med_name = tool_args.get('medication_name', '').lower()
            if med_name: prescriptions_requested[med_name] = call
        elif tool_name == 'check_drug_interactions':
            potential_med = tool_args.get('potential_prescription', '').lower()
            if potential_med: interaction_checks_requested[potential_med] = call
    blocked_ids = set()
    for med_name, prescribe_call in prescriptions_requested.items():
        if med_name not in interaction_checks_requested:
            st.error(f"**Safety Violation:** AI tried to prescribe '{med_name}' without check.")
            error_msg = ToolMessage(content=json.dumps({"status": "error", "message": f"Interaction check needed for '{med_name}'."}), tool_call_id=prescribe_call['id'], name=prescribe_call['name'])
            tool_messages.append(error_msg); blocked_ids.add(prescribe_call['id'])
    valid_tool_calls_for_execution = [call for call in tool_calls if call['id'] not in blocked_ids]
    patient_data = state.get("patient_data", {}); patient_meds_full = patient_data.get("medications", {}).get("current", []); patient_allergies = patient_data.get("allergies", [])
    for call in valid_tool_calls_for_execution:
        if call['name'] == 'check_drug_interactions':
            call.setdefault('args', {}); call['args']['current_medications'] = patient_meds_full; call['args']['allergies'] = patient_allergies
            print(f"Augmented interaction check args for call ID {call['id']}")
    if valid_tool_calls_for_execution:
        print(f"Attempting execution: {[c['name'] for c in valid_tool_calls_for_execution]}")
        try:
            responses = tool_executor.batch(valid_tool_calls_for_execution, return_exceptions=True)
            for call, resp in zip(valid_tool_calls_for_execution, responses):
                tool_call_id = call['id']; tool_name = call['name']
                if isinstance(resp, Exception):
                    error_type = type(resp).__name__; error_str = str(resp)
                    print(f"ERROR executing tool '{tool_name}': {error_type} - {error_str}"); traceback.print_exc(); st.error(f"Error: {error_type}")
                    error_content = json.dumps({"status": "error", "message": f"Failed: {error_type} - {error_str}"})
                    tool_messages.append(ToolMessage(content=error_content, tool_call_id=tool_call_id, name=tool_name))
                    if isinstance(resp, AttributeError) and "'dict' object has no attribute 'tool'" in error_str: print("\n *** DETECTED SPECIFIC ATTRIBUTE ERROR *** \n")
                else:
                    print(f"Tool '{tool_name}' executed."); tool_messages.append(ToolMessage(content=str(resp), tool_call_id=tool_call_id, name=tool_name))
        except Exception as e:
            print(f"CRITICAL TOOL NODE ERROR: {e}"); traceback.print_exc(); st.error(f"Critical error: {e}")
            error_content = json.dumps({"status": "error", "message": f"Internal error: {e}"})
            processed_ids = {msg.tool_call_id for msg in tool_messages}
            tool_messages.extend(ToolMessage(content=error_content, tool_call_id=call['id'], name=call['name']) for call in valid_tool_calls_for_execution if call['id'] not in processed_ids)
    print(f"Returning {len(tool_messages)} tool messages."); return {"messages": tool_messages}
# --- Graph Edges (Routing Logic) ---
def should_continue(state: AgentState) -> str:
print("\n---ROUTING DECISION---"); last_message = state['messages'][-1] if state['messages'] else None;
if not isinstance(last_message, AIMessage): return "end_conversation_turn";
if "Sorry, an internal error occurred" in last_message.content: return "end_conversation_turn";
    if getattr(last_message, 'tool_calls', None): return "continue_tools"
    return "end_conversation_turn"
# --- Graph Definition & Compilation ---
workflow = StateGraph(AgentState); workflow.add_node("agent", agent_node); workflow.add_node("tools", tool_node)
workflow.set_entry_point("agent"); workflow.add_conditional_edges("agent", should_continue, {"continue_tools": "tools", "end_conversation_turn": END})
workflow.add_edge("tools", "agent"); app = workflow.compile(); print("LangGraph compiled successfully.")
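# Illustrative programmatic use of the compiled graph outside Streamlit (mirrors the invocation in main()):
#   state = {"messages": [HumanMessage(content="Initiate consultation.")], "patient_data": {...patient dict...}}
#   final_state = app.invoke(state, {"recursion_limit": 15})
#   final_state["messages"][-1]  # last AIMessage (or ToolMessage) produced for this turn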
# --- Streamlit UI ---
def main():
st.set_page_config(page_title=ClinicalAppSettings.APP_TITLE, layout=ClinicalAppSettings.PAGE_LAYOUT)
st.title(f"π©Ί {ClinicalAppSettings.APP_TITLE}")
st.caption(f"Interactive Assistant | LangGraph/Groq/Tavily/UMLS/OpenFDA | Model: {ClinicalAppSettings.MODEL_NAME}")
if "messages" not in st.session_state: st.session_state.messages = []
if "patient_data" not in st.session_state: st.session_state.patient_data = None
if "graph_app" not in st.session_state: st.session_state.graph_app = app
# --- Patient Data Input Sidebar ---
with st.sidebar:
st.header("π Patient Intake Form")
# Input fields...
st.subheader("Demographics"); age = st.number_input("Age", 0, 120, 55, key="sb_age"); sex = st.selectbox("Sex", ["Male", "Female", "Other"], key="sb_sex")
st.subheader("HPI"); chief_complaint = st.text_input("Chief Complaint", "Chest pain", key="sb_cc"); hpi_details = st.text_area("HPI Details", "55 y/o male...", height=100, key="sb_hpi"); symptoms = st.multiselect("Symptoms", ["Nausea", "Diaphoresis", "SOB", "Dizziness", "Severe Headache", "Syncope", "Hemoptysis"], default=["Nausea", "Diaphoresis"], key="sb_sym")
st.subheader("History"); pmh = st.text_area("PMH", "HTN, HLD, DM2, History of MI", key="sb_pmh"); psh = st.text_area("PSH", "Appendectomy", key="sb_psh")
st.subheader("Meds & Allergies"); current_meds_str = st.text_area("Current Meds", "Lisinopril 10mg daily\nMetformin 1000mg BID\nAtorvastatin 40mg daily", key="sb_meds"); allergies_str = st.text_area("Allergies", "Penicillin (rash), Sulfa", key="sb_allergies")
st.subheader("Social/Family"); social_history = st.text_area("SH", "Smoker", key="sb_sh"); family_history = st.text_area("FHx", "Father MI", key="sb_fhx")
st.subheader("Vitals & Exam"); col1, col2 = st.columns(2);
with col1: temp_c = st.number_input("Temp C", 35.0, 42.0, 36.8, format="%.1f", key="sb_temp"); hr_bpm = st.number_input("HR", 30, 250, 95, key="sb_hr"); rr_rpm = st.number_input("RR", 5, 50, 18, key="sb_rr")
with col2: bp_mmhg = st.text_input("BP", "155/90", key="sb_bp"); spo2_percent = st.number_input("SpO2", 70, 100, 96, key="sb_spo2"); pain_scale = st.slider("Pain", 0, 10, 8, key="sb_pain")
exam_notes = st.text_area("Exam Notes", "Awake, alert...", height=50, key="sb_exam")
if st.button("Start/Update Consultation", key="sb_start"):
current_meds_list = [med.strip() for med in current_meds_str.split('\n') if med.strip()]
            current_med_names_only = []
            for med in current_meds_list:
                match = re.match(r"^\s*([a-zA-Z\-]+)", med)
                if match: current_med_names_only.append(match.group(1).lower())
            allergies_list = []
            for a in allergies_str.split(','):
                cleaned_allergy = a.strip()
                if cleaned_allergy:
                    match = re.match(r"^\s*([a-zA-Z\-\s/]+)(?:\s*\(.*\))?", cleaned_allergy)
                    name_part = match.group(1).strip().lower() if match else cleaned_allergy.lower()
                    allergies_list.append(name_part)
st.session_state.patient_data = { "demographics": {"age": age, "sex": sex}, "hpi": {"chief_complaint": chief_complaint, "details": hpi_details, "symptoms": symptoms}, "pmh": {"conditions": pmh}, "psh": {"procedures": psh}, "medications": {"current": current_meds_list, "names_only": current_med_names_only}, "allergies": allergies_list, "social_history": {"details": social_history}, "family_history": {"details": family_history}, "vitals": { "temp_c": temp_c, "hr_bpm": hr_bpm, "bp_mmhg": bp_mmhg, "rr_rpm": rr_rpm, "spo2_percent": spo2_percent, "pain_scale": pain_scale}, "exam_findings": {"notes": exam_notes} }
red_flags = check_red_flags(st.session_state.patient_data); st.sidebar.markdown("---");
if red_flags: st.sidebar.warning("**Initial Red Flags:**"); [st.sidebar.warning(f"- {flag.replace('Red Flag: ','')}") for flag in red_flags]
else: st.sidebar.success("No immediate red flags.")
initial_prompt = "Initiate consultation. Review patient data and begin analysis."
st.session_state.messages = [HumanMessage(content=initial_prompt)]; st.success("Patient data loaded/updated.")
# --- Main Chat Interface Area ---
st.header("π¬ Clinical Consultation")
# Display loop - key= argument REMOVED, Tool Call Display Syntax FIXED
for msg in st.session_state.messages:
if isinstance(msg, HumanMessage):
with st.chat_message("user"): st.markdown(msg.content)
elif isinstance(msg, AIMessage):
with st.chat_message("assistant"):
ai_content = msg.content; structured_output = None
                try: # JSON Parsing logic...
                    json_match = re.search(r"```json\s*(\{.*?\})\s*```", ai_content, re.DOTALL | re.IGNORECASE)
                    if json_match:
                        json_str = json_match.group(1); prefix = ai_content[:json_match.start()].strip(); suffix = ai_content[json_match.end():].strip()
                        if prefix: st.markdown(prefix)
                        structured_output = json.loads(json_str)
                        if suffix: st.markdown(suffix)
                    elif ai_content.strip().startswith("{") and ai_content.strip().endswith("}"):
                        structured_output = json.loads(ai_content); ai_content = ""
                    else: st.markdown(ai_content)
                except Exception as e: st.markdown(ai_content); print(f"Error parsing/displaying AI JSON: {e}")
                if structured_output and isinstance(structured_output, dict): # Structured JSON display logic...
                    st.divider(); st.subheader("📊 AI Analysis & Recommendations"); cols = st.columns(2)
                    with cols[0]:
                        st.markdown("**Assessment:**"); st.markdown(f"> {structured_output.get('assessment', 'N/A')}")
                        st.markdown("**Differential Diagnosis:**"); ddx = structured_output.get('differential_diagnosis', [])
                        if ddx:
                            for item in ddx:
                                icon = {"H": "🔴", "M": "🟠", "L": "🟡"}.get(str(item.get('likelihood', '?'))[:1].upper(), "❓")
                                st.expander(f"{icon} {item.get('diagnosis', 'Unknown')} ({item.get('likelihood', '?')})").write(f"**Rationale:** {item.get('rationale', 'N/A')}")
                        else: st.info("No DDx provided.")
                        st.markdown("**Risk Assessment:**"); risk = structured_output.get('risk_assessment', {}); flags = risk.get('identified_red_flags', []); concerns = risk.get("immediate_concerns", []); comps = risk.get("potential_complications", [])
                        if flags: st.warning(f"**Flags:** {', '.join(flags)}")
                        if concerns: st.warning(f"**Concerns:** {', '.join(concerns)}")
                        if comps: st.info(f"**Potential Complications:** {', '.join(comps)}")
                        if not flags and not concerns: st.success("No major risks highlighted.")
                    with cols[1]:
                        st.markdown("**Recommended Plan:**"); plan = structured_output.get('recommended_plan', {})
                        for section in ["investigations", "therapeutics", "consultations", "patient_education"]:
                            st.markdown(f"_{section.replace('_', ' ').capitalize()}:_"); items = plan.get(section)
                            if items and isinstance(items, list): [st.markdown(f"- {item}") for item in items]
                            elif items: st.markdown(f"- {items}")
                            else: st.markdown("_None_")
                            st.markdown("")
                        st.markdown("**Rationale & Guideline Check:**"); st.markdown(f"> {structured_output.get('rationale_summary', 'N/A')}")
                        interaction_summary = structured_output.get("interaction_check_summary", "")
                        if interaction_summary: st.markdown("**Interaction Check Summary:**"); st.markdown(f"> {interaction_summary}")
                    st.divider()
# CORRECTED Tool Call Display Block
if getattr(msg, 'tool_calls', None):
                    with st.expander("🛠️ AI requested actions", expanded=False):
if msg.tool_calls:
for tc in msg.tool_calls:
try:
st.code(f"Action: {tc.get('name', 'Unknown Tool')}\nArgs: {json.dumps(tc.get('args', {}), indent=2)}", language="json")
except Exception as display_e:
st.error(f"Could not display tool call args: {display_e}", icon="β οΈ")
st.code(f"Action: {tc.get('name', 'Unknown Tool')}\nRaw Args: {tc.get('args')}")
else:
st.caption("_No actions requested._")
elif isinstance(msg, ToolMessage):
tool_name_display = getattr(msg, 'name', 'tool_execution')
            with st.chat_message(tool_name_display, avatar="🛠️"): # No key
try: # Tool message display logic...
tool_data = json.loads(msg.content); status = tool_data.get("status", "info"); message = tool_data.get("message", msg.content); details = tool_data.get("details"); warnings = tool_data.get("warnings");
if status == "success" or status == "clear" or status == "flagged": st.success(f"{message}", icon="β
" if status != "flagged" else "π¨")
elif status == "warning": st.warning(f"{message}", icon="β οΈ");
if warnings and isinstance(warnings, list): st.caption("Details:"); [st.caption(f"- {warn}") for warn in warnings]
else: st.error(f"{message}", icon="β") # Assume error if not success/clear/flagged/warning
if details: st.caption(f"Details: {details}")
except json.JSONDecodeError: st.info(f"{msg.content}") # Display raw if not JSON
                except Exception as e: st.error(f"Error displaying tool message: {e}", icon="❌"); st.caption(f"Raw content: {msg.content}")
# --- Chat Input Logic ---
if prompt := st.chat_input("Your message or follow-up query..."):
if not st.session_state.patient_data: st.warning("Please load patient data first."); st.stop()
user_message = HumanMessage(content=prompt); st.session_state.messages.append(user_message)
with st.chat_message("user"): st.markdown(prompt)
current_state = AgentState(messages=st.session_state.messages, patient_data=st.session_state.patient_data)
with st.spinner("SynapseAI is thinking..."):
try:
final_state = st.session_state.graph_app.invoke(current_state, {"recursion_limit": 15})
st.session_state.messages = final_state['messages']
except Exception as e: print(f"CRITICAL ERROR: {e}"); traceback.print_exc(); st.error(f"Error: {e}")
st.rerun()
# Disclaimer
st.markdown("---"); st.warning("**Disclaimer:** SynapseAI is for demonstration...")
if __name__ == "__main__":
main() |