|
import streamlit as st |
|
from langchain_groq import ChatGroq |
|
from langchain_community.tools.tavily_search import TavilySearchResults |
|
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage, ToolMessage |
|
from langchain_core.prompts import ChatPromptTemplate |
|
from langchain_core.pydantic_v1 import BaseModel, Field |
|
from langchain_core.tools import tool |
|
from langgraph.prebuilt import ToolExecutor |
|
from langgraph.graph import StateGraph, END |
|
|
|
|
|
from typing import Optional, List, Dict, Any, TypedDict, Annotated |
|
import json |
|
import re |
|
import operator |
|
import traceback |
|
|
|
|
|
class ClinicalAppSettings:
    """Central configuration constants for the Streamlit app and LLM."""
    APP_TITLE = "SynapseAI: Interactive Clinical Decision Support"
    PAGE_LAYOUT = "wide"  # Streamlit page layout mode
    MODEL_NAME = "llama3-70b-8192"  # Groq-hosted model identifier
    TEMPERATURE = 0.1  # low temperature for more deterministic clinical output
    MAX_SEARCH_RESULTS = 3  # cap on Tavily web-search hits per query
|
|
|
class ClinicalPrompts:
    """Prompt text used to steer the LLM."""

    # System prompt prepended exactly once per graph invocation (see
    # agent_node). It defines the conversational protocol, the
    # structured-JSON output contract rendered by main(), and the
    # tool-safety rules that tool_node enforces mechanically.
    SYSTEM_PROMPT = """
You are SynapseAI, an expert AI clinical assistant engaged in an interactive consultation.
Your goal is to support healthcare professionals by analyzing patient data, providing differential diagnoses, suggesting evidence-based management plans, and identifying risks according to current standards of care.

**Core Directives for this Conversation:**
1. **Analyze Sequentially:** Process information turn-by-turn. Base your responses on the *entire* conversation history.
2. **Seek Clarity:** If the provided information is insufficient or ambiguous for a safe assessment, CLEARLY STATE what specific additional information or clarification is needed. Do NOT guess or make unsafe assumptions.
3. **Structured Assessment (When Ready):** When you have sufficient information and have performed necessary checks (like interactions, guideline searches), provide a comprehensive assessment using the following JSON structure. Output this JSON structure as the primary content of your response when you are providing the full analysis. Do NOT output incomplete JSON. If you need to ask a question or perform a tool call first, do that instead of outputting this structure.
```json
{
  "assessment": "Concise summary of the patient's presentation and key findings based on the conversation.",
  "differential_diagnosis": [
    {"diagnosis": "Primary Diagnosis", "likelihood": "High/Medium/Low", "rationale": "Supporting evidence from conversation..."},
    {"diagnosis": "Alternative Diagnosis 1", "likelihood": "Medium/Low", "rationale": "Supporting/Refuting evidence..."},
    {"diagnosis": "Alternative Diagnosis 2", "likelihood": "Low", "rationale": "Why it's less likely but considered..."}
  ],
  "risk_assessment": {
    "identified_red_flags": ["List any triggered red flags based on input and analysis"],
    "immediate_concerns": ["Specific urgent issues requiring attention (e.g., sepsis risk, ACS rule-out)"],
    "potential_complications": ["Possible future issues based on presentation"]
  },
  "recommended_plan": {
    "investigations": ["List specific lab tests or imaging required. Use 'order_lab_test' tool."],
    "therapeutics": ["Suggest specific treatments or prescriptions. Use 'prescribe_medication' tool. MUST check interactions first using 'check_drug_interactions'."],
    "consultations": ["Recommend specialist consultations if needed."],
    "patient_education": ["Key points for patient communication."]
  },
  "rationale_summary": "Justification for assessment/plan. **Crucially, if relevant (e.g., ACS, sepsis, common infections), use 'tavily_search_results' to find and cite current clinical practice guidelines (e.g., 'latest ACC/AHA chest pain guidelines 202X', 'Surviving Sepsis Campaign guidelines') supporting your recommendations.** Include summary of guideline findings here.",
  "interaction_check_summary": "Summary of findings from 'check_drug_interactions' if performed."
}
```
4. **Safety First - Interactions:** BEFORE suggesting a new prescription via `prescribe_medication`, you MUST FIRST use `check_drug_interactions` in a preceding or concurrent tool call. Report the findings from the interaction check. If significant interactions exist, modify the plan or state the contraindication clearly.
5. **Safety First - Red Flags:** Use the `flag_risk` tool IMMEDIATELY if critical red flags requiring urgent action are identified at any point in the conversation.
6. **Tool Use:** Employ tools (`order_lab_test`, `prescribe_medication`, `check_drug_interactions`, `flag_risk`, `tavily_search_results`) logically within the conversational flow. Wait for tool results before proceeding if the result is needed for the next step (e.g., wait for interaction check before confirming prescription in the structured JSON).
7. **Evidence & Guidelines:** Actively use `tavily_search_results` not just for general knowledge, but specifically to query for and incorporate **current clinical practice guidelines** relevant to the patient's presentation (e.g., chest pain, shortness of breath, suspected infection). Summarize findings in the `rationale_summary` when providing the structured output.
8. **Conciseness & Flow:** Be medically accurate and concise. Use standard terminology. Respond naturally in conversation (asking questions, acknowledging info) until ready for the full structured JSON output.
"""
|
|
|
|
|
# Demonstration-only drug-drug interaction table keyed by lowercase
# (drug_a, drug_b) name tuples. NOTE(review): each pair is stored in both
# orders even though check_drug_interactions already looks up both
# orderings — the reversed entries are redundant but harmless.
MOCK_INTERACTION_DB = {
    ("lisinopril", "spironolactone"): "High risk of hyperkalemia. Monitor potassium closely.",
    ("warfarin", "amiodarone"): "Increased bleeding risk. Monitor INR frequently and adjust Warfarin dose.",
    ("simvastatin", "clarithromycin"): "Increased risk of myopathy/rhabdomyolysis. Avoid combination or use lower statin dose.",
    ("aspirin", "ibuprofen"): "Concurrent use may decrease Aspirin's cardioprotective effect. Potential for increased GI bleeding.",
    # Reversed duplicates of the pairs above.
    ("amiodarone", "warfarin"): "Increased bleeding risk. Monitor INR frequently and adjust Warfarin dose.",
    ("clarithromycin", "simvastatin"): "Increased risk of myopathy/rhabdomyolysis. Avoid combination or use lower statin dose.",
    ("ibuprofen", "aspirin"): "Concurrent use may decrease Aspirin's cardioprotective effect. Potential for increased GI bleeding.",
    ("spironolactone", "lisinopril"): "High risk of hyperkalemia. Monitor potassium closely.",
}

# Allergy class (lowercase) -> medications that commonly cross-react with it.
# Used by check_drug_interactions for the cross-allergy screen.
ALLERGY_INTERACTIONS = {
    "penicillin": ["amoxicillin", "ampicillin", "piperacillin", "augmentin"],
    "sulfa": ["sulfamethoxazole", "sulfasalazine", "bactrim"],
    "aspirin": ["ibuprofen", "naproxen", "nsaid"]
}
|
|
|
def parse_bp(bp_string: str) -> Optional[tuple[int, int]]:
    """Parse a blood-pressure string like '120/80' into (systolic, diastolic).

    Returns None when the input is not a string or does not begin with a
    'SYS/DIA' pair of 1-3 digit numbers (whitespace around '/' tolerated).
    """
    if not isinstance(bp_string, str):
        return None
    matched = re.match(r"(\d{1,3})\s*/\s*(\d{1,3})", bp_string.strip())
    if matched is None:
        return None
    systolic, diastolic = matched.groups()
    return int(systolic), int(diastolic)
|
|
|
def check_red_flags(patient_data: dict) -> List[str]:
    """Screen structured patient data against hard-coded clinical red flags.

    Args:
        patient_data: Intake dict as built in main() (optional keys
            'hpi.symptoms', 'vitals', 'pmh.conditions'). Missing sections
            are tolerated.

    Returns:
        De-duplicated red-flag strings in detection order (symptoms first,
        then vitals, then history combinations). Empty list for falsy input.
    """
    flags: List[str] = []
    if not patient_data:
        return flags

    symptoms = patient_data.get("hpi", {}).get("symptoms", [])
    vitals = patient_data.get("vitals", {})
    history = patient_data.get("pmh", {}).get("conditions", "")

    # Case-insensitive matching; silently skip non-string symptom entries.
    symptoms_lower = [str(s).lower() for s in symptoms if isinstance(s, str)]
    # Robustness fix: tolerate a non-string 'conditions' value instead of
    # crashing on .lower().
    history_lower = history.lower() if isinstance(history, str) else ""

    # Symptom-based flags, checked in a fixed order for deterministic output.
    symptom_flags = {
        "chest pain": "Red Flag: Chest Pain reported.",
        "shortness of breath": "Red Flag: Shortness of Breath reported.",
        "severe headache": "Red Flag: Severe Headache reported.",
        "sudden vision loss": "Red Flag: Sudden Vision Loss reported.",
        "weakness on one side": "Red Flag: Unilateral Weakness reported (potential stroke).",
        "hemoptysis": "Red Flag: Hemoptysis (coughing up blood).",
        "syncope": "Red Flag: Syncope (fainting).",
    }
    for symptom, message in symptom_flags.items():
        if symptom in symptoms_lower:
            flags.append(message)

    # Vital-sign thresholds (single-reading adult heuristics).
    if vitals:
        temp = vitals.get("temp_c")
        hr = vitals.get("hr_bpm")
        rr = vitals.get("rr_rpm")
        spo2 = vitals.get("spo2_percent")
        bp_str = vitals.get("bp_mmhg")

        if temp is not None and temp >= 38.5:
            flags.append(f"Red Flag: Fever (Temperature: {temp}°C).")
        if hr is not None and hr >= 120:
            flags.append(f"Red Flag: Tachycardia (Heart Rate: {hr} bpm).")
        if hr is not None and hr <= 50:
            flags.append(f"Red Flag: Bradycardia (Heart Rate: {hr} bpm).")
        if rr is not None and rr >= 24:
            flags.append(f"Red Flag: Tachypnea (Respiratory Rate: {rr} rpm).")
        if spo2 is not None and spo2 <= 92:
            flags.append(f"Red Flag: Hypoxia (SpO2: {spo2}%).")
        if bp_str:
            bp = parse_bp(bp_str)
            if bp:
                if bp[0] >= 180 or bp[1] >= 110:
                    flags.append(f"Red Flag: Hypertensive Urgency/Emergency (BP: {bp_str} mmHg).")
                if bp[0] <= 90 or bp[1] <= 60:
                    flags.append(f"Red Flag: Hypotension (BP: {bp_str} mmHg).")

    # High-risk combinations of past history with current symptoms.
    if "history of mi" in history_lower and "chest pain" in symptoms_lower:
        flags.append("Red Flag: History of MI with current Chest Pain.")
    if "history of dvt/pe" in history_lower and "shortness of breath" in symptoms_lower:
        flags.append("Red Flag: History of DVT/PE with current Shortness of Breath.")

    # Fix: de-duplicate while preserving detection order. The previous
    # list(set(flags)) returned flags in nondeterministic order.
    return list(dict.fromkeys(flags))
|
|
|
def format_patient_data_for_prompt(data: dict) -> str:
    """Render the patient intake dict as a Markdown-style summary for the LLM.

    Dict sections become a bold header plus one bullet per non-empty
    sub-field; lists are joined with commas; scalars are printed inline.
    Empty sections and empty values are omitted entirely.
    """
    if not data:
        return "No patient data provided."
    pieces = []
    for key, value in data.items():
        section_title = key.replace('_', ' ').title()
        if isinstance(value, dict) and value:
            # Emit the section only when at least one sub-field is non-empty.
            if any(value.values()):
                pieces.append(f"**{section_title}:**\n")
                pieces.extend(
                    f"  - {sub_key.replace('_', ' ').title()}: {sub_value}\n"
                    for sub_key, sub_value in value.items()
                    if sub_value
                )
        elif isinstance(value, list) and value:
            pieces.append(f"**{section_title}:** {', '.join(map(str, value))}\n")
        elif value and not isinstance(value, dict):
            pieces.append(f"**{section_title}:** {value}\n")
    return "".join(pieces).strip()
|
|
|
|
|
|
|
|
|
|
|
class LabOrderInput(BaseModel):
    """Argument schema for the 'order_lab_test' tool (descriptions are read by the LLM)."""
    test_name: str = Field(..., description="Specific name of the lab test or panel (e.g., 'CBC', 'BMP', 'Troponin I', 'Urinalysis', 'D-dimer').")
    reason: str = Field(..., description="Clinical justification for ordering the test (e.g., 'Rule out infection', 'Assess renal function', 'Evaluate for ACS', 'Assess for PE').")
    priority: str = Field("Routine", description="Priority of the test (e.g., 'STAT', 'Routine').")
|
|
|
@tool("order_lab_test", args_schema=LabOrderInput)
def order_lab_test(test_name: str, reason: str, priority: str = "Routine") -> str:
    """Orders a specific lab test with clinical justification and priority."""
    print(f"Executing order_lab_test: {test_name}, Reason: {reason}, Priority: {priority}")
    # Mock implementation: no real order is placed; a JSON confirmation
    # payload is echoed back to the agent.
    confirmation = {
        "status": "success",
        "message": f"Lab Ordered: {test_name} ({priority})",
        "details": f"Reason: {reason}",
    }
    return json.dumps(confirmation)
|
|
|
class PrescriptionInput(BaseModel):
    """Argument schema for the 'prescribe_medication' tool (descriptions are read by the LLM)."""
    medication_name: str = Field(..., description="Name of the medication.")
    dosage: str = Field(..., description="Dosage amount and unit (e.g., '500 mg', '10 mg', '81 mg').")
    route: str = Field(..., description="Route of administration (e.g., 'PO', 'IV', 'IM', 'Topical', 'SL').")
    frequency: str = Field(..., description="How often the medication should be taken (e.g., 'BID', 'QDaily', 'Q4-6H PRN', 'once').")
    duration: str = Field("As directed", description="Duration of treatment (e.g., '7 days', '1 month', 'Ongoing', 'Until follow-up').")
    reason: str = Field(..., description="Clinical indication for the prescription.")
|
|
|
@tool("prescribe_medication", args_schema=PrescriptionInput)
def prescribe_medication(medication_name: str, dosage: str, route: str, frequency: str, duration: str, reason: str) -> str:
    """Prescribes a medication with detailed instructions and clinical indication. IMPORTANT: Requires prior interaction check."""
    print(f"Executing prescribe_medication: {medication_name} {dosage}...")
    # Mock implementation: the interaction-check precondition is enforced
    # upstream in tool_node, not here; this just echoes a confirmation.
    confirmation = {
        "status": "success",
        "message": f"Prescription Prepared: {medication_name} {dosage} {route} {frequency}",
        "details": f"Duration: {duration}. Reason: {reason}",
    }
    return json.dumps(confirmation)
|
|
|
class InteractionCheckInput(BaseModel):
    """Argument schema for 'check_drug_interactions'.

    The LLM only needs to supply potential_prescription; tool_node
    overwrites current_medications and allergies with authoritative values
    from graph state before execution.
    """
    potential_prescription: str = Field(..., description="The name of the NEW medication being considered for prescribing.")
    current_medications: Optional[List[str]] = Field(None, description="List of patient's current medication names (populated from state).")
    allergies: Optional[List[str]] = Field(None, description="List of patient's known allergies (populated from state).")
|
|
|
|
|
@tool("check_drug_interactions", args_schema=InteractionCheckInput)
def check_drug_interactions(potential_prescription: str, current_medications: Optional[List[str]] = None, allergies: Optional[List[str]] = None) -> str:
    """Checks for potential drug-drug and drug-allergy interactions BEFORE prescribing."""
    print(f"Executing check_drug_interactions for: {potential_prescription}")
    new_med = potential_prescription.lower()

    # Normalize inputs for case-insensitive comparison; None -> empty list.
    current_meds_lower = [str(med).lower() for med in (current_medications or [])]
    allergies_lower = [str(a).lower() for a in (allergies or [])]
    print(f" Checking against Meds: {current_meds_lower}")
    print(f" Checking against Allergies: {allergies_lower}")

    warnings = []

    # Allergy screen: exact match is an absolute contraindication; otherwise
    # look for known cross-reactive medications in the same allergy class.
    for allergy in allergies_lower:
        if allergy == new_med:
            warnings.append(f"CRITICAL ALLERGY: Patient allergic to '{allergy}'. Cannot prescribe '{potential_prescription}'.")
        else:
            for cross_reactant in ALLERGY_INTERACTIONS.get(allergy, []):
                if cross_reactant.lower() == new_med:
                    warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to '{allergy}'. High risk with '{potential_prescription}'.")

    # Drug-drug screen: look up the pair in both orders in the mock DB.
    for current_med in current_meds_lower:
        interaction_msg = MOCK_INTERACTION_DB.get((current_med, new_med)) or MOCK_INTERACTION_DB.get((new_med, current_med))
        if interaction_msg:
            warnings.append(f"Interaction: {potential_prescription.capitalize()} with {current_med.capitalize()} - {interaction_msg}")

    status = "warning" if warnings else "clear"
    if warnings:
        message = f"Interaction check for '{potential_prescription}': {len(warnings)} potential issue(s) found."
    else:
        message = f"No major interactions identified for '{potential_prescription}' based on provided lists."
    print(f" Interaction Check Result: {status}, Message: {message}, Warnings: {warnings}")
    return json.dumps({"status": status, "message": message, "warnings": warnings})
|
|
|
|
|
class FlagRiskInput(BaseModel):
    """Argument schema for the 'flag_risk' tool (descriptions are read by the LLM)."""
    risk_description: str = Field(..., description="Specific critical risk identified (e.g., 'Suspected Sepsis', 'Acute Coronary Syndrome', 'Stroke Alert').")
    urgency: str = Field("High", description="Urgency level (e.g., 'Critical', 'High', 'Moderate').")
|
|
|
@tool("flag_risk", args_schema=FlagRiskInput)
def flag_risk(risk_description: str, urgency: str) -> str:
    """Flags a critical risk identified during analysis for immediate attention."""
    print(f"Executing flag_risk: {risk_description}, Urgency: {urgency}")
    # Surface the alert directly in the UI as well as returning it to the agent.
    st.error(f"🚨 **{urgency.upper()} RISK FLAGGED by AI:** {risk_description}", icon="🚨")
    payload = {
        "status": "flagged",
        "message": f"Risk '{risk_description}' flagged with {urgency} urgency.",
    }
    return json.dumps(payload)
|
|
|
|
|
# Web-search tool (Tavily). The name must remain "tavily_search_results"
# because the system prompt instructs the model to call it by that name.
search_tool = TavilySearchResults(
    max_results=ClinicalAppSettings.MAX_SEARCH_RESULTS,
    name="tavily_search_results"
)
|
|
|
|
|
|
|
|
|
class AgentState(TypedDict):
    """Shared LangGraph state threaded between the agent and tool nodes."""
    # Annotated with operator.add so LangGraph appends each node's returned
    # messages to the transcript instead of replacing it.
    messages: Annotated[list[Any], operator.add]
    # Structured intake data captured in the sidebar form; None until loaded.
    patient_data: Optional[dict]
|
|
|
|
|
# Toolbelt exposed to the LLM; the tool names must match those referenced
# in ClinicalPrompts.SYSTEM_PROMPT.
tools = [
    order_lab_test,
    prescribe_medication,
    check_drug_interactions,
    flag_risk,
    search_tool
]
# NOTE(review): ToolExecutor is deprecated/removed in recent langgraph
# releases — confirm the pinned langgraph version still provides it.
tool_executor = ToolExecutor(tools)
|
|
|
|
|
# Groq-hosted chat model; low temperature favors deterministic clinical
# output. API key is expected via the environment (GROQ_API_KEY).
model = ChatGroq(
    temperature=ClinicalAppSettings.TEMPERATURE,
    model=ClinicalAppSettings.MODEL_NAME,
)
# Bind the tool schemas so the model can emit structured tool calls.
model_with_tools = model.bind_tools(tools)
|
|
|
|
|
|
|
|
|
def agent_node(state: AgentState):
    """Invokes the LLM to decide the next action or response.

    Ensures the system prompt leads the transcript, calls the tool-bound
    model, and returns the response wrapped for LangGraph's message
    accumulator. LLM failures are converted into an apologetic AIMessage
    so the router can end the turn instead of crashing the graph.
    """
    print("\n---AGENT NODE---")
    conversation = state['messages']

    # Prepend the system prompt exactly once per invocation.
    if not conversation or not isinstance(conversation[0], SystemMessage):
        print("Prepending System Prompt.")
        conversation = [SystemMessage(content=ClinicalPrompts.SYSTEM_PROMPT), *conversation]

    print(f"Invoking LLM with {len(conversation)} messages.")

    try:
        response = model_with_tools.invoke(conversation)
        print(f"Agent Raw Response Type: {type(response)}")
        requested_calls = getattr(response, 'tool_calls', None)
        if requested_calls:
            print(f"Agent Response Tool Calls: {requested_calls}")
        else:
            print("Agent Response: No tool calls.")
    except Exception as e:
        print(f"ERROR in agent_node during LLM invocation: {type(e).__name__} - {e}")
        traceback.print_exc()
        # Degrade gracefully: report the failure in-band as an AI message.
        failure = AIMessage(content=f"Sorry, an internal error occurred while processing the request: {type(e).__name__}")
        return {"messages": [failure]}

    return {"messages": [response]}
|
|
|
|
|
def tool_node(state: AgentState):
    """Executes tools called by the LLM and returns results.

    Flow: validate that the last AI message carries tool calls, enforce the
    prescribe-requires-interaction-check safety rule, inject the patient's
    med/allergy lists into interaction-check args, then batch-execute the
    surviving calls and convert each result (or exception) into a
    ToolMessage keyed by tool_call_id.
    """
    print("\n---TOOL NODE---")
    tool_messages = []
    last_message = state['messages'][-1]

    # Defensive guard: the router should only send us here after an
    # AIMessage containing tool calls; bail out quietly otherwise.
    if not isinstance(last_message, AIMessage) or not getattr(last_message, 'tool_calls', None):
        print("Warning: Tool node called unexpectedly without tool calls in the last AI message.")
        return {"messages": []}

    tool_calls = last_message.tool_calls
    print(f"Tool calls received: {json.dumps(tool_calls, indent=2)}")

    # Index this turn's prescription and interaction-check requests by
    # lowercase drug name so they can be paired for the safety check below.
    prescriptions_requested = {}
    interaction_checks_requested = {}

    for call in tool_calls:
        tool_name = call.get('name')
        tool_args = call.get('args', {})
        if tool_name == 'prescribe_medication':
            med_name = tool_args.get('medication_name', '').lower()
            if med_name:
                prescriptions_requested[med_name] = call
        elif tool_name == 'check_drug_interactions':
            potential_med = tool_args.get('potential_prescription', '').lower()
            if potential_med:
                interaction_checks_requested[potential_med] = call

    valid_tool_calls_for_execution = []

    # Safety interlock: block any prescription that has no matching
    # interaction check requested in the same batch of tool calls.
    for med_name, prescribe_call in prescriptions_requested.items():
        if med_name not in interaction_checks_requested:
            st.error(f"**Safety Violation:** AI attempted to prescribe '{med_name}' without requesting `check_drug_interactions` in the *same turn*. Prescription blocked.")
            error_msg = ToolMessage(
                content=json.dumps({"status": "error", "message": f"Interaction check for '{med_name}' must be requested *before or alongside* the prescription call."}),
                tool_call_id=prescribe_call['id'],
                name=prescribe_call['name']
            )
            tool_messages.append(error_msg)
        else:
            # Matching interaction check present; prescription may proceed.
            pass

    # Any call whose error ToolMessage was queued above is excluded from
    # execution (identified by the error status embedded in its content).
    blocked_ids = {msg.tool_call_id for msg in tool_messages if msg.content and '"status": "error"' in msg.content}
    valid_tool_calls_for_execution = [call for call in tool_calls if call['id'] not in blocked_ids]

    # Pull authoritative med/allergy lists from graph state so the LLM does
    # not have to (and cannot mis-) supply them.
    # NOTE(review): state.get("patient_data", {}) yields None when the key
    # exists with value None, and the chained .get would then raise —
    # confirm patient_data is always a dict by the time tools run.
    patient_meds = state.get("patient_data", {}).get("medications", {}).get("names_only", [])
    patient_allergies = state.get("patient_data", {}).get("allergies", [])

    for call in valid_tool_calls_for_execution:
        if call['name'] == 'check_drug_interactions':
            # Overwrite (or create) the model-supplied args with state values.
            if 'args' not in call: call['args'] = {}
            call['args']['current_medications'] = patient_meds
            call['args']['allergies'] = patient_allergies
            print(f"Augmented interaction check args for call ID {call['id']}: {call['args']}")

    if valid_tool_calls_for_execution:
        print(f"Attempting to execute {len(valid_tool_calls_for_execution)} tools: {[c['name'] for c in valid_tool_calls_for_execution]}")
        try:
            # return_exceptions=True keeps one failing tool from aborting
            # the whole batch; failures come back as exception objects.
            responses = tool_executor.batch(valid_tool_calls_for_execution, return_exceptions=True)

            for call, resp in zip(valid_tool_calls_for_execution, responses):
                tool_call_id = call['id']
                tool_name = call['name']

                if isinstance(resp, Exception):
                    # Per-tool failure: surface in the UI and report back to
                    # the agent as an error-status ToolMessage.
                    error_type = type(resp).__name__
                    error_str = str(resp)
                    print(f"ERROR executing tool '{tool_name}' (ID: {tool_call_id}): {error_type} - {error_str}")
                    traceback.print_exc()
                    st.error(f"Error executing action '{tool_name}': {error_type}")
                    error_content = json.dumps({
                        "status": "error",
                        "message": f"Failed to execute '{tool_name}': {error_type} - {error_str}"
                    })
                    tool_messages.append(ToolMessage(content=error_content, tool_call_id=tool_call_id, name=tool_name))

                    # Extra diagnostics for a known Langchain/LangGraph
                    # failure mode seen with malformed tool-call dicts.
                    if isinstance(resp, AttributeError) and "'dict' object has no attribute 'tool'" in error_str:
                        print("\n *** DETECTED SPECIFIC ATTRIBUTE ERROR ('dict' object has no attribute 'tool') ***")
                        print(f" Tool Call causing error: {json.dumps(call, indent=2)}")
                        print(" This likely indicates an internal issue within Langchain/LangGraph or ToolExecutor expecting a different object structure.")
                        print(" Ensure tool definitions (@tool decorators) and Pydantic schemas are correct.\n")

                else:
                    # Success: tools return JSON strings; coerce to str regardless.
                    print(f"Tool '{tool_name}' (ID: {tool_call_id}) executed successfully. Result type: {type(resp)}")
                    content_str = str(resp)
                    tool_messages.append(ToolMessage(content=content_str, tool_call_id=tool_call_id, name=tool_name))

        except Exception as e:
            # Catastrophic failure of the batch itself: give every
            # not-yet-answered call an error ToolMessage so the protocol
            # (one response per tool_call_id) is still honored.
            print(f"CRITICAL UNEXPECTED ERROR within tool_node logic: {type(e).__name__} - {e}")
            traceback.print_exc()
            st.error(f"Critical internal error processing actions: {e}")
            error_content = json.dumps({"status": "error", "message": f"Internal error processing tools: {e}"})
            processed_ids = {msg.tool_call_id for msg in tool_messages}
            for call in valid_tool_calls_for_execution:
                if call['id'] not in processed_ids:
                    tool_messages.append(ToolMessage(content=error_content, tool_call_id=call['id'], name=call['name']))

    print(f"Returning {len(tool_messages)} tool messages.")
    return {"messages": tool_messages}
|
|
|
|
|
|
|
def should_continue(state: AgentState) -> str:
    """Determines whether to call tools, end the conversation turn, or handle errors.

    Returns "continue_tools" when the last AI message requests tool calls,
    otherwise "end_conversation_turn" (including non-AI messages and the
    in-band internal-error message emitted by agent_node).
    """
    print("\n---ROUTING DECISION---")
    history = state['messages']
    last_message = history[-1] if history else None

    # Anything other than an AI message means the turn is over.
    if not isinstance(last_message, AIMessage):
        print("Routing: Last message not AI. Ending turn.")
        return "end_conversation_turn"

    # agent_node reports LLM failures in-band; stop rather than loop.
    if "Sorry, an internal error occurred" in last_message.content:
        print("Routing: AI returned internal error. Ending turn.")
        return "end_conversation_turn"

    if getattr(last_message, 'tool_calls', None):
        print("Routing: AI requested tool calls. Continue to tools node.")
        return "continue_tools"

    print("Routing: AI provided final response or asked question. Ending turn.")
    return "end_conversation_turn"
|
|
|
|
|
# --- LangGraph wiring ---
# agent -> (tool calls?) -> tools -> agent ... looping until the agent
# replies without tool calls, which ends the conversation turn.
workflow = StateGraph(AgentState)

workflow.add_node("agent", agent_node)
workflow.add_node("tools", tool_node)

# Every turn starts with the LLM deciding what to do.
workflow.set_entry_point("agent")

workflow.add_conditional_edges(
    "agent",
    should_continue,
    {
        "continue_tools": "tools",
        "end_conversation_turn": END
    }
)

# Tool results always flow back to the agent for interpretation.
workflow.add_edge("tools", "agent")

app = workflow.compile()
print("LangGraph compiled successfully.")
|
|
|
|
|
def main():
    """Streamlit entry point: renders the intake sidebar and chat transcript,
    and drives one LangGraph agent turn per user message.

    Fixes vs. previous version:
      - removed unsupported ``key=`` kwargs from ``st.chat_message`` calls
        (the API accepts only name/avatar; passing ``key`` raises TypeError);
      - the critical-error AIMessage is now appended to the transcript
        instead of being silently discarded.
    """
    st.set_page_config(page_title=ClinicalAppSettings.APP_TITLE, layout=ClinicalAppSettings.PAGE_LAYOUT)
    st.title(f"🩺 {ClinicalAppSettings.APP_TITLE}")
    st.caption(f"Interactive Assistant | Powered by Langchain/LangGraph & Groq ({ClinicalAppSettings.MODEL_NAME})")

    # --- Session-state bootstrap ---
    if "messages" not in st.session_state:
        st.session_state.messages = []
    if "patient_data" not in st.session_state:
        st.session_state.patient_data = None
    if "graph_app" not in st.session_state:
        st.session_state.graph_app = app

    # --- Sidebar: patient intake form ---
    with st.sidebar:
        st.header("📋 Patient Intake Form")

        st.subheader("Demographics")
        age = st.number_input("Age", min_value=0, max_value=120, value=55, key="age_input")
        sex = st.selectbox("Biological Sex", ["Male", "Female", "Other/Prefer not to say"], key="sex_input")

        st.subheader("History of Present Illness (HPI)")
        chief_complaint = st.text_input("Chief Complaint", "Chest pain", key="cc_input")
        hpi_details = st.text_area("Detailed HPI", "55 y/o male presents with substernal chest pain started 2 hours ago, described as pressure, radiating to left arm. Associated with nausea and diaphoresis. Pain is 8/10 severity. No relief with rest.", key="hpi_input", height=150)
        symptoms = st.multiselect("Associated Symptoms", ["Nausea", "Diaphoresis", "Shortness of Breath", "Dizziness", "Palpitations", "Fever", "Cough", "Severe Headache", "Syncope", "Hemoptysis"], default=["Nausea", "Diaphoresis"], key="sym_input")

        st.subheader("Past History")
        pmh = st.text_area("Past Medical History (PMH)", "Hypertension (HTN), Hyperlipidemia (HLD), Type 2 Diabetes Mellitus (DM2), History of MI", key="pmh_input")
        psh = st.text_area("Past Surgical History (PSH)", "Appendectomy (2005)", key="psh_input")

        st.subheader("Medications & Allergies")
        current_meds_str = st.text_area("Current Medications (name, dose, freq)", "Lisinopril 10mg daily\nMetformin 1000mg BID\nAtorvastatin 40mg daily\nAspirin 81mg daily", key="meds_input")
        allergies_str = st.text_area("Allergies (comma separated, specify reaction if known)", "Penicillin (rash), Sulfa (hives)", key="allergy_input")

        st.subheader("Social/Family History")
        social_history = st.text_area("Social History (SH)", "Smoker (1 ppd x 30 years), occasional alcohol.", key="sh_input")
        family_history = st.text_area("Family History (FHx)", "Father had MI at age 60. Mother has HTN.", key="fhx_input")

        st.subheader("Vitals & Exam Findings")
        col1, col2 = st.columns(2)
        with col1:
            temp_c = st.number_input("Temp (°C)", 35.0, 42.0, 36.8, format="%.1f", key="temp_input")
            hr_bpm = st.number_input("HR (bpm)", 30, 250, 95, key="hr_input")
            rr_rpm = st.number_input("RR (rpm)", 5, 50, 18, key="rr_input")
        with col2:
            bp_mmhg = st.text_input("BP (SYS/DIA)", "155/90", key="bp_input")
            spo2_percent = st.number_input("SpO2 (%)", 70, 100, 96, key="spo2_input")
            pain_scale = st.slider("Pain (0-10)", 0, 10, 8, key="pain_input")
        exam_notes = st.text_area("Brief Physical Exam Notes", "Awake, alert, oriented x3. Mild distress. Lungs clear bilaterally. Cardiac exam: Regular rhythm, S1/S2 normal, no murmurs/gallops/rubs. Abdomen soft, non-tender. No lower extremity edema.", key="exam_input", height=100)

        if st.button("Start/Update Consultation", key="start_button"):
            # Keep full med strings for display and bare lowercase names
            # for the interaction checker.
            current_meds_list = [med.strip() for med in current_meds_str.split('\n') if med.strip()]
            current_med_names = []
            for med in current_meds_list:
                match = re.match(r"^\s*([a-zA-Z\-]+)", med)
                if match:
                    current_med_names.append(match.group(1).lower())

            # Parse allergies, stripping any parenthesised reaction note.
            allergies_list = []
            for a in allergies_str.split(','):
                cleaned_allergy = a.strip()
                if cleaned_allergy:
                    match = re.match(r"^\s*([a-zA-Z\-\s]+)(?:\s*\(.*\))?", cleaned_allergy)
                    if match:
                        allergies_list.append(match.group(1).strip().lower())
                    else:
                        allergies_list.append(cleaned_allergy.lower())

            st.session_state.patient_data = {
                "demographics": {"age": age, "sex": sex},
                "hpi": {"chief_complaint": chief_complaint, "details": hpi_details, "symptoms": symptoms},
                "pmh": {"conditions": pmh}, "psh": {"procedures": psh},
                "medications": {"current": current_meds_list, "names_only": current_med_names},
                "allergies": allergies_list,
                "social_history": {"details": social_history}, "family_history": {"details": family_history},
                "vitals": {"temp_c": temp_c, "hr_bpm": hr_bpm, "bp_mmhg": bp_mmhg, "rr_rpm": rr_rpm, "spo2_percent": spo2_percent, "pain_scale": pain_scale},
                "exam_findings": {"notes": exam_notes}
            }

            # Deterministic red-flag screen shown before any LLM call.
            red_flags = check_red_flags(st.session_state.patient_data)
            st.sidebar.markdown("---")
            if red_flags:
                st.sidebar.warning("**Initial Red Flags Detected:**")
                for flag in red_flags:
                    st.sidebar.warning(f"- {flag.replace('Red Flag: ','')}")
            else:
                st.sidebar.success("No immediate red flags detected in initial data.")

            # Seed a fresh conversation for the newly loaded patient.
            initial_prompt = "Initiate consultation for the patient described in the intake form. Start the analysis."
            st.session_state.messages = [HumanMessage(content=initial_prompt)]
            st.success("Patient data loaded. Ready for analysis.")

    # --- Main panel: conversation transcript ---
    st.header("💬 Clinical Consultation")

    for msg in st.session_state.messages:
        if isinstance(msg, HumanMessage):
            # Fix: st.chat_message() takes no 'key' argument.
            with st.chat_message("user"):
                st.markdown(msg.content)

        elif isinstance(msg, AIMessage):
            with st.chat_message("assistant"):
                ai_content = msg.content
                structured_output = None

                # Try to locate a fenced ```json block (or a bare JSON object)
                # in the AI message; otherwise render it as plain markdown.
                try:
                    json_match = re.search(r"```json\s*(\{.*?\})\s*```", ai_content, re.DOTALL | re.IGNORECASE)
                    if json_match:
                        json_str = json_match.group(1)
                        prefix = ai_content[:json_match.start()].strip()
                        suffix = ai_content[json_match.end():].strip()
                        if prefix:
                            st.markdown(prefix)
                        structured_output = json.loads(json_str)
                        if suffix:
                            st.markdown(suffix)
                    elif ai_content.strip().startswith("{") and ai_content.strip().endswith("}"):
                        structured_output = json.loads(ai_content)
                        ai_content = ""
                    else:
                        st.markdown(ai_content)
                except json.JSONDecodeError:
                    # Looked like JSON but wasn't parseable; show raw text.
                    st.markdown(ai_content)
                    st.warning("Note: Could not parse structured JSON in AI response.", icon="⚠️")
                except Exception as e:
                    st.markdown(ai_content)
                    st.error(f"Error processing AI message display: {e}", icon="❌")

                # Render the structured assessment, if one was parsed.
                if structured_output and isinstance(structured_output, dict):
                    st.divider()
                    st.subheader("📋 AI Analysis & Recommendations")
                    cols = st.columns(2)
                    with cols[0]:
                        st.markdown("**Assessment:**")
                        st.markdown(f"> {structured_output.get('assessment', 'N/A')}")

                        st.markdown("**Differential Diagnosis:**")
                        ddx = structured_output.get('differential_diagnosis', [])
                        if ddx:
                            for item in ddx:
                                likelihood = item.get('likelihood', 'Unknown').capitalize()
                                icon = "🥇" if likelihood == "High" else ("🥈" if likelihood == "Medium" else "🥉")
                                with st.expander(f"{icon} {item.get('diagnosis', 'Unknown')} ({likelihood})"):
                                    st.write(f"**Rationale:** {item.get('rationale', 'N/A')}")
                        else:
                            st.info("No differential diagnosis provided.")

                        st.markdown("**Risk Assessment:**")
                        risk = structured_output.get('risk_assessment', {})
                        flags = risk.get('identified_red_flags', [])
                        if flags:
                            st.warning(f"**Flags:** {', '.join(flags)}")
                        if risk.get("immediate_concerns"):
                            st.warning(f"**Concerns:** {', '.join(risk.get('immediate_concerns'))}")
                        if risk.get("potential_complications"):
                            st.info(f"**Potential Complications:** {', '.join(risk.get('potential_complications'))}")
                        if not flags and not risk.get("immediate_concerns"):
                            st.success("No major risks highlighted in this assessment.")

                    with cols[1]:
                        st.markdown("**Recommended Plan:**")
                        plan = structured_output.get('recommended_plan', {})
                        for section in ["investigations", "therapeutics", "consultations", "patient_education"]:
                            st.markdown(f"_{section.replace('_',' ').capitalize()}:_")
                            items = plan.get(section)
                            if items and isinstance(items, list):
                                for item in items:
                                    st.markdown(f"- {item}")
                            elif items:
                                st.markdown(f"- {items}")
                            else:
                                st.markdown("_None suggested._")
                            st.markdown("")

                        st.markdown("**Rationale & Guideline Check:**")
                        st.markdown(f"> {structured_output.get('rationale_summary', 'N/A')}")
                        interaction_summary = structured_output.get("interaction_check_summary", "")
                        if interaction_summary:
                            st.markdown("**Interaction Check Summary:**")
                            st.markdown(f"> {interaction_summary}")

                    st.divider()

                # Show any tool calls the AI requested this turn.
                if getattr(msg, 'tool_calls', None):
                    with st.expander("🛠️ AI requested actions", expanded=False):
                        for tc in msg.tool_calls:
                            try:
                                st.code(f"Action: {tc.get('name', 'Unknown Tool')}\nArgs: {json.dumps(tc.get('args', {}), indent=2)}", language="json")
                            except Exception as display_e:
                                st.error(f"Could not display tool call: {display_e}")
                                st.code(str(tc))

        elif isinstance(msg, ToolMessage):
            tool_name_display = getattr(msg, 'name', 'tool_execution')
            # Fix: st.chat_message() takes no 'key' argument.
            with st.chat_message(tool_name_display, avatar="🛠️"):
                try:
                    # Tools return JSON payloads; pick styling by status.
                    tool_data = json.loads(msg.content)
                    status = tool_data.get("status", "info")
                    message = tool_data.get("message", msg.content)
                    details = tool_data.get("details")
                    warnings = tool_data.get("warnings")

                    if status == "success" or status == "clear" or status == "flagged":
                        st.success(f"{message}", icon="✅" if status != "flagged" else "🚨")
                    elif status == "warning":
                        st.warning(f"{message}", icon="⚠️")
                        if warnings and isinstance(warnings, list):
                            st.caption("Details:")
                            for warn in warnings:
                                st.caption(f"- {warn}")
                    else:
                        st.error(f"{message}", icon="❌")

                    if details:
                        st.caption(f"Details: {details}")
                except json.JSONDecodeError:
                    # Non-JSON tool output (e.g. raw search results).
                    st.info(f"{msg.content}")
                except Exception as e:
                    st.error(f"Error displaying tool message: {e}", icon="❌")
                    st.caption(f"Raw content: {msg.content}")

    # --- Chat input: run one agent turn per user message ---
    if prompt := st.chat_input("Your message or follow-up query..."):
        if not st.session_state.patient_data:
            st.warning("Please load patient data using the sidebar first.")
            st.stop()

        user_message = HumanMessage(content=prompt)
        st.session_state.messages.append(user_message)
        with st.chat_message("user"):
            st.markdown(prompt)

        current_state = AgentState(
            messages=st.session_state.messages,
            patient_data=st.session_state.patient_data
        )

        with st.spinner("SynapseAI is thinking..."):
            try:
                # recursion_limit caps agent<->tools round trips per turn.
                final_state = st.session_state.graph_app.invoke(
                    current_state,
                    {"recursion_limit": 15}
                )
                st.session_state.messages = final_state['messages']
            except Exception as e:
                print(f"CRITICAL ERROR during graph invocation: {type(e).__name__} - {e}")
                traceback.print_exc()
                st.error(f"An error occurred during the conversation turn: {e}", icon="❌")
                # Fix: previously this message was built but never stored,
                # so failures left no trace in the transcript.
                error_ai_msg = AIMessage(content=f"Sorry, a critical error occurred: {type(e).__name__}. Please check logs or try again.")
                st.session_state.messages.append(error_ai_msg)

        st.rerun()

    # --- Footer disclaimer ---
    st.markdown("---")
    st.warning(
        """**Disclaimer:** SynapseAI is an AI assistant for clinical decision support and does not replace professional medical judgment.
        All outputs must be critically reviewed and verified by a qualified healthcare provider before making any clinical decisions.
        Validate all information, especially diagnoses, dosages, and interactions, independently using standard medical resources."""
    )
|
|
|
# Standard script entry point (Streamlit executes the module top-to-bottom).
if __name__ == "__main__":
    main()