# NOTE: removed non-source residue that was accidentally captured with this
# file (byte count, git-blame commit hashes, and a line-number gutter from a
# web file viewer). It was not valid Python and carried no source content.
# /home/user/app/agent.py
import os
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.agents import AgentExecutor, create_structured_chat_agent
# from langchain_google_genai import HarmBlockThreshold, HarmCategory # Optional for safety
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
# --- Import your defined tools ---
from tools.bioportal_tool import BioPortalLookupTool, BioPortalInput
from tools.gemini_tool import GeminiTool, GeminiInput # For using Gemini as a specific sub-task tool
from tools.umls_tool import UMLSLookupTool, UMLSInput
from tools.quantum_treatment_optimizer_tool import QuantumTreatmentOptimizerTool, QuantumOptimizerInput # Assuming this path and model name
from config.settings import settings
from services.logger import app_logger
# --- Initialize LLM (Gemini) ---
# Runs at import time so a missing API key fails fast, before any request
# handling. The key may come from settings.GEMINI_API_KEY or the
# GOOGLE_API_KEY environment variable (e.g. HuggingFace Spaces secrets).
try:
    if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
        raise ValueError("GOOGLE_API_KEY (for Gemini) not found in settings or environment.")
    llm = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro-latest",  # more capable model; "gemini-pro" is the fallback if unavailable
        temperature=0.3,  # low temperature for more deterministic clinical answers
        # google_api_key=settings.GEMINI_API_KEY,  # pass explicitly if the GOOGLE_API_KEY env var isn't set
        convert_system_message_to_human=True,  # helpful for models that reject system messages
        # safety_settings={  # Example safety settings (requires HarmCategory/HarmBlockThreshold import)
        #     HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
        #     HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        # }
    )
    app_logger.info(f"ChatGoogleGenerativeAI ({llm.model}) initialized successfully.")
except Exception as e:
    app_logger.error(f"Failed to initialize ChatGoogleGenerativeAI: {e}", exc_info=True)
    # Chain the original exception so the real cause survives in tracebacks.
    raise ValueError(f"Gemini LLM initialization failed: {e}. Check API key and configurations in HF Secrets.") from e
# --- Initialize Tools ---
# Each tool's `description` is what the LLM reads when deciding whether to
# invoke it, and its `args_schema` defines the expected "action_input" shape —
# both must stay accurate for the agent to produce valid tool calls.
tools = [
    UMLSLookupTool(),
    BioPortalLookupTool(),
    QuantumTreatmentOptimizerTool(),
    # GeminiTool(),  # Disabled: the main agent LLM is already Gemini. Only
    # re-enable if this tool performs a distinct sub-task (e.g. a different
    # Gemini model, such as a vision variant while the main LLM is text-only).
    # For general queries the main agent LLM can answer directly.
]
# Logged names should match the tool names referenced in the system prompt.
app_logger.info(f"Tools initialized: {[tool.name for tool in tools]}")
# --- Agent Prompt (Adapted for Structured Chat with Gemini and your tools) ---
# This prompt guides the LLM to:
# 1. Understand its role and capabilities.
# 2. Know which tools are available ({tools}) and their exact names ({tool_names}).
# 3. Format tool invocations as a JSON blob with "action" and "action_input".
#    - "action_input" is a string for single-argument tools (UMLSInput, GeminiInput).
#    - "action_input" is a dictionary for multi-argument tools (BioPortalInput, QuantumOptimizerInput).
# 4. Use the provided {patient_context}.
# 5. Refer to {chat_history}.
# 6. Process the new {input}.
# 7. Use {agent_scratchpad} for its internal monologue/tool results.
# NOTE: create_structured_chat_agent validates that the prompt declares the
# {tools}, {tool_names} and {agent_scratchpad} variables — omitting
# {tool_names} (as the previous version did) makes agent construction fail.
SYSTEM_PROMPT_TEMPLATE = (
    "You are 'Quantum Health Navigator', an advanced AI assistant for healthcare professionals. "
    "Your primary goal is to provide accurate information and insights based on user queries and available tools. "
    "You must adhere to the following guidelines:\n"
    "1. Disclaimers: Always remind the user that you are an AI, not a human medical professional, and your information "
    "is for support, not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases "
    "unless it's the direct output of a specialized tool like 'quantum_treatment_optimizer'.\n"
    "2. Patient Context: The user may provide patient context at the start of the session. This context is available as: {patient_context}. "
    "You MUST consider this context when it's relevant to the query, especially for the 'quantum_treatment_optimizer' tool.\n"
    "3. Tool Usage: You have access to the following tools:\n{tools}\n"
    "   The only valid tool names for \"action\" are: {tool_names}.\n"
    "   To use a tool, respond with a JSON markdown code block like this:\n"
    "   ```json\n"
    "   {{\n"
    '     "action": "tool_name",\n'
    '     "action_input": "query string for the tool" OR {{"arg1": "value1", "arg2": "value2", ...}} \n'
    "   }}\n"
    "   ```\n"
    "   - For `umls_lookup` and `google_gemini_chat`, `action_input` is a single string (the 'term' or 'query').\n"
    "   - For `bioportal_lookup`, `action_input` is a dictionary like `{{\"term\": \"search_term\", \"ontology\": \"ONTOLOGY_CODE\"}}`. If ontology is not specified by user, you can default to SNOMEDCT or ask.\n"
    "   - For `quantum_treatment_optimizer`, `action_input` is a dictionary like `{{\"patient_data\": {{...patient details...}}, \"current_treatments\": [\"med1\"], \"conditions\": [\"cond1\"]}}`. You MUST populate 'patient_data' using the overall {patient_context} if available and relevant.\n"
    "4. Responding to User: After using a tool, you will receive an observation. Use this observation and your knowledge to formulate a comprehensive answer. Cite the tool if you used one (e.g., 'According to UMLS Lookup...').\n"
    "5. Specific Tool Guidance:\n"
    "   - If asked about treatment optimization for a specific patient (especially if context is provided), you MUST use the `quantum_treatment_optimizer` tool.\n"
    "   - For definitions, codes, or general medical concepts, `umls_lookup` or `bioportal_lookup` are appropriate.\n"
    "   - If the query is very general, complex, or creative beyond simple lookups, you might consider using `google_gemini_chat` (if enabled) or answering directly if confident.\n"
    "6. Conversation Flow: Refer to the `Previous conversation history` to maintain context.\n\n"
    "Begin!\n\n"
    "Previous conversation history:\n"
    "{chat_history}\n\n"
    "New human question: {input}\n"
    "{agent_scratchpad}"
)
# The structured chat agent formats the scratchpad as a plain string and
# interpolates it via the {agent_scratchpad} variable already present in the
# system template above. A MessagesPlaceholder("agent_scratchpad") would
# receive that string and fail at runtime, since it only accepts a list of
# messages — so the system message is the entire prompt here; {chat_history}
# and {input} are likewise interpolated inside it.
prompt = ChatPromptTemplate.from_messages([
    ("system", SYSTEM_PROMPT_TEMPLATE),
])
app_logger.info("Agent prompt template created.")
# --- Create Agent ---
try:
    # Wires the LLM, tools, and prompt into a structured-chat agent that emits
    # JSON tool invocations matching the prompt's format instructions.
    agent = create_structured_chat_agent(llm=llm, tools=tools, prompt=prompt)
    app_logger.info("Structured chat agent created successfully with Gemini LLM.")
except Exception as e:
    app_logger.error(f"Failed to create structured chat agent: {e}", exc_info=True)
    # Chain the original exception so the real cause survives in tracebacks.
    raise ValueError(f"Gemini agent creation failed: {e}") from e
# --- Create Agent Executor ---
# Module-level singleton; retrieved by the app via get_agent_executor().
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,  # Set to True for debugging, False for production
    handle_parsing_errors=True,  # Crucial: LLM-generated JSON tool calls may be malformed
    max_iterations=10,  # allow several chained tool calls before forcing a stop
    # return_intermediate_steps=True,  # Enable if you need to see thoughts/tool calls in the response object
    # NOTE(review): newer LangChain versions only accept "force" here for
    # agents without a built-in "generate" stop — confirm against the
    # installed langchain version.
    early_stopping_method="generate",  # Stop if LLM generates a stop token or a final answer
)
app_logger.info("AgentExecutor with Gemini agent created successfully.")
# --- Getter Function for Streamlit App ---
def get_agent_executor():
    """Return the module-level AgentExecutor configured for Gemini.

    The executor is fully built at import time; this accessor simply hands it
    back after a sanity check that the API key is still available (it would
    normally have failed much earlier during LLM initialization).
    """
    key_present = bool(settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY"))
    if not key_present:
        # Mostly redundant if LLM init already failed, but cheap insurance.
        app_logger.error("CRITICAL: GOOGLE_API_KEY (for Gemini) is not available at get_agent_executor call.")
        raise ValueError("Google API Key for Gemini not configured. Agent cannot function.")
    return agent_executor
# --- Example Usage (for local testing of this agent.py file) ---
if __name__ == "__main__":
    if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
        print("Please set your GOOGLE_API_KEY in .env file or as an environment variable.")
    else:
        # Interactive smoke test: banner, then a simple REPL over the executor.
        for banner_line in (
            "\nQuantum Health Navigator (Gemini Agent Test Console)",
            "Type 'exit' or 'quit' to stop.",
            "Example queries:",
            " - What is hypertension?",
            " - Lookup 'myocardial infarction' in UMLS.",
            " - Search for 'diabetes mellitus type 2' in BioPortal using SNOMEDCT ontology.",
            " - Optimize treatment for a patient (context will be simulated).",
            "-" * 30,
        ):
            print(banner_line)

        executor = get_agent_executor()
        history = []  # alternating HumanMessage / AIMessage entries
        # Simulated patient context for exercising the {patient_context} variable.
        simulated_context = (
            "Age: 45; Gender: Male; Chief Complaint: Intermittent chest pain; "
            "Key Medical History: Hyperlipidemia; Current Medications: Atorvastatin 20mg."
        )

        while True:
            user_query = input("\n👤 You: ")
            if user_query.lower() in ["exit", "quit"]:
                print("Exiting test console.")
                break
            try:
                app_logger.info(f"__main__ test: Invoking agent with input: '{user_query}'")
                result = executor.invoke({
                    "input": user_query,
                    "chat_history": history,
                    "patient_context": simulated_context,  # Passing the context
                })
                answer = result.get('output', "Agent did not produce an output.")
                print(f"🤖 Agent: {answer}")
                history.append(HumanMessage(content=user_query))
                history.append(AIMessage(content=answer))
            except Exception as e:
                print(f"⚠️ Error during agent invocation: {e}")
                app_logger.error(f"Error in __main__ agent test invocation: {e}", exc_info=True)