# /home/user/app/agent.py
from langchain_openai import ChatOpenAI # For OpenAI models
from langchain.agents import AgentExecutor, create_openai_functions_agent # Agent optimized for OpenAI function calling
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage  # SystemMessage is supplied via the prompt template, so it isn't imported here
# --- Import your defined tools FROM THE 'tools' PACKAGE ---
from tools import (
BioPortalLookupTool,
UMLSLookupTool,
QuantumTreatmentOptimizerTool,
# QuantumOptimizerInput, # Import if needed for type hints directly in this file
# GeminiTool, # Not needed if primary LLM is OpenAI
)
from config.settings import settings # This loads your HF secrets into the settings object
from services.logger import app_logger
# --- Initialize LLM (OpenAI) ---
llm = None
try:
if not settings.OPENAI_API_KEY:
app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")
# Choose your preferred OpenAI model
# "gpt-3.5-turbo-0125" is a good balance of cost and capability for function calling.
# "gpt-4-turbo-preview" or "gpt-4" is more capable but more expensive.
llm = ChatOpenAI(
model_name="gpt-3.5-turbo-0125", # Or "gpt-4-turbo-preview"
temperature=0.2, # Lower for more predictable tool use
openai_api_key=settings.OPENAI_API_KEY
)
app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
except Exception as e:
detailed_error_message = str(e)
user_facing_error = f"OpenAI LLM initialization failed: {detailed_error_message}. Check API key and model name."
if "api_key" in detailed_error_message.lower() or "authenticate" in detailed_error_message.lower():
user_facing_error = "OpenAI LLM initialization failed: API key issue. Ensure OPENAI_API_KEY is correctly set in Hugging Face Secrets and is valid."
app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
else:
app_logger.error(user_facing_error, exc_info=True)
raise ValueError(user_facing_error)
# --- Initialize Tools List ---
tools_list = [
UMLSLookupTool(),
BioPortalLookupTool(),
QuantumTreatmentOptimizerTool(),
]
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
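# For reference, you can inspect the JSON function schema each tool will expose
# to the LLM. Hedged sketch: `convert_to_openai_function` lives in
# langchain_core.utils.function_calling in recent LangChain releases, though the
# exact output shape depends on your installed version:
#
#   from langchain_core.utils.function_calling import convert_to_openai_function
#   print(convert_to_openai_function(tools_list[0]))
#
# The resulting "name", "description", and "parameters" fields are derived from
# each tool's `name`, `description`, and `args_schema` attributes.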
# --- Agent Prompt (for OpenAI Functions Agent) ---
# This prompt can stay simple: create_openai_functions_agent leverages OpenAI's
# native function-calling mechanism, so the model sees each tool's name and
# description directly. We only supply system instructions plus placeholders
# for history and input.
# {agent_scratchpad} is required for the OpenAI Functions agent to work correctly;
# {patient_context} must be passed in every invoke call.
OPENAI_SYSTEM_PROMPT_TEXT = (
    "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
    "Your goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
    "You have access to a set of tools to help you. Use them when appropriate.\n"
    "Disclaimers: Always state that you provide informational support and are not a substitute for clinical judgment. "
    "For specific patient cases, do not give direct medical advice; when treatment optimization is relevant, use the 'quantum_treatment_optimizer' tool.\n"
    "Patient Context for this session (if provided by the user earlier): {patient_context}\n"
    "When using the 'quantum_treatment_optimizer' tool, populate its 'patient_data' argument from the available {patient_context}.\n"
    "Always be clear and concise. Cite tools when their output forms a key part of your answer."
)
# `create_openai_functions_agent` typically works well with a system message,
# chat history placeholder, human input placeholder, and agent_scratchpad placeholder.
prompt = ChatPromptTemplate.from_messages([
("system", OPENAI_SYSTEM_PROMPT_TEXT),
MessagesPlaceholder(variable_name="chat_history"),
("human", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad") # Essential for OpenAI Functions agent
])
app_logger.info("Agent prompt template created for OpenAI Functions agent.")
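# Sketch of the variables this prompt expects at invoke time (values hypothetical):
#
#   agent_executor.invoke({
#       "input": "What does UMLS say about atrial fibrillation?",
#       "chat_history": [],          # list of BaseMessage (HumanMessage/AIMessage)
#       "patient_context": "Age: 70; Gender: Male; ...",
#   })
#
# Note that "agent_scratchpad" is NOT supplied by the caller: the AgentExecutor
# fills it with the intermediate function calls and tool outputs on each step.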
# --- Create Agent ---
if llm is None:
    # Defensive guard: the init block above re-raises on failure, so this branch
    # should be unreachable, but it protects against unexpected import states.
    app_logger.critical("LLM object is None at agent creation stage (OpenAI). Application cannot proceed.")
    raise SystemExit("Agent LLM failed to initialize. Application cannot start.")
try:
agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
app_logger.info("OpenAI Functions agent created successfully.")
except Exception as e:
app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
raise ValueError(f"OpenAI agent creation failed: {e}")
# --- Create Agent Executor ---
agent_executor = AgentExecutor(
agent=agent,
tools=tools_list,
verbose=True,
handle_parsing_errors=True, # Handles cases where LLM output for function call is malformed
max_iterations=10,
# return_intermediate_steps=True, # Useful for debugging
)
app_logger.info("AgentExecutor with OpenAI agent created successfully.")
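# Debugging sketch: with return_intermediate_steps=True (commented out above),
# the invoke result also carries each (AgentAction, observation) pair. Key names
# follow LangChain's AgentExecutor conventions:
#
#   result = agent_executor.invoke({...})
#   for action, observation in result["intermediate_steps"]:
#       app_logger.debug(f"{action.tool}({action.tool_input}) -> {observation}")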
# --- Getter Function for Streamlit App ---
_agent_executor_instance = agent_executor
def get_agent_executor():
"""
Returns the configured agent executor for OpenAI.
The executor is initialized when this module is first imported.
"""
global _agent_executor_instance
if _agent_executor_instance is None:
app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI).")
raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check startup logs.")
# You can add a check for settings.OPENAI_API_KEY here too if desired,
# but the LLM init should have caught it.
return _agent_executor_instance
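# Hypothetical Streamlit wiring (sketch only; this project's actual UI code may
# differ, and the session_state keys below are assumptions):
#
#   import streamlit as st
#   executor = get_agent_executor()
#   if user_msg := st.chat_input("Ask the Quantum Health Navigator..."):
#       result = executor.invoke({
#           "input": user_msg,
#           "chat_history": st.session_state.get("history", []),
#           "patient_context": st.session_state.get("patient_context", "Not provided"),
#       })
#       st.markdown(result["output"])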
# --- Example Usage (for local testing) ---
if __name__ == "__main__":
if not settings.OPENAI_API_KEY:
        print("🚨 Please set OPENAI_API_KEY in a .env file or as an environment variable to run this test.")
else:
print("\nπŸš€ Quantum Health Navigator (OpenAI Agent Test Console) πŸš€")
print("-----------------------------------------------------------")
        try:
            test_executor = get_agent_executor()
        except (RuntimeError, ValueError) as e_init:
            # get_agent_executor raises RuntimeError if the executor never initialized.
            print(f"⚠️ Agent initialization failed: {e_init}")
            raise SystemExit(1)
current_chat_history_for_test_run = []
test_patient_context_summary_str = (
"Age: 70; Gender: Male; Chief Complaint: Shortness of breath on exertion; "
"Key Medical History: COPD, Atrial Fibrillation; "
"Current Medications: Tiotropium inhaler, Apixaban 5mg BID; Allergies: Penicillin."
)
print(f"ℹ️ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")
while True:
user_input_str = input("πŸ‘€ You: ").strip()
if user_input_str.lower() in ["exit", "quit"]:
print("πŸ‘‹ Exiting.")
break
if not user_input_str:
continue
try:
app_logger.info(f"__main__ test (OpenAI): Invoking with: '{user_input_str}'")
# Keys for invoke: "input", "chat_history", and any other variables in your prompt (like "patient_context")
response_dict = test_executor.invoke({
"input": user_input_str,
"chat_history": current_chat_history_for_test_run, # List of BaseMessage
"patient_context": test_patient_context_summary_str
})
ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
print(f"πŸ€– Agent: {ai_output_str}")
current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
if len(current_chat_history_for_test_run) > 10:
current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
except Exception as e:
print(f"⚠️ Error during agent invocation: {e}")
app_logger.error(f"Error in __main__ OpenAI agent test invocation: {e}", exc_info=True)