from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage

from tools import (
    BioPortalLookupTool,
    UMLSLookupTool,
    QuantumTreatmentOptimizerTool,
)

from config.settings import settings
from services.logger import app_logger
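
# --- LLM initialization ---
# Fail fast at import time with a clear, user-facing error if the OpenAI API key
# is missing or rejected, rather than surfacing a cryptic failure on the first query.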
llm = None
try:
    if not settings.OPENAI_API_KEY:
        app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
        raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")

    llm = ChatOpenAI(
        model_name="gpt-4-turbo-preview",
        temperature=0.1,
        openai_api_key=settings.OPENAI_API_KEY,
    )
    app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
except Exception as e:
    detailed_error_message = str(e)
    user_facing_error = f"OpenAI LLM initialization failed: {detailed_error_message}. Check API key and model name."
    if "api_key" in detailed_error_message.lower() or "authenticate" in detailed_error_message.lower():
        user_facing_error = "OpenAI LLM initialization failed: API key issue. Ensure OPENAI_API_KEY is correctly set in Hugging Face Secrets and is valid."
        app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
    else:
        app_logger.error(user_facing_error, exc_info=True)
    raise ValueError(user_facing_error)
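
# --- Agent tools ---
# Tools exposed to the agent; the model chooses among them at runtime based on
# each tool's name and description.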
tools_list = [
    UMLSLookupTool(),
    BioPortalLookupTool(),
    QuantumTreatmentOptimizerTool(),
]
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
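
# --- System prompt ---
# {patient_context} is a prompt variable supplied by the caller on every invoke
# (see the __main__ test console below); the prompt instructs the model to reuse
# it when filling the quantum_treatment_optimizer tool's 'patient_data' argument.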
OPENAI_SYSTEM_PROMPT_TEXT_SIMPLIFIED = (
    "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
    "Your goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
    "You have access to a set of specialized tools. Use them when a user's query can be best answered by one of them, based on their descriptions.\n"
    "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
    "Patient Context for this session (if provided by the user earlier): {patient_context}\n"
    "When using the 'quantum_treatment_optimizer' tool, ensure you populate its 'patient_data' argument using the available {patient_context}.\n"
    "For `bioportal_lookup`, if the user doesn't specify an ontology, you may ask or default to 'SNOMEDCT_US'.\n"
    "Always be clear and concise. Cite tools if their output forms a key part of your answer."
)
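
# --- Prompt template ---
# System instructions, prior chat history, the current user input, and the
# agent_scratchpad placeholder where the agent's intermediate tool calls and
# tool results are accumulated during a single run.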
prompt = ChatPromptTemplate.from_messages([
    ("system", OPENAI_SYSTEM_PROMPT_TEXT_SIMPLIFIED),
    MessagesPlaceholder(variable_name="chat_history"),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])
app_logger.info("Agent prompt template (simplified for OpenAI Functions) created.")
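
# --- Agent construction ---
# create_openai_functions_agent exposes the tools to the model through OpenAI's
# function-calling interface and combines them with the prompt defined above.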
if llm is None:
    app_logger.critical("LLM object is None at agent creation (OpenAI). Application cannot proceed.")
    raise SystemExit("Agent LLM failed to initialize. Application cannot start.")

try:
    agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
    app_logger.info("OpenAI Functions agent created successfully.")
except Exception as e:
    app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
    raise ValueError(f"OpenAI agent creation failed: {e}")
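
# --- Agent executor ---
# handle_parsing_errors=True feeds malformed tool-call output back to the model
# so it can retry; max_iterations caps the number of reasoning/tool steps per query.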
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools_list,
    verbose=True,
    handle_parsing_errors=True,
    max_iterations=7,
)
app_logger.info("AgentExecutor with OpenAI agent created successfully.")
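
# Module-level singleton: the executor is built once at import time and handed
# out through get_agent_executor().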
_agent_executor_instance = agent_executor


def get_agent_executor():
    """
    Returns the configured agent executor for OpenAI.
    The executor is initialized when this module is first imported.
    """
    global _agent_executor_instance
    if _agent_executor_instance is None:
        app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI). Initialization likely failed.")
        raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check application startup logs for errors (e.g., API key issues, prompt errors).")
    if not settings.OPENAI_API_KEY:
        app_logger.error("OpenAI API Key is missing at get_agent_executor call. Agent will fail.")
        raise ValueError("OpenAI API Key not configured.")
    return _agent_executor_instance
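

# --- Interactive test console ---
# Run this module directly to chat with the agent from the terminal using a
# simulated patient context and a rolling chat history.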
if __name__ == "__main__":
    if not settings.OPENAI_API_KEY:
        print("🚨 Please set your OPENAI_API_KEY in .env file or as an environment variable to run the test.")
    else:
        print("\nQuantum Health Navigator (OpenAI Agent Test Console)")
        print("-----------------------------------------------------------")
        print("Type 'exit' or 'quit' to stop.")
        print("Example topics: medical definitions, treatment optimization (will use simulated patient context).")
        print("-" * 59)

        try:
            test_executor = get_agent_executor()
        except (ValueError, RuntimeError) as e_init:
            print(f"⚠️ Agent initialization failed during test startup: {e_init}")
            print("Ensure your API key is correctly configured and prompt variables are set.")
            exit()

        current_chat_history_for_test_run = []

        test_patient_context_summary_str = (
            "Age: 70; Gender: Male; Chief Complaint: Shortness of breath on exertion; "
            "Key Medical History: COPD, Atrial Fibrillation; "
            "Current Medications: Tiotropium inhaler, Apixaban 5mg BID; Allergies: Penicillin."
        )
        print(f"ℹ️ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")

        while True:
            user_input_str = input("👤 You: ").strip()
            if user_input_str.lower() in ["exit", "quit"]:
                print("👋 Exiting test console.")
                break
            if not user_input_str:
                continue

            try:
                app_logger.info(f"__main__ test (OpenAI): Invoking with input: '{user_input_str}'")
                response_dict = test_executor.invoke({
                    "input": user_input_str,
                    "chat_history": current_chat_history_for_test_run,
                    "patient_context": test_patient_context_summary_str
                })

                ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
                print(f"🤖 Agent: {ai_output_str}")

                current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
                current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))

                # Keep only the last 10 messages (5 exchanges) to bound prompt size.
                if len(current_chat_history_for_test_run) > 10:
                    current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]

            except Exception as e_invoke:
                print(f"⚠️ Error during agent invocation: {type(e_invoke).__name__} - {e_invoke}")
                app_logger.error(f"Error in __main__ OpenAI agent test invocation: {e_invoke}", exc_info=True)