Update agent.py
Browse files
agent.py
CHANGED
@@ -4,31 +4,28 @@ from langchain_openai import ChatOpenAI
|
|
4 |
from langchain.agents import AgentExecutor, create_openai_functions_agent
|
5 |
|
6 |
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
7 |
-
from langchain_core.messages import AIMessage, HumanMessage # SystemMessage can be implicitly created
|
8 |
|
9 |
# --- Import your defined tools FROM THE 'tools' PACKAGE ---
|
10 |
from tools import (
|
11 |
BioPortalLookupTool,
|
12 |
UMLSLookupTool,
|
13 |
QuantumTreatmentOptimizerTool,
|
14 |
-
# QuantumOptimizerInput, # Schemas are primarily used by tools themselves
|
15 |
-
# GeminiTool, # Not using in this OpenAI-centric agent
|
16 |
)
|
17 |
|
18 |
from config.settings import settings
|
19 |
from services.logger import app_logger
|
20 |
|
21 |
# --- Initialize LLM (OpenAI) ---
|
22 |
-
llm = None
|
23 |
try:
|
24 |
if not settings.OPENAI_API_KEY:
|
25 |
app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
|
26 |
raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")
|
27 |
|
28 |
llm = ChatOpenAI(
|
29 |
-
model_name="gpt-4-turbo-preview", # More capable
|
30 |
-
|
31 |
-
temperature=0.1, # Lower for more deterministic tool use and function calls
|
32 |
openai_api_key=settings.OPENAI_API_KEY
|
33 |
)
|
34 |
app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
|
@@ -41,7 +38,7 @@ except Exception as e:
|
|
41 |
app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
|
42 |
else:
|
43 |
app_logger.error(user_facing_error, exc_info=True)
|
44 |
-
raise ValueError(user_facing_error)
|
45 |
|
46 |
|
47 |
# --- Initialize Tools List ---
|
@@ -53,46 +50,47 @@ tools_list = [
|
|
53 |
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
|
54 |
|
55 |
|
56 |
-
# --- Agent Prompt (for OpenAI Functions Agent -
|
57 |
-
|
58 |
-
# via the function-calling mechanism. Explicitly listing {tools} and {tool_names} in the
|
59 |
-
# system prompt string might be redundant or conflict with how this agent type works.
|
60 |
-
# We will still provide overall instructions and patient_context placeholder.
|
61 |
-
OPENAI_SYSTEM_PROMPT_TEXT_SIMPLIFIED = (
|
62 |
"You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
|
63 |
-
"Your goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
|
64 |
-
"You have access to a set of specialized tools. Use them when a user's query can be best answered by one of them
|
65 |
"Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
|
66 |
"Patient Context for this session (if provided by the user earlier): {patient_context}\n" # This variable is passed from invoke
|
67 |
-
|
68 |
-
"
|
69 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
70 |
)
|
71 |
|
72 |
-
# The ChatPromptTemplate defines the sequence of messages sent to the LLM.
|
73 |
-
# `create_openai_functions_agent` expects specific placeholders.
|
74 |
prompt = ChatPromptTemplate.from_messages([
|
75 |
-
("system",
|
76 |
-
MessagesPlaceholder(variable_name="chat_history"),
|
77 |
-
("human", "{input}"),
|
78 |
-
MessagesPlaceholder(variable_name="agent_scratchpad")
|
79 |
])
|
80 |
-
app_logger.info("Agent prompt template (
|
81 |
|
82 |
# --- Create Agent ---
|
83 |
-
if llm is None:
|
84 |
app_logger.critical("LLM object is None at agent creation (OpenAI). Application cannot proceed.")
|
85 |
-
raise SystemExit("Agent LLM failed to initialize.
|
86 |
|
87 |
try:
|
88 |
-
# `create_openai_functions_agent` will use the tools' Pydantic schemas to define
|
89 |
-
# the "functions" that the OpenAI model can call.
|
90 |
agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
|
91 |
app_logger.info("OpenAI Functions agent created successfully.")
|
92 |
except Exception as e:
|
93 |
app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
|
94 |
-
# This is where the "Input to ChatPromptTemplate is missing variables" error would occur
|
95 |
-
# if the prompt object was still expecting variables not provided by the agent constructor or invoke.
|
96 |
raise ValueError(f"OpenAI agent creation failed: {e}")
|
97 |
|
98 |
|
@@ -100,36 +98,28 @@ except Exception as e:
|
|
100 |
agent_executor = AgentExecutor(
|
101 |
agent=agent,
|
102 |
tools=tools_list,
|
103 |
-
verbose=True,
|
104 |
-
handle_parsing_errors=True, #
|
105 |
-
max_iterations=7,
|
106 |
-
# return_intermediate_steps=True, #
|
107 |
)
|
108 |
app_logger.info("AgentExecutor with OpenAI agent created successfully.")
|
109 |
|
110 |
|
111 |
# --- Getter Function for Streamlit App ---
|
112 |
-
_agent_executor_instance = agent_executor
|
113 |
|
114 |
def get_agent_executor():
|
115 |
-
"""
|
116 |
-
Returns the configured agent executor for OpenAI.
|
117 |
-
The executor is initialized when this module is first imported.
|
118 |
-
"""
|
119 |
global _agent_executor_instance
|
120 |
if _agent_executor_instance is None:
|
121 |
-
# This indicates a failure during the initial module load (LLM or agent creation).
|
122 |
app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI). Initialization likely failed.")
|
123 |
-
raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check application startup logs for errors
|
124 |
-
|
125 |
-
# Final check for API key, though LLM initialization should be the primary guard.
|
126 |
-
if not settings.OPENAI_API_KEY:
|
127 |
app_logger.error("OpenAI API Key is missing at get_agent_executor call. Agent will fail.")
|
128 |
raise ValueError("OpenAI API Key not configured.")
|
129 |
-
|
130 |
return _agent_executor_instance
|
131 |
|
132 |
-
# --- Example Usage (for local testing
|
133 |
if __name__ == "__main__":
|
134 |
if not settings.OPENAI_API_KEY:
|
135 |
print("🚨 Please set your OPENAI_API_KEY in .env file or as an environment variable to run the test.")
|
@@ -141,19 +131,17 @@ if __name__ == "__main__":
|
|
141 |
print("-" * 59)
|
142 |
|
143 |
try:
|
144 |
-
test_executor = get_agent_executor()
|
145 |
-
except ValueError as e_init:
|
146 |
print(f"⚠️ Agent initialization failed during test startup: {e_init}")
|
147 |
print("Ensure your API key is correctly configured and prompt variables are set.")
|
148 |
-
exit()
|
149 |
-
|
150 |
-
current_chat_history_for_test_run = [] # List of HumanMessage, AIMessage
|
151 |
|
152 |
-
|
153 |
test_patient_context_summary_str = (
|
154 |
-
"Age:
|
155 |
-
"Key Medical History:
|
156 |
-
"Current Medications:
|
157 |
)
|
158 |
print(f"ℹ️ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")
|
159 |
|
@@ -162,13 +150,11 @@ if __name__ == "__main__":
|
|
162 |
if user_input_str.lower() in ["exit", "quit"]:
|
163 |
print("👋 Exiting test console.")
|
164 |
break
|
165 |
-
if not user_input_str:
|
166 |
continue
|
167 |
|
168 |
try:
|
169 |
app_logger.info(f"__main__ test (OpenAI): Invoking with input: '{user_input_str}'")
|
170 |
-
# These are the keys expected by the ChatPromptTemplate and agent:
|
171 |
-
# "input", "chat_history", and "patient_context" (because it's in our system prompt)
|
172 |
response_dict = test_executor.invoke({
|
173 |
"input": user_input_str,
|
174 |
"chat_history": current_chat_history_for_test_run,
|
@@ -178,12 +164,10 @@ if __name__ == "__main__":
|
|
178 |
ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
|
179 |
print(f"🤖 Agent: {ai_output_str}")
|
180 |
|
181 |
-
# Update history for the next turn
|
182 |
current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
|
183 |
current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
|
184 |
|
185 |
-
|
186 |
-
if len(current_chat_history_for_test_run) > 10: # Keep last 5 pairs
|
187 |
current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
|
188 |
|
189 |
except Exception as e_invoke:
|
|
|
4 |
from langchain.agents import AgentExecutor, create_openai_functions_agent
|
5 |
|
6 |
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
7 |
+
from langchain_core.messages import AIMessage, HumanMessage # SystemMessage can be implicitly created
|
8 |
|
9 |
# --- Import your defined tools FROM THE 'tools' PACKAGE ---
|
10 |
from tools import (
|
11 |
BioPortalLookupTool,
|
12 |
UMLSLookupTool,
|
13 |
QuantumTreatmentOptimizerTool,
|
|
|
|
|
14 |
)
|
15 |
|
16 |
from config.settings import settings
|
17 |
from services.logger import app_logger
|
18 |
|
19 |
# --- Initialize LLM (OpenAI) ---
|
20 |
+
llm = None
|
21 |
try:
|
22 |
if not settings.OPENAI_API_KEY:
|
23 |
app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
|
24 |
raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")
|
25 |
|
26 |
llm = ChatOpenAI(
|
27 |
+
model_name="gpt-4-turbo-preview", # More capable for complex instruction following
|
28 |
+
temperature=0.1, # Low temperature for more deterministic and structured output for tool calls
|
|
|
29 |
openai_api_key=settings.OPENAI_API_KEY
|
30 |
)
|
31 |
app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
|
|
|
38 |
app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
|
39 |
else:
|
40 |
app_logger.error(user_facing_error, exc_info=True)
|
41 |
+
raise ValueError(user_facing_error)
|
42 |
|
43 |
|
44 |
# --- Initialize Tools List ---
|
|
|
50 |
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
|
51 |
|
52 |
|
53 |
+
# --- Agent Prompt (for OpenAI Functions Agent - Enhanced System Prompt) ---
# NOTE(fix): {tool_names} and {tools} below are ChatPromptTemplate input
# variables. Unlike the ReAct agent constructor, `create_openai_functions_agent`
# does NOT fill them in, and the invoke() payload only supplies "input",
# "chat_history" and "patient_context" -- so without the .partial(...) call
# after the template, every invocation would raise
# "Input to ChatPromptTemplate is missing variables: tools, tool_names".
OPENAI_SYSTEM_PROMPT_TEXT_ENHANCED = (
    "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
    "Your primary goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
    "You have access to a set of specialized tools (their names are: {tool_names}). Their detailed descriptions are available to you: {tools}. Use them when a user's query can be best answered by one of them.\n"
    "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
    "Patient Context for this session (if provided by the user earlier): {patient_context}\n"
    "Tool Usage Guidelines:\n"
    "1. When using the 'quantum_treatment_optimizer' tool, its 'action_input' argument requires three main keys: 'patient_data', 'current_treatments', and 'conditions'.\n"
    "   - The 'patient_data' key MUST be a dictionary. Populate this dictionary by extracting relevant details from the {patient_context}. "
    "     For example, if {patient_context} is 'Age: 50; Gender: Male; Key Medical History: Hypertension; Chief Complaint: headache', "
    "     then 'patient_data' could be {{\"age\": 50, \"gender\": \"Male\", \"relevant_history\": [\"Hypertension\"], \"symptoms\": [\"headache\"]}}. "
    "     Include details like age, gender, chief complaint, key medical history, and current medications from {patient_context} within this 'patient_data' dictionary. If a value is not present in context, omit the key or use null/None if appropriate for the tool, but prioritize providing what is available.\n"
    "   - 'current_treatments' should be a list of strings derived from the 'Current Medications' part of {patient_context}.\n"
    "   - 'conditions' should be a list of strings, including primary conditions from the 'Key Medical History' or 'Chief Complaint' parts of {patient_context}, and any conditions explicitly mentioned or implied by the current user query.\n"
    "2. For `bioportal_lookup`, the 'action_input' should be a dictionary like {{\"term\": \"search_term\", \"ontology\": \"ONTOLOGY_ACRONYM\"}}. If the user doesn't specify an ontology, you may ask for clarification or default to 'SNOMEDCT_US'.\n"
    "3. For `umls_lookup`, the 'action_input' is a single string: the medical term to search.\n"
    "4. After using a tool, you will receive an observation. Use this observation and your general knowledge to formulate a comprehensive final answer to the human. Clearly cite the tool if its output forms a key part of your answer (e.g., 'According to UMLS Lookup...').\n"
    "5. If a user's query seems to ask for treatment advice or medication suggestions for a specific scenario (especially if patient context is available), you MUST prioritize using the 'quantum_treatment_optimizer' tool.\n"
    "6. For general medical knowledge questions not requiring patient-specific optimization or specific ontology/CUI lookups, you may answer directly from your training data, but always include the standard disclaimer."
)

# The message sequence expected by `create_openai_functions_agent`:
# system prompt, prior turns, the current human input, and the scratchpad
# placeholder the agent uses to record intermediate tool calls.
prompt = ChatPromptTemplate.from_messages([
    ("system", OPENAI_SYSTEM_PROMPT_TEXT_ENHANCED),
    MessagesPlaceholder(variable_name="chat_history"),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
]).partial(
    # Tool metadata is static for the process lifetime, so render it once
    # here instead of requiring every invoke() call to pass it.
    tool_names=", ".join(t.name for t in tools_list),
    tools="\n".join(f"{t.name}: {t.description}" for t in tools_list),
)
app_logger.info("Agent prompt template (enhanced for OpenAI Functions) created.")
|
83 |
|
84 |
# --- Create Agent ---
if llm is None:
    # Defensive guard: the initialization block above either sets `llm` or
    # raises, but fail fast with an explicit message if it was bypassed.
    app_logger.critical("LLM object is None at agent creation (OpenAI). Application cannot proceed.")
    raise SystemExit("Agent LLM failed to initialize.")

try:
    # `create_openai_functions_agent` turns each tool's Pydantic args schema
    # into an OpenAI "function" definition the model can choose to call.
    agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
    app_logger.info("OpenAI Functions agent created successfully.")
except Exception as e:
    app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
    # Chain the original exception so the root cause survives in tracebacks
    # instead of being flattened into the message string only.
    raise ValueError(f"OpenAI agent creation failed: {e}") from e
|
95 |
|
96 |
|
|
|
98 |
# Runtime wrapper that drives the think -> call tool -> observe loop.
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools_list,
    verbose=True,                # log every reasoning/tool step for debugging
    handle_parsing_errors=True,  # recover gracefully from malformed function-call output
    max_iterations=7,            # hard stop so a confused agent cannot loop forever
    # return_intermediate_steps=True,  # enable for deeper debugging of agent steps
)
app_logger.info("AgentExecutor with OpenAI agent created successfully.")
|
107 |
|
108 |
|
109 |
# --- Getter Function for Streamlit App ---
# Built once at import time; get_agent_executor() hands out this singleton.
_agent_executor_instance = agent_executor

def get_agent_executor():
    """Return the module-level AgentExecutor configured for OpenAI.

    The executor is constructed when this module is first imported; this
    accessor only validates that initialization succeeded and that the
    API key is still available.

    Returns:
        AgentExecutor: the shared, fully-initialized executor instance.

    Raises:
        RuntimeError: if the executor was never initialized (startup failure).
        ValueError: if the OpenAI API key is missing from settings.
    """
    # No `global` statement needed: the name is only read, never rebound.
    if _agent_executor_instance is None:
        app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI). Initialization likely failed.")
        raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check application startup logs for errors.")
    if not settings.OPENAI_API_KEY:  # Final check; LLM init is the primary guard.
        app_logger.error("OpenAI API Key is missing at get_agent_executor call. Agent will fail.")
        raise ValueError("OpenAI API Key not configured.")
    return _agent_executor_instance
|
121 |
|
122 |
+
# --- Example Usage (for local testing) ---
|
123 |
if __name__ == "__main__":
|
124 |
if not settings.OPENAI_API_KEY:
|
125 |
print("🚨 Please set your OPENAI_API_KEY in .env file or as an environment variable to run the test.")
|
|
|
131 |
print("-" * 59)
|
132 |
|
133 |
try:
|
134 |
+
test_executor = get_agent_executor()
|
135 |
+
except ValueError as e_init:
|
136 |
print(f"⚠️ Agent initialization failed during test startup: {e_init}")
|
137 |
print("Ensure your API key is correctly configured and prompt variables are set.")
|
138 |
+
exit()
|
|
|
|
|
139 |
|
140 |
+
current_chat_history_for_test_run = []
|
141 |
test_patient_context_summary_str = (
|
142 |
+
"Age: 60; Gender: Male; Chief Complaint: general fatigue and occasional dizziness; "
|
143 |
+
"Key Medical History: Type 2 Diabetes, Hypertension; "
|
144 |
+
"Current Medications: Metformin 1000mg daily, Lisinopril 20mg daily; Allergies: None."
|
145 |
)
|
146 |
print(f"ℹ️ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")
|
147 |
|
|
|
150 |
if user_input_str.lower() in ["exit", "quit"]:
|
151 |
print("👋 Exiting test console.")
|
152 |
break
|
153 |
+
if not user_input_str:
|
154 |
continue
|
155 |
|
156 |
try:
|
157 |
app_logger.info(f"__main__ test (OpenAI): Invoking with input: '{user_input_str}'")
|
|
|
|
|
158 |
response_dict = test_executor.invoke({
|
159 |
"input": user_input_str,
|
160 |
"chat_history": current_chat_history_for_test_run,
|
|
|
164 |
ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
|
165 |
print(f"🤖 Agent: {ai_output_str}")
|
166 |
|
|
|
167 |
current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
|
168 |
current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
|
169 |
|
170 |
+
if len(current_chat_history_for_test_run) > 10:
|
|
|
171 |
current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
|
172 |
|
173 |
except Exception as e_invoke:
|