Update agent.py
agent.py
CHANGED
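This diff begins at line 6 of the file, so the module's opening imports are not shown. Judging from the names used below and the hunk context, the top of agent.py presumably contains something close to the following; the ChatOpenAI import path is an assumption, since it differs across LangChain versions:

from langchain_openai import ChatOpenAI  # assumed path; older code may use `from langchain.chat_models import ChatOpenAI`
from langchain.agents import AgentExecutor, create_openai_functions_agent

The updated content, from line 6 onward: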
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage

# --- Import your defined tools FROM THE 'tools' PACKAGE ---
# This relies on tools/__init__.py correctly exporting these names.
from tools import (
    BioPortalLookupTool,
    UMLSLookupTool,
    QuantumTreatmentOptimizerTool,
    # QuantumOptimizerInput,  # Only if needed for type hints directly in this file
)

from config.settings import settings
from services.logger import app_logger

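# Illustrative sketch (not from this commit): the package-level import above assumes that
# tools/__init__.py re-exports the tool classes, roughly along these lines. The submodule
# names are guesses; only the exported class names come from the import statement itself.
#
#   from .umls_tool import UMLSLookupTool
#   from .bioportal_tool import BioPortalLookupTool
#   from .quantum_treatment_optimizer import QuantumTreatmentOptimizerTool, QuantumOptimizerInput
#
#   __all__ = ["UMLSLookupTool", "BioPortalLookupTool",
#              "QuantumTreatmentOptimizerTool", "QuantumOptimizerInput"]
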
# --- Initialize LLM (OpenAI) ---
llm = None
try:
    if not settings.OPENAI_API_KEY:
        app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
        raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")

    llm = ChatOpenAI(
        model_name="gpt-4-turbo-preview",  # More capable for function calling & instruction following
        temperature=0.1,                   # Low for more deterministic tool use
        openai_api_key=settings.OPENAI_API_KEY
    )
    app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")

except Exception as e:
    detailed_error_message = str(e)
    user_facing_error = f"OpenAI LLM initialization failed: {detailed_error_message}. Check API key and model name."
    if "api_key" in detailed_error_message.lower() or "authenticate" in detailed_error_message.lower():
        user_facing_error = "OpenAI LLM initialization failed: API key issue. Ensure OPENAI_API_KEY is correctly set in Hugging Face Secrets and is valid."
        app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
    else:
        app_logger.error(user_facing_error, exc_info=True)
    raise ValueError(user_facing_error)

# --- Initialize Tools List ---
tools_list = [
    UMLSLookupTool(),
    BioPortalLookupTool(),
    QuantumTreatmentOptimizerTool(),
]
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")

# --- Agent Prompt (for OpenAI Functions Agent - Explicitly including {tools} and {tool_names}) ---
# The KeyError indicated that ChatPromptTemplate was expecting 'tools' and 'tool_names' as input variables.
# create_openai_functions_agent should populate these if these placeholders are in the system message.
OPENAI_SYSTEM_PROMPT_WITH_EXPLICIT_TOOLS_VARS = (
    "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
    "Your primary goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
    "You have access to a set of specialized tools. Their names are: {tool_names}. Their detailed descriptions are: {tools}. Use them when a user's query can be best answered by one of them.\n"
    "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
    "Patient Context for this session (if provided by the user earlier): {patient_context}\n"  # This variable is passed from invoke

    "Tool Usage Guidelines:\n"
    "1. When using the 'quantum_treatment_optimizer' tool, its 'action_input' argument requires three main keys: 'patient_data', 'current_treatments', and 'conditions'.\n"
    "   - The 'patient_data' key MUST be a dictionary. Populate this dictionary by extracting relevant details from the {patient_context}. "
    "     For example, if {patient_context} is 'Age: 50; Gender: Male; Key Medical History: Hypertension; Chief Complaint: headache', "
    "     then 'patient_data' could be {{\"age\": 50, \"gender\": \"Male\", \"relevant_history\": [\"Hypertension\"], \"symptoms\": [\"headache\"]}}. "
    "     Include details like age, gender, chief complaint, key medical history, and current medications from {patient_context} within this 'patient_data' dictionary.\n"
    "   - 'current_treatments' should be a list of strings derived from the 'Current Medications' part of {patient_context}.\n"
    "   - 'conditions' should be a list of strings, including primary conditions from the 'Key Medical History' or 'Chief Complaint' parts of {patient_context}, and any conditions explicitly mentioned or implied by the current user query.\n"
    "2. For `bioportal_lookup`, the 'action_input' should be a dictionary like {{\"term\": \"search_term\", \"ontology\": \"ONTOLOGY_ACRONYM\"}}. If the user doesn't specify an ontology, you may ask for clarification or default to 'SNOMEDCT_US'.\n"
    "3. For `umls_lookup`, the 'action_input' is a single string: the medical term to search.\n"
    "4. After using a tool, you will receive an observation. Use this observation and your general knowledge to formulate a comprehensive final answer to the human. Clearly cite the tool if its output forms a key part of your answer.\n"
    "5. If a user's query seems to ask for treatment advice or medication suggestions for a specific scenario (especially if patient context is available), you MUST prioritize using the 'quantum_treatment_optimizer' tool.\n"
    "6. For general medical knowledge questions not requiring patient-specific optimization or specific ontology/CUI lookups, you may answer directly from your training data, but always include the standard disclaimer."
)

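# Illustrative example of guideline 1 above (values taken from the simulated patient context
# in the __main__ test at the bottom of this file; the exact field names inside 'patient_data'
# are suggestions rather than a fixed schema):
#
#   {
#       "patient_data": {
#           "age": 60, "gender": "Male",
#           "relevant_history": ["Type 2 Diabetes", "Hypertension"],
#           "symptoms": ["general fatigue", "occasional dizziness"],
#       },
#       "current_treatments": ["Metformin 1000mg daily", "Lisinopril 20mg daily"],
#       "conditions": ["Type 2 Diabetes", "Hypertension"],
#   }
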
# ChatPromptTemplate defines the sequence of messages.
# Variables here are what the agent_executor.invoke will ultimately need to provide or what the agent manages.
prompt = ChatPromptTemplate.from_messages([
    ("system", OPENAI_SYSTEM_PROMPT_WITH_EXPLICIT_TOOLS_VARS),  # System instructions, expects {patient_context}, {tools}, {tool_names}
    MessagesPlaceholder(variable_name="chat_history"),          # For past Human/AI messages
    ("human", "{input}"),                                       # For the current user query
    MessagesPlaceholder(variable_name="agent_scratchpad")       # For agent's internal work (function calls/responses)
])
app_logger.info("Agent prompt template (with explicit tools/tool_names in system message) created.")
# Log the input variables that this prompt structure will expect.
# `create_openai_functions_agent` should provide 'tools' and 'tool_names' to this prompt.
# The user (via invoke) provides 'input', 'chat_history', 'patient_context'.
# 'agent_scratchpad' is managed by the AgentExecutor.
app_logger.debug(f"Prompt expected input variables: {prompt.input_variables}")

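# With this message layout, the debug line above should report something like
#   ['agent_scratchpad', 'chat_history', 'input', 'patient_context', 'tool_names', 'tools']
# (inferred from the placeholders; ordering may differ, and this was not captured from a run).
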
# --- Create Agent ---
if llm is None:
    app_logger.critical("LLM object is None at agent creation (OpenAI). Application cannot proceed.")
    raise SystemExit("Agent LLM failed to initialize.")

try:
    # `create_openai_functions_agent` is given the llm, the raw tools_list, and the prompt.
    # It should process `tools_list` to make them available as OpenAI functions AND
    # populate the `{tools}` and `{tool_names}` placeholders in the prompt.
    agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
    app_logger.info("OpenAI Functions agent created successfully.")
except Exception as e:
    # This is where the KeyError "Input to ChatPromptTemplate is missing variables {'tools', 'tool_names'}"
    # was occurring.
    app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
    raise ValueError(f"OpenAI agent creation failed: {e}")

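# If a given LangChain version does not fill {tools}/{tool_names} automatically here, one
# workaround is to pre-fill them on the prompt before building the agent. A minimal sketch
# (the render import path varies by version):
#
#   from langchain.tools.render import render_text_description
#
#   prompt = prompt.partial(
#       tools=render_text_description(tools_list),
#       tool_names=", ".join(t.name for t in tools_list),
#   )
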
# --- Create Agent Executor ---
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools_list,  # Tools are also provided to the executor
    verbose=True,
    handle_parsing_errors=True,
    max_iterations=7,
    # return_intermediate_steps=True,  # Good for debugging
)
app_logger.info("AgentExecutor with OpenAI agent created successfully.")

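# If return_intermediate_steps=True is enabled above, the dict returned by invoke() also
# includes an "intermediate_steps" list of (AgentAction, observation) pairs, which is useful
# for checking which tools were called and with what arguments.
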
# --- Getter Function for Streamlit App ---
_agent_executor_instance = agent_executor

def get_agent_executor():
    global _agent_executor_instance
    if _agent_executor_instance is None:
        app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI).")
        raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check application startup logs.")
    if not settings.OPENAI_API_KEY:  # Final check
        app_logger.error("OpenAI API Key is missing at get_agent_executor call. Agent will fail.")
        raise ValueError("OpenAI API Key not configured.")
    return _agent_executor_instance

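# Typical call site (hypothetical Streamlit page, shown only as a usage sketch; variable
# names are placeholders):
#
#   from agent import get_agent_executor
#
#   executor = get_agent_executor()
#   result = executor.invoke({
#       "input": user_query,
#       "chat_history": prior_messages,       # list of HumanMessage / AIMessage
#       "patient_context": patient_context,   # same formatted context string as above
#   })
#   answer = result["output"]
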
# --- Example Usage (for local testing) ---
if __name__ == "__main__":
    if not settings.OPENAI_API_KEY:
        print("🚨 Please set your OPENAI_API_KEY in .env or environment.")

    print("\n🚀 Quantum Health Navigator (OpenAI Agent Test Console) 🚀")
    try: test_executor = get_agent_executor()
    except ValueError as e_init: print(f"⚠️ Agent init failed: {e_init}"); exit()

    history = []
    context_str = ("Age: 60; Gender: Male; Chief Complaint: general fatigue and occasional dizziness; "
                   "Key Medical History: Type 2 Diabetes, Hypertension; "
                   "Current Medications: Metformin 1000mg daily, Lisinopril 20mg daily; Allergies: None.")
    print(f"ℹ️ Simulated Context: {context_str}\n")

    while True:
        usr_in = input("👤 You: ").strip()
        if usr_in.lower() in ["exit", "quit"]: print("👋 Exiting."); break
        if not usr_in: continue
        try:
            # The keys here ('input', 'chat_history', 'patient_context') must match
            # what the ChatPromptTemplate ultimately expects after create_openai_functions_agent
            # has done its work with 'tools' and 'tool_names'.
            payload = {
                "input": usr_in,
                "chat_history": history,
                "patient_context": context_str,
                # Note: We do NOT explicitly pass 'tools' or 'tool_names' in invoke.
                # The `create_openai_functions_agent` is responsible for making these available
                # to the `prompt` object during its formatting process.
            }
            app_logger.info(f"__main__ test (OpenAI): Invoking with payload keys: {list(payload.keys())}")
            res = test_executor.invoke(payload)

            ai_out = res.get('output', "No output.")
            print(f"🤖 Agent: {ai_out}")
            history.extend([HumanMessage(content=usr_in), AIMessage(content=ai_out)])
            if len(history) > 8: history = history[-8:]
        except Exception as e_invoke:
            print(f"⚠️ Invoke Error: {type(e_invoke).__name__} - {e_invoke}")
            app_logger.error(f"Error in __main__ OpenAI agent test invocation: {e_invoke}", exc_info=True)