Update agent.py
Browse files
agent.py
CHANGED
@@ -1,165 +1,173 @@
|
|
|
|
1 |
import os
|
2 |
-
import
|
3 |
-
from
|
4 |
-
|
5 |
-
from
|
6 |
-
from
|
7 |
-
|
8 |
-
|
9 |
-
from
|
10 |
-
|
11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
from services.logger import app_logger
|
13 |
-
from tools import BioPortalLookupTool, UMLSLookupTool, QuantumTreatmentOptimizerTool
|
14 |
|
15 |
-
#
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
try:
|
30 |
-
llm = ChatGoogleGenerativeAI(
|
31 |
-
model="gemini-1.5-pro-latest",
|
32 |
-
temperature=0.2,
|
33 |
-
google_api_key=api_key,
|
34 |
-
convert_system_message_to_human=True,
|
35 |
-
)
|
36 |
-
app_logger.info(f"Gemini LLM initialized ({llm.model})")
|
37 |
-
return llm
|
38 |
-
except Exception as e:
|
39 |
-
err = f"Failed to initialize Gemini LLM: {e}"
|
40 |
-
app_logger.error(err, exc_info=True)
|
41 |
-
raise ValueError(err)
|
42 |
-
|
43 |
-
# -----------------------------------------------------------------------------
|
44 |
-
# 2. Build the structured chat prompt
|
45 |
-
# -----------------------------------------------------------------------------
|
46 |
-
def _build_prompt_template(tool_names: List[str], tools) -> ChatPromptTemplate:
|
47 |
-
"""
|
48 |
-
Construct a ChatPromptTemplate that includes:
|
49 |
-
- a system instruction block,
|
50 |
-
- a placeholder for chat_history (List[BaseMessage]),
|
51 |
-
- the current human input,
|
52 |
-
- a placeholder for agent_scratchpad (List[BaseMessage]) to manage tool calls.
|
53 |
-
"""
|
54 |
-
system_text = (
|
55 |
-
"You are Quantum Health Navigator, an AI assistant for healthcare professionals.\n\n"
|
56 |
-
"β’ Disclaim: you are an AI, not a substitute for clinical judgment.\n"
|
57 |
-
"β’ Patient context: {patient_context}\n"
|
58 |
-
"β’ Available tools: {tool_names}\n"
|
59 |
-
"{tools}\n\n"
|
60 |
-
"To call a tool, reply *only* with a JSON code block:\n"
|
61 |
-
"{{\"action\": \"<tool_name>\", \"action_input\": <input>}}\n\n"
|
62 |
-
"After you receive the toolβs output, craft a full answer for the user, citing any tools used."
|
63 |
)
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
76 |
"""
|
77 |
-
Returns
|
78 |
-
|
79 |
"""
|
80 |
global _agent_executor_instance
|
81 |
-
if
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
tools_list = [
|
87 |
-
UMLSLookupTool(),
|
88 |
-
BioPortalLookupTool(),
|
89 |
-
QuantumTreatmentOptimizerTool(),
|
90 |
-
]
|
91 |
-
app_logger.info(f"Loaded tools: {[t.name for t in tools_list]}")
|
92 |
-
|
93 |
-
# 3.3 Build prompt
|
94 |
-
prompt = _build_prompt_template(
|
95 |
-
tool_names=[t.name for t in tools_list],
|
96 |
-
tools=tools_list
|
97 |
-
)
|
98 |
-
app_logger.info("Prompt template built")
|
99 |
-
|
100 |
-
# 3.4 Create the structured agent
|
101 |
-
agent = create_structured_chat_agent(
|
102 |
-
llm=llm,
|
103 |
-
tools=tools_list,
|
104 |
-
prompt=prompt
|
105 |
-
)
|
106 |
-
app_logger.info("Structured chat agent created")
|
107 |
-
|
108 |
-
# 3.5 Create the executor
|
109 |
-
executor = AgentExecutor(
|
110 |
-
agent=agent,
|
111 |
-
tools=tools_list,
|
112 |
-
verbose=True,
|
113 |
-
handle_parsing_errors=True,
|
114 |
-
max_iterations=10,
|
115 |
-
early_stopping_method="generate",
|
116 |
-
)
|
117 |
-
app_logger.info("AgentExecutor initialized")
|
118 |
-
_agent_executor_instance = executor
|
119 |
-
|
120 |
return _agent_executor_instance
|
121 |
|
122 |
-
#
|
123 |
-
# 4. Optional REPL for local testing
|
124 |
-
# -----------------------------------------------------------------------------
|
125 |
if __name__ == "__main__":
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
print(
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
patient_context = (
|
134 |
-
"Age: 58; Gender: Female; Chief Complaint: Blurry vision & fatigue; "
|
135 |
-
"History: Prediabetes, mild dyslipidemia; Medications: None."
|
136 |
-
)
|
137 |
-
chat_history: List[Union[SystemMessage, HumanMessage, AIMessage]] = []
|
138 |
-
|
139 |
-
print("π Quantum Health Navigator Console (type 'exit' to quit)")
|
140 |
-
while True:
|
141 |
-
user_input = input("π€ You: ").strip()
|
142 |
-
if user_input.lower() in {"exit", "quit"}:
|
143 |
-
print("π Goodbye!")
|
144 |
-
break
|
145 |
-
if not user_input:
|
146 |
-
continue
|
147 |
-
|
148 |
try:
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# /home/user/app/agent.py
|
2 |
import os
|
3 |
+
from langchain_openai import ChatOpenAI # For OpenAI models
|
4 |
+
from langchain.agents import AgentExecutor, create_openai_functions_agent # Agent optimized for OpenAI function calling
|
5 |
+
|
6 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
7 |
+
from langchain_core.messages import AIMessage, HumanMessage # SystemMessage not always explicitly needed in prompt list for this agent
|
8 |
+
|
9 |
+
# --- Import your defined tools FROM THE 'tools' PACKAGE ---
|
10 |
+
from tools import (
|
11 |
+
BioPortalLookupTool,
|
12 |
+
UMLSLookupTool,
|
13 |
+
QuantumTreatmentOptimizerTool,
|
14 |
+
# QuantumOptimizerInput, # Import if needed for type hints directly in this file
|
15 |
+
# GeminiTool, # Not needed if primary LLM is OpenAI
|
16 |
+
)
|
17 |
+
|
18 |
+
from config.settings import settings # This loads your HF secrets into the settings object
|
19 |
from services.logger import app_logger
|
|
|
20 |
|
21 |
+
# --- Initialize LLM (OpenAI) ---
# The LLM is created at import time so the agent executor below is built once
# per process; any failure here is fatal for the whole module.
llm = None
try:
    if not settings.OPENAI_API_KEY:
        app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
        raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")

    # "gpt-3.5-turbo-0125" balances cost and function-calling capability;
    # "gpt-4-turbo-preview" / "gpt-4" are more capable but more expensive.
    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo-0125",
        temperature=0.2,  # low temperature for more predictable tool use
        openai_api_key=settings.OPENAI_API_KEY,
    )
    app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
except Exception as e:
    detailed_error_message = str(e)
    user_facing_error = f"OpenAI LLM initialization failed: {detailed_error_message}. Check API key and model name."
    # Authentication problems get a friendlier message and no traceback spam.
    if "api_key" in detailed_error_message.lower() or "authenticate" in detailed_error_message.lower():
        user_facing_error = "OpenAI LLM initialization failed: API key issue. Ensure OPENAI_API_KEY is correctly set in Hugging Face Secrets and is valid."
        app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
    else:
        app_logger.error(user_facing_error, exc_info=True)
    # FIX: chain the original exception so the root cause survives in tracebacks.
    raise ValueError(user_facing_error) from e
|
47 |
+
|
48 |
+
|
49 |
+
# --- Initialize Tools List ---
# Instantiate one of each tool the agent may call; order is not significant.
tools_list = [
    tool_cls()
    for tool_cls in (UMLSLookupTool, BioPortalLookupTool, QuantumTreatmentOptimizerTool)
]
app_logger.info(f"Agent tools initialized: {[t.name for t in tools_list]}")
|
56 |
+
|
57 |
+
|
58 |
+
# --- Agent Prompt (for OpenAI Functions Agent) ---
# OpenAI's native function-calling mechanism (leveraged by
# create_openai_functions_agent) handles the tool-invocation protocol, so this
# prompt only needs system instructions plus placeholders for history, the
# current input, and the agent scratchpad. The {patient_context} variable must
# be supplied in every invoke() payload.
OPENAI_SYSTEM_PROMPT_TEXT = (
    "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
    "Your goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
    "You have access to a set of tools to help you. Use them when appropriate.\n"
    "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
    "Patient Context for this session (if provided by the user earlier): {patient_context}\n"
    "When using the 'quantum_treatment_optimizer' tool, ensure you populate its 'patient_data' argument using the available {patient_context}.\n"
    "Always be clear and concise. Cite tools if their output forms a key part of your answer."
)

# System message, then prior turns, then the new human turn, then the
# agent_scratchpad placeholder (required for the OpenAI Functions agent to
# track intermediate tool calls).
_prompt_messages = [
    ("system", OPENAI_SYSTEM_PROMPT_TEXT),
    MessagesPlaceholder(variable_name="chat_history"),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
]
prompt = ChatPromptTemplate.from_messages(_prompt_messages)
app_logger.info("Agent prompt template created for OpenAI Functions agent.")
|
85 |
+
|
86 |
+
# --- Create Agent ---
# Guard: fail fast with a clear message if the import-time LLM init above did
# not produce an instance, rather than crashing inside LangChain.
if llm is None:
    app_logger.critical("LLM object is None at agent creation stage (OpenAI). Application cannot proceed.")
    raise SystemExit("Agent LLM failed to initialize. Application cannot start.")

try:
    # Binds the tool schemas to the LLM via OpenAI's function-calling API.
    agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
    app_logger.info("OpenAI Functions agent created successfully.")
except Exception as e:
    app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
    # FIX: chain the cause so the original LangChain error is not lost.
    raise ValueError(f"OpenAI agent creation failed: {e}") from e
|
97 |
+
|
98 |
+
|
99 |
+
# --- Create Agent Executor ---
# Wrap the agent in the executor that drives the think/tool-call/observe loop.
_executor_kwargs = dict(
    agent=agent,
    tools=tools_list,
    verbose=True,
    handle_parsing_errors=True,  # recover when the LLM emits a malformed function call
    max_iterations=10,           # hard stop against runaway tool loops
    # return_intermediate_steps=True,  # useful for debugging
)
agent_executor = AgentExecutor(**_executor_kwargs)
app_logger.info("AgentExecutor with OpenAI agent created successfully.")
|
109 |
+
|
110 |
+
|
111 |
+
# --- Getter Function for Streamlit App ---
# Snapshot the module-level executor; the accessor below validates it on use.
_agent_executor_instance = agent_executor

def get_agent_executor():
    """
    Return the configured OpenAI agent executor.

    The executor is built once when this module is first imported; this
    accessor only validates that the import-time initialization succeeded.

    Raises:
        RuntimeError: if the executor was never initialized (startup failure).
    """
    # FIX: dropped the needless `global` statement — this function only reads
    # the module-level variable, it never rebinds it.
    if _agent_executor_instance is None:
        app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI).")
        raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check startup logs.")
    # A settings.OPENAI_API_KEY re-check could go here, but the LLM init
    # should already have caught a missing key.
    return _agent_executor_instance
|
126 |
|
127 |
+
# --- Example Usage (for local testing) ---
if __name__ == "__main__":
    if not settings.OPENAI_API_KEY:
        print("π¨ Please set your OPENAI_API_KEY in .env file or as an environment variable to run the test.")
    else:
        print("\nπ Quantum Health Navigator (OpenAI Agent Test Console) π")
        print("-----------------------------------------------------------")
        try:
            test_executor = get_agent_executor()
        # BUG FIX: get_agent_executor() raises RuntimeError (not ValueError)
        # when the executor is missing, so the old `except ValueError` could
        # never catch it; catch both so init failures are reported cleanly.
        except (ValueError, RuntimeError) as e_init:
            print(f"β οΈ Agent initialization failed: {e_init}")
            # FIX: raise SystemExit(1) (non-zero status) instead of the
            # site-module exit(), which is REPL-only and exits with status 0.
            raise SystemExit(1)

        current_chat_history_for_test_run = []  # alternating HumanMessage/AIMessage
        test_patient_context_summary_str = (
            "Age: 70; Gender: Male; Chief Complaint: Shortness of breath on exertion; "
            "Key Medical History: COPD, Atrial Fibrillation; "
            "Current Medications: Tiotropium inhaler, Apixaban 5mg BID; Allergies: Penicillin."
        )
        print(f"βΉοΈ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")

        while True:
            user_input_str = input("π€ You: ").strip()
            if user_input_str.lower() in ["exit", "quit"]:
                print("π Exiting.")
                break
            if not user_input_str:
                continue
            try:
                app_logger.info(f"__main__ test (OpenAI): Invoking with: '{user_input_str}'")
                # Invoke keys must match the prompt variables:
                # "input", "chat_history", and "patient_context".
                response_dict = test_executor.invoke({
                    "input": user_input_str,
                    "chat_history": current_chat_history_for_test_run,
                    "patient_context": test_patient_context_summary_str,
                })
                ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
                print(f"π€ Agent: {ai_output_str}")
                # Record this turn, keeping only the last 10 messages
                # (5 exchanges) to bound the prompt size.
                current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
                current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
                if len(current_chat_history_for_test_run) > 10:
                    current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
            except Exception as e:
                print(f"β οΈ Error during agent invocation: {e}")
                app_logger.error(f"Error in __main__ OpenAI agent test invocation: {e}", exc_info=True)
|