mgbam committed on
Commit
267db18
·
verified ·
1 Parent(s): 3c808a4

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +99 -87
agent.py CHANGED
@@ -4,30 +4,31 @@ from langchain_openai import ChatOpenAI
4
  from langchain.agents import AgentExecutor, create_openai_functions_agent
5
 
6
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
7
- from langchain_core.messages import AIMessage, HumanMessage
8
 
9
  # --- Import your defined tools FROM THE 'tools' PACKAGE ---
10
- # This relies on tools/__init__.py correctly exporting these names.
11
  from tools import (
12
  BioPortalLookupTool,
13
  UMLSLookupTool,
14
  QuantumTreatmentOptimizerTool,
15
- # QuantumOptimizerInput, # Only if needed for type hints directly in this file
 
16
  )
17
 
18
  from config.settings import settings
19
  from services.logger import app_logger
20
 
21
  # --- Initialize LLM (OpenAI) ---
22
- llm = None
23
  try:
24
  if not settings.OPENAI_API_KEY:
25
  app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
26
  raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")
27
 
28
  llm = ChatOpenAI(
29
- model_name="gpt-4-turbo-preview", # More capable for function calling & instruction following
30
- temperature=0.1, # Low for more deterministic tool use
 
31
  openai_api_key=settings.OPENAI_API_KEY
32
  )
33
  app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
@@ -40,7 +41,7 @@ except Exception as e:
40
  app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
41
  else:
42
  app_logger.error(user_facing_error, exc_info=True)
43
- raise ValueError(user_facing_error)
44
 
45
 
46
  # --- Initialize Tools List ---
@@ -52,128 +53,139 @@ tools_list = [
52
  app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
53
 
54
 
55
- # --- Agent Prompt (for OpenAI Functions Agent - Explicitly including {tools} and {tool_names}) ---
56
- # The KeyError indicated that ChatPromptTemplate was expecting 'tools' and 'tool_names' as input variables.
57
- # create_openai_functions_agent should populate these if these placeholders are in the system message.
58
- OPENAI_SYSTEM_PROMPT_WITH_EXPLICIT_TOOLS_VARS = (
 
 
59
  "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
60
- "Your primary goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
61
- "You have access to a set of specialized tools. Their names are: {tool_names}. Their detailed descriptions are: {tools}. Use them when a user's query can be best answered by one of them.\n"
62
  "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
63
  "Patient Context for this session (if provided by the user earlier): {patient_context}\n" # This variable is passed from invoke
64
-
65
- "Tool Usage Guidelines:\n"
66
- "1. When using the 'quantum_treatment_optimizer' tool, its 'action_input' argument requires three main keys: 'patient_data', 'current_treatments', and 'conditions'.\n"
67
- " - The 'patient_data' key MUST be a dictionary. Populate this dictionary by extracting relevant details from the {patient_context}. "
68
- " For example, if {patient_context} is 'Age: 50; Gender: Male; Key Medical History: Hypertension; Chief Complaint: headache', "
69
- " then 'patient_data' could be {{\"age\": 50, \"gender\": \"Male\", \"relevant_history\": [\"Hypertension\"], \"symptoms\": [\"headache\"]}}. "
70
- " Include details like age, gender, chief complaint, key medical history, and current medications from {patient_context} within this 'patient_data' dictionary.\n"
71
- " - 'current_treatments' should be a list of strings derived from the 'Current Medications' part of {patient_context}.\n"
72
- " - 'conditions' should be a list of strings, including primary conditions from the 'Key Medical History' or 'Chief Complaint' parts of {patient_context}, and any conditions explicitly mentioned or implied by the current user query.\n"
73
- "2. For `bioportal_lookup`, the 'action_input' should be a dictionary like {{\"term\": \"search_term\", \"ontology\": \"ONTOLOGY_ACRONYM\"}}. If the user doesn't specify an ontology, you may ask for clarification or default to 'SNOMEDCT_US'.\n"
74
- "3. For `umls_lookup`, the 'action_input' is a single string: the medical term to search.\n"
75
- "4. After using a tool, you will receive an observation. Use this observation and your general knowledge to formulate a comprehensive final answer to the human. Clearly cite the tool if its output forms a key part of your answer.\n"
76
- "5. If a user's query seems to ask for treatment advice or medication suggestions for a specific scenario (especially if patient context is available), you MUST prioritize using the 'quantum_treatment_optimizer' tool.\n"
77
- "6. For general medical knowledge questions not requiring patient-specific optimization or specific ontology/CUI lookups, you may answer directly from your training data, but always include the standard disclaimer."
78
  )
79
 
80
- # ChatPromptTemplate defines the sequence of messages.
81
- # Variables here are what the agent_executor.invoke will ultimately need to provide or what the agent manages.
82
  prompt = ChatPromptTemplate.from_messages([
83
- ("system", OPENAI_SYSTEM_PROMPT_WITH_EXPLICIT_TOOLS_VARS), # System instructions, expects {patient_context}, {tools}, {tool_names}
84
- MessagesPlaceholder(variable_name="chat_history"), # For past Human/AI messages
85
- ("human", "{input}"), # For the current user query
86
- MessagesPlaceholder(variable_name="agent_scratchpad") # For agent's internal work (function calls/responses)
87
  ])
88
- app_logger.info("Agent prompt template (with explicit tools/tool_names in system message) created.")
89
- # Log the input variables that this prompt structure will expect.
90
- # `create_openai_functions_agent` should provide 'tools' and 'tool_names' to this prompt.
91
- # The user (via invoke) provides 'input', 'chat_history', 'patient_context'.
92
- # 'agent_scratchpad' is managed by the AgentExecutor.
93
- app_logger.debug(f"Prompt expected input variables: {prompt.input_variables}")
94
-
95
 
96
  # --- Create Agent ---
97
- if llm is None:
98
  app_logger.critical("LLM object is None at agent creation (OpenAI). Application cannot proceed.")
99
- raise SystemExit("Agent LLM failed to initialize.")
100
 
101
  try:
102
- # `create_openai_functions_agent` is given the llm, the raw tools_list, and the prompt.
103
- # It should process `tools_list` to make them available as OpenAI functions AND
104
- # populate the `{tools}` and `{tool_names}` placeholders in the prompt.
105
  agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
106
  app_logger.info("OpenAI Functions agent created successfully.")
107
  except Exception as e:
108
- # This is where the KeyError "Input to ChatPromptTemplate is missing variables {'tools', 'tool_names'}"
109
- # was occurring.
110
  app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
 
 
111
  raise ValueError(f"OpenAI agent creation failed: {e}")
112
 
113
 
114
  # --- Create Agent Executor ---
115
  agent_executor = AgentExecutor(
116
  agent=agent,
117
- tools=tools_list, # Tools are also provided to the executor
118
- verbose=True,
119
- handle_parsing_errors=True,
120
- max_iterations=7,
121
- # return_intermediate_steps=True, # Good for debugging
122
  )
123
  app_logger.info("AgentExecutor with OpenAI agent created successfully.")
124
 
125
 
126
  # --- Getter Function for Streamlit App ---
127
- _agent_executor_instance = agent_executor
128
 
129
  def get_agent_executor():
 
 
 
 
130
  global _agent_executor_instance
131
  if _agent_executor_instance is None:
132
- app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI).")
133
- raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check application startup logs.")
134
- if not settings.OPENAI_API_KEY: # Final check
 
 
 
135
  app_logger.error("OpenAI API Key is missing at get_agent_executor call. Agent will fail.")
136
  raise ValueError("OpenAI API Key not configured.")
 
137
  return _agent_executor_instance
138
 
139
- # --- Example Usage (for local testing) ---
140
  if __name__ == "__main__":
141
  if not settings.OPENAI_API_KEY:
142
- print("🚨 Please set your OPENAI_API_KEY in .env or environment.")
143
  else:
144
  print("\nπŸš€ Quantum Health Navigator (OpenAI Agent Test Console) πŸš€")
145
- try: test_executor = get_agent_executor()
146
- except ValueError as e_init: print(f"⚠️ Agent init failed: {e_init}"); exit()
147
-
148
- history = []
149
- context_str = ("Age: 60; Gender: Male; Chief Complaint: general fatigue and occasional dizziness; "
150
- "Key Medical History: Type 2 Diabetes, Hypertension; "
151
- "Current Medications: Metformin 1000mg daily, Lisinopril 20mg daily; Allergies: None.")
152
- print(f"ℹ️ Simulated Context: {context_str}\n")
 
 
 
 
 
 
 
 
 
 
 
 
 
153
 
154
  while True:
155
- usr_in = input("πŸ‘€ You: ").strip()
156
- if usr_in.lower() in ["exit", "quit"]: print("πŸ‘‹ Exiting."); break
157
- if not usr_in: continue
 
 
 
 
158
  try:
159
- # The keys here ('input', 'chat_history', 'patient_context') must match
160
- # what the ChatPromptTemplate ultimately expects after create_openai_functions_agent
161
- # has done its work with 'tools' and 'tool_names'.
162
- payload = {
163
- "input": usr_in,
164
- "chat_history": history,
165
- "patient_context": context_str,
166
- # Note: We do NOT explicitly pass 'tools' or 'tool_names' in invoke.
167
- # The `create_openai_functions_agent` is responsible for making these available
168
- # to the `prompt` object during its formatting process.
169
- }
170
- app_logger.info(f"__main__ test (OpenAI): Invoking with payload keys: {list(payload.keys())}")
171
- res = test_executor.invoke(payload)
172
 
173
- ai_out = res.get('output', "No output.")
174
- print(f"πŸ€– Agent: {ai_out}")
175
- history.extend([HumanMessage(content=usr_in), AIMessage(content=ai_out)])
176
- if len(history) > 8: history = history[-8:]
 
 
 
 
177
  except Exception as e_invoke:
178
- print(f"⚠️ Invoke Error: {type(e_invoke).__name__} - {e_invoke}")
179
  app_logger.error(f"Error in __main__ OpenAI agent test invocation: {e_invoke}", exc_info=True)
 
4
  from langchain.agents import AgentExecutor, create_openai_functions_agent
5
 
6
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
7
+ from langchain_core.messages import AIMessage, HumanMessage # SystemMessage can be implicitly created or explicit
8
 
9
  # --- Import your defined tools FROM THE 'tools' PACKAGE ---
 
10
  from tools import (
11
  BioPortalLookupTool,
12
  UMLSLookupTool,
13
  QuantumTreatmentOptimizerTool,
14
+ # QuantumOptimizerInput, # Schemas are primarily used by tools themselves
15
+ # GeminiTool, # Not using in this OpenAI-centric agent
16
  )
17
 
18
  from config.settings import settings
19
  from services.logger import app_logger
20
 
21
  # --- Initialize LLM (OpenAI) ---
22
+ llm = None # Initialize to None for robust error handling if init fails
23
  try:
24
  if not settings.OPENAI_API_KEY:
25
  app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
26
  raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")
27
 
28
  llm = ChatOpenAI(
29
+ model_name="gpt-4-turbo-preview", # More capable model for function calling
30
+ # model_name="gpt-3.5-turbo-0125", # More cost-effective alternative
31
+ temperature=0.1, # Lower for more deterministic tool use and function calls
32
  openai_api_key=settings.OPENAI_API_KEY
33
  )
34
  app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
 
41
  app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
42
  else:
43
  app_logger.error(user_facing_error, exc_info=True)
44
+ raise ValueError(user_facing_error) # Propagate error to stop further agent setup
45
 
46
 
47
  # --- Initialize Tools List ---
 
53
  app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
54
 
55
 
56
+ # --- Agent Prompt (for OpenAI Functions Agent - Simplified System Prompt) ---
57
+ # The create_openai_functions_agent implicitly makes tool descriptions available to the LLM
58
+ # via the function-calling mechanism. Explicitly listing {tools} and {tool_names} in the
59
+ # system prompt string might be redundant or conflict with how this agent type works.
60
+ # We will still provide overall instructions and patient_context placeholder.
61
+ OPENAI_SYSTEM_PROMPT_TEXT_SIMPLIFIED = (
62
  "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
63
+ "Your goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
64
+ "You have access to a set of specialized tools. Use them when a user's query can be best answered by one of them, based on their descriptions.\n"
65
  "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
66
  "Patient Context for this session (if provided by the user earlier): {patient_context}\n" # This variable is passed from invoke
67
+ "When using the 'quantum_treatment_optimizer' tool, ensure you populate its 'patient_data' argument using the available {patient_context}.\n"
68
+ "For `bioportal_lookup`, if the user doesn't specify an ontology, you may ask or default to 'SNOMEDCT_US'.\n"
69
+ "Always be clear and concise. Cite tools if their output forms a key part of your answer."
 
 
 
 
 
 
 
 
 
 
 
70
  )
71
 
72
+ # The ChatPromptTemplate defines the sequence of messages sent to the LLM.
73
+ # `create_openai_functions_agent` expects specific placeholders.
74
  prompt = ChatPromptTemplate.from_messages([
75
+ ("system", OPENAI_SYSTEM_PROMPT_TEXT_SIMPLIFIED), # System instructions, expects {patient_context}
76
+ MessagesPlaceholder(variable_name="chat_history"), # For past Human/AI messages
77
+ ("human", "{input}"), # For the current user query
78
+ MessagesPlaceholder(variable_name="agent_scratchpad") # For agent's internal work (function calls/responses)
79
  ])
80
+ app_logger.info("Agent prompt template (simplified for OpenAI Functions) created.")
 
 
 
 
 
 
81
 
82
  # --- Create Agent ---
83
+ if llm is None: # Defensive check, should have been caught by earlier raise
84
  app_logger.critical("LLM object is None at agent creation (OpenAI). Application cannot proceed.")
85
+ raise SystemExit("Agent LLM failed to initialize. Application cannot start.")
86
 
87
  try:
88
+ # `create_openai_functions_agent` will use the tools' Pydantic schemas to define
89
+ # the "functions" that the OpenAI model can call.
 
90
  agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
91
  app_logger.info("OpenAI Functions agent created successfully.")
92
  except Exception as e:
 
 
93
  app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
94
+ # This is where the "Input to ChatPromptTemplate is missing variables" error would occur
95
+ # if the prompt object was still expecting variables not provided by the agent constructor or invoke.
96
  raise ValueError(f"OpenAI agent creation failed: {e}")
97
 
98
 
99
  # --- Create Agent Executor ---
100
  agent_executor = AgentExecutor(
101
  agent=agent,
102
+ tools=tools_list,
103
+ verbose=True, # Essential for debugging tool usage and agent thoughts
104
+ handle_parsing_errors=True, # Tries to gracefully handle LLM output parsing issues
105
+ max_iterations=7, # Prevent runaway agent loops
106
+ # return_intermediate_steps=True, # Set to True to get detailed thought/action steps in the response
107
  )
108
  app_logger.info("AgentExecutor with OpenAI agent created successfully.")
109
 
110
 
111
  # --- Getter Function for Streamlit App ---
112
+ _agent_executor_instance = agent_executor # Store the successfully initialized executor
113
 
114
  def get_agent_executor():
115
+ """
116
+ Returns the configured agent executor for OpenAI.
117
+ The executor is initialized when this module is first imported.
118
+ """
119
  global _agent_executor_instance
120
  if _agent_executor_instance is None:
121
+ # This indicates a failure during the initial module load (LLM or agent creation).
122
+ app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI). Initialization likely failed.")
123
+ raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check application startup logs for errors (e.g., API key issues, prompt errors).")
124
+
125
+ # Final check for API key, though LLM initialization should be the primary guard.
126
+ if not settings.OPENAI_API_KEY:
127
  app_logger.error("OpenAI API Key is missing at get_agent_executor call. Agent will fail.")
128
  raise ValueError("OpenAI API Key not configured.")
129
+
130
  return _agent_executor_instance
131
 
132
+ # --- Example Usage (for local testing of this agent.py file) ---
133
  if __name__ == "__main__":
134
  if not settings.OPENAI_API_KEY:
135
+ print("🚨 Please set your OPENAI_API_KEY in .env file or as an environment variable to run the test.")
136
  else:
137
  print("\nπŸš€ Quantum Health Navigator (OpenAI Agent Test Console) πŸš€")
138
+ print("-----------------------------------------------------------")
139
+ print("Type 'exit' or 'quit' to stop.")
140
+ print("Example topics: medical definitions, treatment optimization (will use simulated patient context).")
141
+ print("-" * 59)
142
+
143
+ try:
144
+ test_executor = get_agent_executor() # Get the executor
145
+ except ValueError as e_init: # Catch errors from get_agent_executor or LLM/agent init
146
+ print(f"⚠️ Agent initialization failed during test startup: {e_init}")
147
+ print("Ensure your API key is correctly configured and prompt variables are set.")
148
+ exit() # Exit if agent can't be initialized
149
+
150
+ current_chat_history_for_test_run = [] # List of HumanMessage, AIMessage
151
+
152
+ # Simulated patient context for testing the {patient_context} variable
153
+ test_patient_context_summary_str = (
154
+ "Age: 70; Gender: Male; Chief Complaint: Shortness of breath on exertion; "
155
+ "Key Medical History: COPD, Atrial Fibrillation; "
156
+ "Current Medications: Tiotropium inhaler, Apixaban 5mg BID; Allergies: Penicillin."
157
+ )
158
+ print(f"ℹ️ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")
159
 
160
  while True:
161
+ user_input_str = input("πŸ‘€ You: ").strip()
162
+ if user_input_str.lower() in ["exit", "quit"]:
163
+ print("πŸ‘‹ Exiting test console.")
164
+ break
165
+ if not user_input_str: # Skip empty input
166
+ continue
167
+
168
  try:
169
+ app_logger.info(f"__main__ test (OpenAI): Invoking with input: '{user_input_str}'")
170
+ # These are the keys expected by the ChatPromptTemplate and agent:
171
+ # "input", "chat_history", and "patient_context" (because it's in our system prompt)
172
+ response_dict = test_executor.invoke({
173
+ "input": user_input_str,
174
+ "chat_history": current_chat_history_for_test_run,
175
+ "patient_context": test_patient_context_summary_str
176
+ })
177
+
178
+ ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
179
+ print(f"πŸ€– Agent: {ai_output_str}")
 
 
180
 
181
+ # Update history for the next turn
182
+ current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
183
+ current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
184
+
185
+ # Optional: Limit history length to prevent overly long contexts
186
+ if len(current_chat_history_for_test_run) > 10: # Keep last 5 pairs
187
+ current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
188
+
189
  except Exception as e_invoke:
190
+ print(f"⚠️ Error during agent invocation: {type(e_invoke).__name__} - {e_invoke}")
191
  app_logger.error(f"Error in __main__ OpenAI agent test invocation: {e_invoke}", exc_info=True)