mgbam commited on
Commit
2a0415a
·
verified ·
1 Parent(s): f791ddf

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +41 -99
agent.py CHANGED
@@ -1,52 +1,42 @@
1
  # /home/user/app/agent.py
2
  import os
3
- from langchain_openai import ChatOpenAI # For OpenAI models
4
- from langchain.agents import AgentExecutor, create_openai_functions_agent # Agent optimized for OpenAI function calling
5
 
6
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
7
- from langchain_core.messages import AIMessage, HumanMessage # SystemMessage not always explicitly needed in prompt list for this agent
8
 
9
- # --- Import your defined tools FROM THE 'tools' PACKAGE ---
10
  from tools import (
11
  BioPortalLookupTool,
12
  UMLSLookupTool,
13
  QuantumTreatmentOptimizerTool,
14
- # QuantumOptimizerInput, # Import if needed for type hints directly in this file
15
- # GeminiTool, # Not needed if primary LLM is OpenAI
16
  )
17
 
18
- from config.settings import settings # This loads your HF secrets into the settings object
19
  from services.logger import app_logger
20
 
21
- # --- Initialize LLM (OpenAI) ---
22
  llm = None
23
  try:
24
  if not settings.OPENAI_API_KEY:
25
- app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
26
- raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")
27
-
28
- # Choose your preferred OpenAI model
29
- # "gpt-3.5-turbo-0125" is a good balance of cost and capability for function calling.
30
- # "gpt-4-turbo-preview" or "gpt-4" is more capable but more expensive.
31
  llm = ChatOpenAI(
32
- model_name="gpt-3.5-turbo-0125", # Or "gpt-4-turbo-preview"
33
- temperature=0.2, # Lower for more predictable tool use
 
34
  openai_api_key=settings.OPENAI_API_KEY
35
  )
36
  app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
37
-
38
  except Exception as e:
39
  detailed_error_message = str(e)
40
- user_facing_error = f"OpenAI LLM initialization failed: {detailed_error_message}. Check API key and model name."
41
  if "api_key" in detailed_error_message.lower() or "authenticate" in detailed_error_message.lower():
42
- user_facing_error = "OpenAI LLM initialization failed: API key issue. Ensure OPENAI_API_KEY is correctly set in Hugging Face Secrets and is valid."
43
- app_logger.error(user_facing_error + f" Original: {detailed_error_message}", exc_info=False)
44
- else:
45
- app_logger.error(user_facing_error, exc_info=True)
46
  raise ValueError(user_facing_error)
47
 
48
-
49
- # --- Initialize Tools List ---
50
  tools_list = [
51
  UMLSLookupTool(),
52
  BioPortalLookupTool(),
@@ -54,40 +44,28 @@ tools_list = [
54
  ]
55
  app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
56
 
57
-
58
- # --- Agent Prompt (for OpenAI Functions Agent) ---
59
- # This prompt is simpler because much of the tool-calling logic is handled by
60
- # OpenAI's function-calling mechanism, which create_openai_functions_agent leverages.
61
- # The agent will be able to see the tool descriptions.
62
- # We still provide system instructions and placeholders for history and input.
63
- # The {agent_scratchpad} is crucial for OpenAI Functions agent to work correctly.
64
- # The {patient_context} variable needs to be passed in the invoke call.
65
-
66
  OPENAI_SYSTEM_PROMPT_TEXT = (
67
  "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
68
  "Your goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
69
- "You have access to a set of tools to help you. Use them when appropriate.\n"
70
  "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
71
  "Patient Context for this session (if provided by the user earlier): {patient_context}\n"
72
- "When using the 'quantum_treatment_optimizer' tool, ensure you populate its 'patient_data' argument using the available {patient_context}.\n"
 
73
  "Always be clear and concise. Cite tools if their output forms a key part of your answer."
74
  )
75
 
76
- # `create_openai_functions_agent` typically works well with a system message,
77
- # chat history placeholder, human input placeholder, and agent_scratchpad placeholder.
78
  prompt = ChatPromptTemplate.from_messages([
79
  ("system", OPENAI_SYSTEM_PROMPT_TEXT),
80
  MessagesPlaceholder(variable_name="chat_history"),
81
  ("human", "{input}"),
82
- MessagesPlaceholder(variable_name="agent_scratchpad") # Essential for OpenAI Functions agent
83
  ])
84
  app_logger.info("Agent prompt template created for OpenAI Functions agent.")
85
 
86
- # --- Create Agent ---
87
  if llm is None:
88
- app_logger.critical("LLM object is None at agent creation stage (OpenAI). Application cannot proceed.")
89
- raise SystemExit("Agent LLM failed to initialize. Application cannot start.")
90
-
91
  try:
92
  agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
93
  app_logger.info("OpenAI Functions agent created successfully.")
@@ -95,79 +73,43 @@ except Exception as e:
95
  app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
96
  raise ValueError(f"OpenAI agent creation failed: {e}")
97
 
98
-
99
- # --- Create Agent Executor ---
100
  agent_executor = AgentExecutor(
101
  agent=agent,
102
  tools=tools_list,
103
  verbose=True,
104
- handle_parsing_errors=True, # Handles cases where LLM output for function call is malformed
105
- max_iterations=10,
106
- # return_intermediate_steps=True, # Useful for debugging
107
  )
108
  app_logger.info("AgentExecutor with OpenAI agent created successfully.")
109
 
110
-
111
- # --- Getter Function for Streamlit App ---
112
  _agent_executor_instance = agent_executor
113
-
114
  def get_agent_executor():
115
- """
116
- Returns the configured agent executor for OpenAI.
117
- The executor is initialized when this module is first imported.
118
- """
119
  global _agent_executor_instance
120
  if _agent_executor_instance is None:
121
- app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI).")
122
- raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check startup logs.")
123
- # You can add a check for settings.OPENAI_API_KEY here too if desired,
124
- # but the LLM init should have caught it.
125
  return _agent_executor_instance
126
 
127
- # --- Example Usage (for local testing) ---
128
  if __name__ == "__main__":
129
  if not settings.OPENAI_API_KEY:
130
- print("๐Ÿšจ Please set your OPENAI_API_KEY in .env file or as an environment variable to run the test.")
131
  else:
132
  print("\n๐Ÿš€ Quantum Health Navigator (OpenAI Agent Test Console) ๐Ÿš€")
133
- print("-----------------------------------------------------------")
134
- # ... (rest of the __main__ block from the previous agent.py, it should work.
135
- # The invoke payload will still need "input", "chat_history", and "patient_context") ...
136
- try:
137
- test_executor = get_agent_executor()
138
- except ValueError as e_init:
139
- print(f"โš ๏ธ Agent initialization failed: {e_init}")
140
- exit()
141
-
142
- current_chat_history_for_test_run = []
143
- test_patient_context_summary_str = (
144
- "Age: 70; Gender: Male; Chief Complaint: Shortness of breath on exertion; "
145
- "Key Medical History: COPD, Atrial Fibrillation; "
146
- "Current Medications: Tiotropium inhaler, Apixaban 5mg BID; Allergies: Penicillin."
147
- )
148
- print(f"โ„น๏ธ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")
149
-
150
  while True:
151
- user_input_str = input("๐Ÿ‘ค You: ").strip()
152
- if user_input_str.lower() in ["exit", "quit"]:
153
- print("๐Ÿ‘‹ Exiting.")
154
- break
155
- if not user_input_str:
156
- continue
157
  try:
158
- app_logger.info(f"__main__ test (OpenAI): Invoking with: '{user_input_str}'")
159
- # Keys for invoke: "input", "chat_history", and any other variables in your prompt (like "patient_context")
160
- response_dict = test_executor.invoke({
161
- "input": user_input_str,
162
- "chat_history": current_chat_history_for_test_run, # List of BaseMessage
163
- "patient_context": test_patient_context_summary_str
164
- })
165
- ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
166
- print(f"๐Ÿค– Agent: {ai_output_str}")
167
- current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
168
- current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
169
- if len(current_chat_history_for_test_run) > 10:
170
- current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
171
- except Exception as e:
172
- print(f"โš ๏ธ Error during agent invocation: {e}")
173
- app_logger.error(f"Error in __main__ OpenAI agent test invocation: {e}", exc_info=True)
 
1
  # /home/user/app/agent.py
2
  import os
3
+ from langchain_openai import ChatOpenAI
4
+ from langchain.agents import AgentExecutor, create_openai_functions_agent
5
 
6
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
7
+ from langchain_core.messages import AIMessage, HumanMessage # SystemMessage not explicitly needed in prompt list for this agent type
8
 
 
9
  from tools import (
10
  BioPortalLookupTool,
11
  UMLSLookupTool,
12
  QuantumTreatmentOptimizerTool,
13
+ # QuantumOptimizerInput, # Schemas are primarily used by tools themselves
14
+ # GeminiTool, # Not using in this OpenAI-centric agent
15
  )
16
 
17
+ from config.settings import settings
18
  from services.logger import app_logger
19
 
 
20
# --- Initialise the OpenAI chat model that powers the agent ---
# On any failure we log a readable message and re-raise as ValueError so the
# importing app can surface a clean startup error.
llm = None
try:
    # Fail fast with an explicit message when the key is absent.
    if not settings.OPENAI_API_KEY:
        app_logger.error("CRITICAL: OPENAI_API_KEY not found. Agent cannot initialize.")
        raise ValueError("OpenAI API Key not configured.")

    llm = ChatOpenAI(
        model_name="gpt-4-turbo-preview",  # More capable model for better tool use
        # model_name="gpt-3.5-turbo-0125",  # More cost-effective alternative
        temperature=0.1,  # Low temperature for more deterministic tool calls
        openai_api_key=settings.OPENAI_API_KEY,
    )
    app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")
except Exception as e:
    detailed_error_message = str(e)
    user_facing_error = f"OpenAI LLM initialization failed: {detailed_error_message}."
    # Give a more actionable hint when the failure smells like a key problem.
    if "api_key" in detailed_error_message.lower() or "authenticate" in detailed_error_message.lower():
        user_facing_error = "OpenAI LLM initialization failed: API key issue. Check HF Secrets."
    app_logger.error(user_facing_error, exc_info=True)
    raise ValueError(user_facing_error)
 
 
 
40
# --- Tools exposed to the agent ---
# NOTE(review): QuantumTreatmentOptimizerTool is imported above and the system
# prompt refers to 'quantum_treatment_optimizer'; the diff rendering dropped
# the context line that instantiates it, so it is restored here — confirm
# against the full file history.
tools_list = [
    UMLSLookupTool(),
    BioPortalLookupTool(),
    QuantumTreatmentOptimizerTool(),
]
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
46
 
 
 
 
 
 
 
 
 
 
47
# System instructions for the OpenAI-functions agent.
# Only {patient_context} is left as a template variable — it must be supplied
# in every agent_executor.invoke() call.  {tools}/{tool_names} are deliberately
# NOT referenced here: create_openai_functions_agent passes tool schemas via
# the OpenAI function-calling API and does not fill those variables, so any
# extra placeholder would raise a missing-input error at invoke time.
OPENAI_SYSTEM_PROMPT_TEXT = (
    "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
    "Your goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
    "You have access to a set of tools to help you. Use them when appropriate.\n"
    "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
    "Patient Context for this session (if provided by the user earlier): {patient_context}\n"
    "When using the 'quantum_treatment_optimizer' tool, ensure you populate its 'patient_data' argument using the available {patient_context}. "
    "For `bioportal_lookup`, if the user doesn't specify an ontology, you may ask or default to 'SNOMEDCT_US'.\n"
    "Always be clear and concise. Cite tools if their output forms a key part of your answer."
)
57
 
 
 
58
# Prompt layout required by create_openai_functions_agent:
# system message, running chat history, the new human turn, and the
# agent_scratchpad placeholder where intermediate tool-call steps go.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", OPENAI_SYSTEM_PROMPT_TEXT),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)
app_logger.info("Agent prompt template created for OpenAI Functions agent.")
65
 
 
66
# Guard: the module cannot proceed without an initialised LLM.
if llm is None:
    app_logger.critical("LLM object is None at agent creation (OpenAI). Cannot proceed.")
    raise SystemExit("Agent LLM failed to initialize.")

try:
    agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
    app_logger.info("OpenAI Functions agent created successfully.")
except Exception as e:
    app_logger.error(f"Failed to create OpenAI Functions agent: {e}", exc_info=True)
    raise ValueError(f"OpenAI agent creation failed: {e}")
75
 
 
 
76
# Runtime wrapper that drives the think / call-tool / observe loop.
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools_list,
    verbose=True,  # log every step; useful while iterating on prompts
    handle_parsing_errors=True,  # recover from malformed tool-call output
    max_iterations=7,  # Adjusted max_iterations — hard stop against loops
    # return_intermediate_steps=True,  # Enable for deep debugging
)
app_logger.info("AgentExecutor with OpenAI agent created successfully.")
85
 
 
 
86
# Module-level singleton handed out to the UI layer.
_agent_executor_instance = agent_executor


def get_agent_executor():
    """Return the module-level AgentExecutor singleton.

    The executor is built once at import time; this accessor only checks
    that the build actually succeeded.

    Raises:
        RuntimeError: if import-time initialisation failed and no executor
            is available.
    """
    global _agent_executor_instance
    if _agent_executor_instance is None:
        app_logger.critical("CRITICAL: Agent executor is None (OpenAI).")
        raise RuntimeError("Agent executor (OpenAI) not initialized.")
    return _agent_executor_instance
93
 
 
94
# --- Interactive smoke test (local console only; never runs under import) ---
# Fixes: the user-facing strings were mojibake (UTF-8 emoji misdecoded as a
# Thai codepage) and are restored; bare exit() (site-module dependent) is
# replaced by raising SystemExit.
if __name__ == "__main__":
    if not settings.OPENAI_API_KEY:
        print("🚨 Set OPENAI_API_KEY in .env or environment.")
    else:
        print("\n🚀 Quantum Health Navigator (OpenAI Agent Test Console) 🚀")
        try:
            test_executor = get_agent_executor()
        except ValueError as e_init:
            print(f"⚠️ Agent init failed: {e_init}")
            raise SystemExit(1)

        history = []  # alternating HumanMessage / AIMessage, newest last
        context = "Age: 50; Gender: Male; History: Hypertension."
        print(f"ℹ️ Simulated Context: {context}\n")

        while True:
            usr_in = input("👤 You: ").strip()
            if usr_in.lower() in ["exit", "quit"]:
                print("👋 Exiting.")
                break
            if not usr_in:
                continue
            try:
                # Keys must match the prompt's template variables.
                res = test_executor.invoke({
                    "input": usr_in,
                    "chat_history": history,
                    "patient_context": context,
                })
                ai_out = res.get('output', "No output.")
                print(f"🤖 Agent: {ai_out}")
                history.extend([HumanMessage(content=usr_in), AIMessage(content=ai_out)])
                # Keep the rolling window at the last 4 exchanges.
                if len(history) > 8:
                    history = history[-8:]
            except Exception as e_invoke:
                print(f"⚠️ Invoke Error: {e_invoke}")