mgbam committed on
Commit
216ab9a
·
verified ·
1 Parent(s): 5c5120f

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +104 -62
agent.py CHANGED
@@ -2,7 +2,7 @@
2
  import os
3
  from langchain_google_genai import ChatGoogleGenerativeAI
4
  from langchain.agents import AgentExecutor, create_structured_chat_agent
5
- # from langchain_google_genai import HarmBlockThreshold, HarmCategory # Optional for safety
6
 
7
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
8
  from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
@@ -13,47 +13,80 @@ from tools import (
13
  BioPortalLookupTool,
14
  UMLSLookupTool,
15
  QuantumTreatmentOptimizerTool,
16
- # QuantumOptimizerInput, # Only if needed for type hints directly in this file
17
- # GeminiTool, # Uncomment and add to __all__ in tools/__init__.py if you decide to use it
18
  )
19
 
20
- from config.settings import settings
21
  from services.logger import app_logger
22
 
23
  # --- Initialize LLM (Gemini) ---
 
 
24
  try:
25
- if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
26
- # This check is crucial. If no key, LLM init will fail.
27
- app_logger.error("CRITICAL: GOOGLE_API_KEY (for Gemini) not found in settings or environment. Agent cannot initialize.")
28
- raise ValueError("GOOGLE_API_KEY (for Gemini) not configured.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
  llm = ChatGoogleGenerativeAI(
31
- model="gemini-1.5-pro-latest", # Or "gemini-pro"
32
- temperature=0.2, # Lower temperature for more deterministic tool use
33
- # google_api_key=settings.GEMINI_API_KEY, # Explicitly pass if GOOGLE_API_KEY env var might not be picked up
34
- convert_system_message_to_human=True, # Can help with models that don't strictly follow system role
35
- # safety_settings={...} # Optional safety settings
 
 
 
 
 
36
  )
37
- app_logger.info(f"ChatGoogleGenerativeAI ({llm.model}) initialized successfully for agent.")
 
38
  except Exception as e:
39
- app_logger.error(f"Failed to initialize ChatGoogleGenerativeAI for agent: {e}", exc_info=True)
40
- # This error needs to be propagated so get_agent_executor fails clearly
41
- raise ValueError(f"Gemini LLM initialization failed: {e}. Check API key and configurations in HF Secrets.")
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
 
44
  # --- Initialize Tools List ---
45
- # The tool instances are created here. Their internal logic (like API calls)
46
- # will be executed when the agent calls their .run() or ._run() method.
47
  tools_list = [
48
  UMLSLookupTool(),
49
  BioPortalLookupTool(),
50
  QuantumTreatmentOptimizerTool(),
51
- # GeminiTool(), # Add if using
52
  ]
53
  app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
54
 
55
 
56
- # --- Agent Prompt (Adapted for Structured Chat with Gemini and your tools) ---
57
  SYSTEM_PROMPT_TEMPLATE = (
58
  "You are 'Quantum Health Navigator', an advanced AI assistant for healthcare professionals. "
59
  "Your primary goal is to provide accurate information and insights based on user queries and available tools. "
@@ -63,30 +96,25 @@ SYSTEM_PROMPT_TEMPLATE = (
63
  "unless it's the direct output of a specialized tool like 'quantum_treatment_optimizer'.\n"
64
  "2. Patient Context: The user may provide patient context at the start of the session. This context is available as: {patient_context}. "
65
  "You MUST consider this context when it's relevant to the query, especially for the 'quantum_treatment_optimizer' tool.\n"
66
- "3. Tool Usage: You have access to the following tools:\n{tools}\n" # {tools} is filled by the agent with tool names and descriptions
67
- " To use a tool, respond with a JSON markdown code block with the 'action' and 'action_input' keys. "
68
- " The 'action_input' should match the schema for the specified tool. Examples:\n"
69
  " For `umls_lookup`: ```json\n{{\"action\": \"umls_lookup\", \"action_input\": \"myocardial infarction\"}}\n```\n"
70
  " For `bioportal_lookup`: ```json\n{{\"action\": \"bioportal_lookup\", \"action_input\": {{\"term\": \"diabetes mellitus\", \"ontology\": \"SNOMEDCT\"}}}}\n```\n"
71
- " For `quantum_treatment_optimizer`: ```json\n{{\"action\": \"quantum_treatment_optimizer\", \"action_input\": {{\"patient_data\": {{\"age\": 55, \"gender\": \"Male\"}}, \"current_treatments\": [\"metformin\"], \"conditions\": [\"Type 2 Diabetes\"]}}}}\n```\n"
72
  " Ensure the `action_input` for `quantum_treatment_optimizer` includes a `patient_data` dictionary populated from the overall {patient_context}.\n"
73
- "4. Responding to User: After using a tool, you will receive an observation. Use this observation and your knowledge to formulate a comprehensive answer. Cite the tool if you used one (e.g., 'According to UMLS Lookup...').\n"
74
  "5. Specific Tool Guidance:\n"
75
  " - If asked about treatment optimization for a specific patient (especially if patient context is provided), you MUST use the `quantum_treatment_optimizer` tool.\n"
76
  " - For definitions, codes, or general medical concepts, `umls_lookup` or `bioportal_lookup` are appropriate.\n"
77
- # " - If the query is very general, complex, or creative beyond simple lookups, you might consider using `google_gemini_chat` (if enabled as a tool) or answering directly if confident.\n" # If GeminiTool is used
78
  "6. Conversation Flow: Refer to the `Previous conversation history` to maintain context.\n\n"
79
  "Begin!\n\n"
80
  "Previous conversation history:\n"
81
  "{chat_history}\n\n"
82
  "New human question: {input}\n"
83
- "{agent_scratchpad}" # Placeholder for agent's thoughts and tool outputs
84
  )
85
 
86
- # Create the prompt template
87
- # The input_variables are what agent_executor.invoke expects, plus what create_structured_chat_agent adds.
88
- # create_structured_chat_agent uses 'tools' and 'tool_names' internally when formatting the prompt for the LLM.
89
- # The primary inputs we pass to invoke are 'input', 'chat_history', and 'patient_context'.
90
  prompt = ChatPromptTemplate.from_messages([
91
  ("system", SYSTEM_PROMPT_TEMPLATE),
92
  MessagesPlaceholder(variable_name="agent_scratchpad"),
@@ -94,9 +122,16 @@ prompt = ChatPromptTemplate.from_messages([
94
  app_logger.info("Agent prompt template created for Gemini structured chat agent.")
95
 
96
  # --- Create Agent ---
 
 
 
 
 
 
 
 
 
97
  try:
98
- # create_structured_chat_agent is suitable for LLMs that can follow instructions
99
- # to produce structured output (like JSON for tool calls) when prompted.
100
  agent = create_structured_chat_agent(llm=llm, tools=tools_list, prompt=prompt)
101
  app_logger.info("Structured chat agent created successfully with Gemini LLM and tools.")
102
  except Exception as e:
@@ -108,31 +143,39 @@ except Exception as e:
108
  agent_executor = AgentExecutor(
109
  agent=agent,
110
  tools=tools_list,
111
- verbose=True, # Essential for debugging tool usage
112
- handle_parsing_errors=True, # Gracefully handle if LLM output for tool call isn't perfect JSON
113
- max_iterations=10, # Prevents overly long or runaway chains
114
- # return_intermediate_steps=True, # Set to True to get thoughts/actions in the response dict
115
- early_stopping_method="generate", # Sensible default
116
  )
117
  app_logger.info("AgentExecutor with Gemini agent created successfully.")
118
 
119
 
120
  # --- Getter Function for Streamlit App ---
 
 
121
  def get_agent_executor():
122
  """
123
  Returns the configured agent executor for Gemini.
124
- Initialization of LLM, tools, agent, and executor happens when this module is imported.
125
  """
126
- # A final check for API key availability, though LLM initialization should have caught it.
127
- if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
128
- app_logger.critical("CRITICAL: GOOGLE_API_KEY (for Gemini) is not available when get_agent_executor is called. This indicates an earlier init failure or misconfiguration.")
129
- raise ValueError("Google API Key for Gemini not configured. Agent cannot function.")
130
- return agent_executor
 
 
 
 
131
 
132
  # --- Example Usage (for local testing of this agent.py file) ---
133
  if __name__ == "__main__":
134
- if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
135
- print("🚨 Please set your GOOGLE_API_KEY in .env file or as an environment variable to run the test.")
 
 
136
  else:
137
  print("\nπŸš€ Quantum Health Navigator (Gemini Agent Test Console) πŸš€")
138
  print("-----------------------------------------------------------")
@@ -140,31 +183,32 @@ if __name__ == "__main__":
140
  print("Example topics: medical definitions, treatment optimization (will use simulated patient context).")
141
  print("-" * 59)
142
 
143
- test_executor = get_agent_executor() # Get the globally defined executor
144
- current_chat_history_for_test_run = [] # List of HumanMessage, AIMessage
 
 
 
 
 
 
145
 
146
- # Simulated patient context for testing the {patient_context} variable
147
  test_patient_context_summary_str = (
148
- "Age: 62; Gender: Female; Chief Complaint: Fatigue and increased thirst; "
149
- "Key Medical History: Obesity, family history of diabetes; "
150
- "Current Medications: None reported; Allergies: Sulfa drugs."
151
  )
152
  print(f"ℹ️ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")
153
 
154
-
155
  while True:
156
- user_input_str = input("πŸ‘€ You: ")
157
  if user_input_str.lower() in ["exit", "quit"]:
158
  print("πŸ‘‹ Exiting test console.")
159
  break
160
-
161
- if not user_input_str.strip():
162
  continue
163
 
164
  try:
165
  app_logger.info(f"__main__ test: Invoking agent with input: '{user_input_str}'")
166
- # These are the keys expected by the prompt template
167
- # and processed by create_structured_chat_agent
168
  response_dict = test_executor.invoke({
169
  "input": user_input_str,
170
  "chat_history": current_chat_history_for_test_run,
@@ -174,12 +218,10 @@ if __name__ == "__main__":
174
  ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
175
  print(f"πŸ€– Agent: {ai_output_str}")
176
 
177
- # Update history for the next turn
178
  current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
179
  current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
180
 
181
- # Optional: Limit history length
182
- if len(current_chat_history_for_test_run) > 10: # Keep last 5 pairs
183
  current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
184
 
185
  except Exception as e:
 
2
  import os
3
  from langchain_google_genai import ChatGoogleGenerativeAI
4
  from langchain.agents import AgentExecutor, create_structured_chat_agent
5
+ # from langchain_google_genai import HarmBlockThreshold, HarmCategory # Optional for safety settings
6
 
7
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
8
  from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
 
13
  BioPortalLookupTool,
14
  UMLSLookupTool,
15
  QuantumTreatmentOptimizerTool,
16
+ # QuantumOptimizerInput, # Only if needed for type hints directly in this file for some reason
17
+ # GeminiTool, # Assuming not used for now as main LLM is Gemini
18
  )
19
 
20
+ from config.settings import settings # This loads your HF secrets into the settings object
21
  from services.logger import app_logger
22
 
23
  # --- Initialize LLM (Gemini) ---
24
+ # This block is critical for ensuring the API key is used.
25
+ llm = None # Initialize to None in case of failure
26
  try:
27
+ # Prioritize the API key from settings (loaded from HF Secrets)
28
+ # settings.GEMINI_API_KEY should be populated by Pydantic BaseSettings from the HF Secret
29
+ gemini_api_key_from_settings = settings.GEMINI_API_KEY
30
+
31
+ # Fallback to environment variable GOOGLE_API_KEY if settings.GEMINI_API_KEY is not found/set
32
+ # (though ideally, settings.GEMINI_API_KEY should be the primary source via HF Secrets)
33
+ api_key_to_use = gemini_api_key_from_settings or os.getenv("GOOGLE_API_KEY")
34
+
35
+ if not api_key_to_use:
36
+ app_logger.error(
37
+ "CRITICAL: Gemini API Key not found. "
38
+ "Ensure GEMINI_API_KEY is set in Hugging Face Space secrets and loaded into settings, "
39
+ "or GOOGLE_API_KEY is set as an environment variable."
40
+ )
41
+ raise ValueError(
42
+ "Gemini API Key not configured. Please set it in Hugging Face Space secrets "
43
+ "as GEMINI_API_KEY or ensure GOOGLE_API_KEY environment variable is available."
44
+ )
45
 
46
  llm = ChatGoogleGenerativeAI(
47
+ model="gemini-1.5-pro-latest", # Using a more capable model
48
+ # model="gemini-pro", # Fallback if 1.5-pro is not available or for cost reasons
49
+ temperature=0.2,
50
+ google_api_key=api_key_to_use, # *** EXPLICITLY PASS THE KEY HERE ***
51
+ convert_system_message_to_human=True, # Often useful for non-OpenAI models
52
+ # Example safety settings (optional, adjust as needed)
53
+ # safety_settings={
54
+ # HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
55
+ # HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
56
+ # }
57
  )
58
+ app_logger.info(f"ChatGoogleGenerativeAI ({llm.model}) initialized successfully using provided API key.")
59
+
60
  except Exception as e:
61
+ # This broad exception catch is to provide a clear error message if LLM init fails for any reason.
62
+ detailed_error_message = str(e)
63
+ user_facing_error = f"Gemini LLM initialization failed: {detailed_error_message}. " \
64
+ "Check API key validity, model name, and configurations in Hugging Face Secrets."
65
+
66
+ if "default credentials were not found" in detailed_error_message.lower() or \
67
+ "could not find default credentials" in detailed_error_message.lower() or \
68
+ "api_key" in detailed_error_message.lower(): # Catch common API key related messages
69
+ user_facing_error = "Gemini LLM initialization failed: API key issue or missing credentials. " \
70
+ "Ensure GEMINI_API_KEY is correctly set in Hugging Face Secrets and is valid."
71
+ app_logger.error(user_facing_error + f" Original error details: {detailed_error_message}", exc_info=False)
72
+ else:
73
+ app_logger.error(user_facing_error, exc_info=True) # Log full traceback for other errors
74
+
75
+ # Re-raise to stop agent setup if LLM fails. This will be caught in get_agent_executor.
76
+ raise ValueError(user_facing_error)
77
 
78
 
79
  # --- Initialize Tools List ---
 
 
80
  tools_list = [
81
  UMLSLookupTool(),
82
  BioPortalLookupTool(),
83
  QuantumTreatmentOptimizerTool(),
84
+ # GeminiTool(), # Add if you have a specific reason to use Gemini as a sub-tool
85
  ]
86
  app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
87
 
88
 
89
+ # --- Agent Prompt (for Structured Chat with Gemini) ---
90
  SYSTEM_PROMPT_TEMPLATE = (
91
  "You are 'Quantum Health Navigator', an advanced AI assistant for healthcare professionals. "
92
  "Your primary goal is to provide accurate information and insights based on user queries and available tools. "
 
96
  "unless it's the direct output of a specialized tool like 'quantum_treatment_optimizer'.\n"
97
  "2. Patient Context: The user may provide patient context at the start of the session. This context is available as: {patient_context}. "
98
  "You MUST consider this context when it's relevant to the query, especially for the 'quantum_treatment_optimizer' tool.\n"
99
+ "3. Tool Usage: You have access to the following tools:\n{tools}\n" # {tools} is filled by the agent
100
+ " To use a tool, respond *only* with a JSON markdown code block with 'action' and 'action_input' keys. "
101
+ " The 'action_input' must match the schema for the specified tool. Examples:\n"
102
  " For `umls_lookup`: ```json\n{{\"action\": \"umls_lookup\", \"action_input\": \"myocardial infarction\"}}\n```\n"
103
  " For `bioportal_lookup`: ```json\n{{\"action\": \"bioportal_lookup\", \"action_input\": {{\"term\": \"diabetes mellitus\", \"ontology\": \"SNOMEDCT\"}}}}\n```\n"
104
+ " For `quantum_treatment_optimizer`: ```json\n{{\"action\": \"quantum_treatment_optimizer\", \"action_input\": {{\"patient_data\": {{\"age\": 55, \"gender\": \"Male\", \"symptoms\": [\"chest pain\"]}}, \"current_treatments\": [\"metformin\"], \"conditions\": [\"Type 2 Diabetes\"]}}}}\n```\n"
105
  " Ensure the `action_input` for `quantum_treatment_optimizer` includes a `patient_data` dictionary populated from the overall {patient_context}.\n"
106
+ "4. Responding to User: After using a tool, you will receive an observation. Use this observation and your knowledge to formulate a comprehensive final answer to the human. Cite the tool if you used one (e.g., 'According to UMLS Lookup...'). Do not output a tool call again unless necessary for a multi-step process.\n"
107
  "5. Specific Tool Guidance:\n"
108
  " - If asked about treatment optimization for a specific patient (especially if patient context is provided), you MUST use the `quantum_treatment_optimizer` tool.\n"
109
  " - For definitions, codes, or general medical concepts, `umls_lookup` or `bioportal_lookup` are appropriate.\n"
 
110
  "6. Conversation Flow: Refer to the `Previous conversation history` to maintain context.\n\n"
111
  "Begin!\n\n"
112
  "Previous conversation history:\n"
113
  "{chat_history}\n\n"
114
  "New human question: {input}\n"
115
+ "{agent_scratchpad}" # Placeholder for agent's internal thoughts, tool calls, and tool observations
116
  )
117
 
 
 
 
 
118
  prompt = ChatPromptTemplate.from_messages([
119
  ("system", SYSTEM_PROMPT_TEMPLATE),
120
  MessagesPlaceholder(variable_name="agent_scratchpad"),
 
122
  app_logger.info("Agent prompt template created for Gemini structured chat agent.")
123
 
124
  # --- Create Agent ---
125
+ # This assumes `llm` was successfully initialized above.
126
+ if llm is None:
127
+ # This case should ideally not be reached if the ValueError was raised during LLM init,
128
+ # but as a defensive measure:
129
+ app_logger.critical("LLM object is None at agent creation stage. Cannot proceed.")
130
+ # The ValueError from LLM init should have already stopped the module loading.
131
+ # If somehow execution reaches here with llm=None, something is very wrong.
132
+ raise SystemExit("Agent LLM failed to initialize. Application cannot start.")
133
+
134
  try:
 
 
135
  agent = create_structured_chat_agent(llm=llm, tools=tools_list, prompt=prompt)
136
  app_logger.info("Structured chat agent created successfully with Gemini LLM and tools.")
137
  except Exception as e:
 
143
  agent_executor = AgentExecutor(
144
  agent=agent,
145
  tools=tools_list,
146
+ verbose=True,
147
+ handle_parsing_errors=True,
148
+ max_iterations=10,
149
+ early_stopping_method="generate",
150
+ # return_intermediate_steps=True, # Good for debugging, makes response a dict with 'intermediate_steps'
151
  )
152
  app_logger.info("AgentExecutor with Gemini agent created successfully.")
153
 
154
 
155
  # --- Getter Function for Streamlit App ---
156
+ _agent_executor_instance = agent_executor # Store the initialized executor
157
+
158
  def get_agent_executor():
159
  """
160
  Returns the configured agent executor for Gemini.
161
+ The executor is initialized when this module is first imported.
162
  """
163
+ global _agent_executor_instance
164
+ if _agent_executor_instance is None:
165
+ # This should not happen if module initialization was successful.
166
+ # It might indicate an issue where the module is reloaded or init failed silently.
167
+ app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called. Re-initialization attempt or fundamental error.")
168
+ # You could try to re-initialize here, but it's better to ensure init works on first load.
169
+ # For now, raise an error to make it obvious.
170
+ raise RuntimeError("Agent executor was not properly initialized. Check application startup logs.")
171
+ return _agent_executor_instance
172
 
173
  # --- Example Usage (for local testing of this agent.py file) ---
174
  if __name__ == "__main__":
175
+ # Check if the API key is available for the test
176
+ main_test_api_key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
177
+ if not main_test_api_key:
178
+ print("🚨 Please set your GOOGLE_API_KEY (for Gemini) in .env file or as an environment variable to run the test.")
179
  else:
180
  print("\nπŸš€ Quantum Health Navigator (Gemini Agent Test Console) πŸš€")
181
  print("-----------------------------------------------------------")
 
183
  print("Example topics: medical definitions, treatment optimization (will use simulated patient context).")
184
  print("-" * 59)
185
 
186
+ try:
187
+ test_executor = get_agent_executor() # Get the executor
188
+ except ValueError as e_init:
189
+ print(f"⚠️ Agent initialization failed during test startup: {e_init}")
190
+ print("Ensure your API key is correctly configured.")
191
+ exit() # Exit if agent can't be initialized
192
+
193
+ current_chat_history_for_test_run = []
194
 
 
195
  test_patient_context_summary_str = (
196
+ "Age: 58; Gender: Female; Chief Complaint: Recent onset of blurry vision and fatigue; "
197
+ "Key Medical History: Prediabetes, Mild dyslipidemia; "
198
+ "Current Medications: None; Allergies: None known."
199
  )
200
  print(f"ℹ️ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")
201
 
 
202
  while True:
203
+ user_input_str = input("πŸ‘€ You: ").strip()
204
  if user_input_str.lower() in ["exit", "quit"]:
205
  print("πŸ‘‹ Exiting test console.")
206
  break
207
+ if not user_input_str:
 
208
  continue
209
 
210
  try:
211
  app_logger.info(f"__main__ test: Invoking agent with input: '{user_input_str}'")
 
 
212
  response_dict = test_executor.invoke({
213
  "input": user_input_str,
214
  "chat_history": current_chat_history_for_test_run,
 
218
  ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
219
  print(f"πŸ€– Agent: {ai_output_str}")
220
 
 
221
  current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
222
  current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
223
 
224
+ if len(current_chat_history_for_test_run) > 10:
 
225
  current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
226
 
227
  except Exception as e: