Update agent.py
agent.py
CHANGED
@@ -21,15 +21,9 @@ from config.settings import settings # This loads your HF secrets into the setti
 from services.logger import app_logger
 
 # --- Initialize LLM (Gemini) ---
-# This block is critical for ensuring the API key is used.
 llm = None  # Initialize to None in case of failure
 try:
-    # Prioritize the API key from settings (loaded from HF Secrets)
-    # settings.GEMINI_API_KEY should be populated by Pydantic BaseSettings from the HF Secret
     gemini_api_key_from_settings = settings.GEMINI_API_KEY
-
-    # Fallback to environment variable GOOGLE_API_KEY if settings.GEMINI_API_KEY is not found/set
-    # (though ideally, settings.GEMINI_API_KEY should be the primary source via HF Secrets)
    api_key_to_use = gemini_api_key_from_settings or os.getenv("GOOGLE_API_KEY")
 
     if not api_key_to_use:
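Note: `settings.GEMINI_API_KEY` comes from `config/settings.py`, which is not part of this diff. A minimal sketch of what such a Pydantic settings class could look like (only the field name is taken from the diff; the env file and config options are assumptions):

```python
# config/settings.py -- hypothetical sketch; the real module may differ.
from typing import Optional

from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    # Populated from the environment (e.g. a Hugging Face Space secret named
    # GEMINI_API_KEY) or from a local .env file during development.
    GEMINI_API_KEY: Optional[str] = None

    model_config = SettingsConfigDict(env_file=".env", extra="ignore")


settings = Settings()
```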
@@ -44,35 +38,25 @@ try:
         )
 
     llm = ChatGoogleGenerativeAI(
-        model="gemini-1.5-pro-latest",
-        # model="gemini-pro", # Fallback if 1.5-pro is not available or for cost reasons
+        model="gemini-1.5-pro-latest",
         temperature=0.2,
-        google_api_key=api_key_to_use,
-        convert_system_message_to_human=True,
-        # Example safety settings (optional, adjust as needed)
-        # safety_settings={
-        #     HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
-        #     HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
-        # }
+        google_api_key=api_key_to_use,
+        convert_system_message_to_human=True,
     )
     app_logger.info(f"ChatGoogleGenerativeAI ({llm.model}) initialized successfully using provided API key.")
 
 except Exception as e:
-    # This broad exception catch is to provide a clear error message if LLM init fails for any reason.
     detailed_error_message = str(e)
     user_facing_error = f"Gemini LLM initialization failed: {detailed_error_message}. " \
                         "Check API key validity, model name, and configurations in Hugging Face Secrets."
-
     if "default credentials were not found" in detailed_error_message.lower() or \
        "could not find default credentials" in detailed_error_message.lower() or \
-       "api_key" in detailed_error_message.lower():
+       "api_key" in detailed_error_message.lower():
         user_facing_error = "Gemini LLM initialization failed: API key issue or missing credentials. " \
                             "Ensure GEMINI_API_KEY is correctly set in Hugging Face Secrets and is valid."
         app_logger.error(user_facing_error + f" Original error details: {detailed_error_message}", exc_info=False)
     else:
-        app_logger.error(user_facing_error, exc_info=True)
-
-    # Re-raise to stop agent setup if LLM fails. This will be caught in get_agent_executor.
+        app_logger.error(user_facing_error, exc_info=True)
     raise ValueError(user_facing_error)
 
 
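If initialization still fails, it can help to exercise the key outside the agent stack first. A standalone smoke test along these lines (the model name and key lookup mirror the diff; the script itself is hypothetical):

```python
# smoke_test_gemini.py -- hypothetical helper, not part of this commit.
import os

from langchain_google_genai import ChatGoogleGenerativeAI

api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
if not api_key:
    raise SystemExit("Set GEMINI_API_KEY or GOOGLE_API_KEY before running this check.")

llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro-latest",
    temperature=0.2,
    google_api_key=api_key,
)
# One short round-trip confirms the key and model name are accepted.
print(llm.invoke("Reply with the single word: ready").content)
```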
@@ -81,12 +65,11 @@ tools_list = [
     UMLSLookupTool(),
     BioPortalLookupTool(),
     QuantumTreatmentOptimizerTool(),
-    # GeminiTool(), # Add if you have a specific reason to use Gemini as a sub-tool
 ]
 app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")
 
 
-# --- Agent Prompt (for Structured Chat with Gemini) ---
+# --- Agent Prompt (for Structured Chat with Gemini and your tools) ---
 SYSTEM_PROMPT_TEMPLATE = (
     "You are 'Quantum Health Navigator', an advanced AI assistant for healthcare professionals. "
     "Your primary goal is to provide accurate information and insights based on user queries and available tools. "
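The tool classes themselves are not shown in this commit. For orientation only, a custom LangChain tool such as `UMLSLookupTool` would typically follow this shape (names and behavior here are illustrative, not the repo's actual implementation):

```python
# Hypothetical shape only -- the real UMLSLookupTool lives elsewhere in the
# repo and is not part of this diff.
from langchain_core.tools import BaseTool


class UMLSLookupToolSketch(BaseTool):
    name: str = "umls_lookup"  # must match the name used in the system prompt
    description: str = (
        "Look up a clinical term in UMLS and return candidate concepts. "
        "Input is a single term or phrase, e.g. 'myocardial infarction'."
    )

    def _run(self, query: str) -> str:
        # The real implementation would call the UMLS REST API here.
        raise NotImplementedError("Illustrative sketch only.")
```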
@@ -96,7 +79,7 @@ SYSTEM_PROMPT_TEMPLATE = (
     "unless it's the direct output of a specialized tool like 'quantum_treatment_optimizer'.\n"
     "2. Patient Context: The user may provide patient context at the start of the session. This context is available as: {patient_context}. "
     "You MUST consider this context when it's relevant to the query, especially for the 'quantum_treatment_optimizer' tool.\n"
-    "3. Tool Usage: You have access to the following tools:\n{tools}\n"
+    "3. Tool Usage: You have access to the following tools (names: {tool_names}):\n{tools}\n" # <--- {tool_names} ADDED HERE
     " To use a tool, respond *only* with a JSON markdown code block with 'action' and 'action_input' keys. "
     " The 'action_input' must match the schema for the specified tool. Examples:\n"
     " For `umls_lookup`: ```json\n{{\"action\": \"umls_lookup\", \"action_input\": \"myocardial infarction\"}}\n```\n"
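A side note on the `{{...}}` escaping used in the JSON example above: `ChatPromptTemplate` treats single braces as template variables, so doubled braces are needed to emit literal JSON. A quick standalone demonstration (not part of the commit):

```python
# Doubled braces render as literal { and } once the template is formatted.
from langchain_core.prompts import ChatPromptTemplate

demo = ChatPromptTemplate.from_messages([
    ("system", '{{"action": "umls_lookup", "action_input": "{term}"}}'),
])
print(demo.format_messages(term="myocardial infarction")[0].content)
# {"action": "umls_lookup", "action_input": "myocardial infarction"}
```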
@@ -112,7 +95,7 @@ SYSTEM_PROMPT_TEMPLATE = (
     "Previous conversation history:\n"
     "{chat_history}\n\n"
     "New human question: {input}\n"
-    "{agent_scratchpad}"
+    "{agent_scratchpad}"
 )
 
 prompt = ChatPromptTemplate.from_messages([
@@ -122,22 +105,18 @@ prompt = ChatPromptTemplate.from_messages([
 app_logger.info("Agent prompt template created for Gemini structured chat agent.")
 
 # --- Create Agent ---
-# This assumes `llm` was successfully initialized above.
 if llm is None:
-    # This case should ideally not be reached if the ValueError was raised during LLM init,
-    # but as a defensive measure:
     app_logger.critical("LLM object is None at agent creation stage. Cannot proceed.")
-    # The ValueError from LLM init should have already stopped the module loading.
-    # If somehow execution reaches here with llm=None, something is very wrong.
     raise SystemExit("Agent LLM failed to initialize. Application cannot start.")
 
 try:
     agent = create_structured_chat_agent(llm=llm, tools=tools_list, prompt=prompt)
     app_logger.info("Structured chat agent created successfully with Gemini LLM and tools.")
 except Exception as e:
+    # The error "Prompt missing required variables: {'tool_names'}" would be caught here
+    # if the placeholder wasn't correctly handled or if others are missing.
     app_logger.error(f"Failed to create structured chat agent: {e}", exc_info=True)
-    raise ValueError(f"Gemini agent creation failed: {e}")
-
+    raise ValueError(f"Gemini agent creation failed: {e}")  # This is what you saw in the UI
 
 # --- Create Agent Executor ---
 agent_executor = AgentExecutor(
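The two comments added in this hunk refer to the variable check performed by `create_structured_chat_agent`, which expects `tools`, `tool_names`, and `agent_scratchpad` to be declared in the prompt. A small diagnostic that surfaces the same problem right after `prompt` is built (a sketch, not part of the commit):

```python
# Sketch: create_structured_chat_agent fills in {tools} and {tool_names}
# itself, but it first checks that the prompt declares those variables
# (plus {agent_scratchpad}); a missing one produces the error quoted above.
required_vars = {"tools", "tool_names", "agent_scratchpad"}
missing_vars = required_vars - set(prompt.input_variables)
if missing_vars:
    app_logger.error(f"Prompt is missing required variables: {missing_vars}")
```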
@@ -147,55 +126,40 @@ agent_executor = AgentExecutor(
     handle_parsing_errors=True,
     max_iterations=10,
     early_stopping_method="generate",
-    # return_intermediate_steps=True, # Good for debugging, makes response a dict with 'intermediate_steps'
 )
 app_logger.info("AgentExecutor with Gemini agent created successfully.")
 
-
 # --- Getter Function for Streamlit App ---
-_agent_executor_instance = agent_executor
+_agent_executor_instance = agent_executor
 
 def get_agent_executor():
-    """
-    Returns the configured agent executor for Gemini.
-    The executor is initialized when this module is first imported.
-    """
     global _agent_executor_instance
     if _agent_executor_instance is None:
-
-        # It might indicate an issue where the module is reloaded or init failed silently.
-        app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called. Re-initialization attempt or fundamental error.")
-        # You could try to re-initialize here, but it's better to ensure init works on first load.
-        # For now, raise an error to make it obvious.
+        app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called.")
         raise RuntimeError("Agent executor was not properly initialized. Check application startup logs.")
     return _agent_executor_instance
 
-# --- Example Usage (for local testing
+# --- Example Usage (for local testing) ---
 if __name__ == "__main__":
-    # Check if the API key is available for the test
     main_test_api_key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
     if not main_test_api_key:
         print("🚨 Please set your GOOGLE_API_KEY (for Gemini) in .env file or as an environment variable to run the test.")
     else:
         print("\n🚀 Quantum Health Navigator (Gemini Agent Test Console) 🚀")
         print("-----------------------------------------------------------")
-
-        print("Example topics: medical definitions, treatment optimization (will use simulated patient context).")
-        print("-" * 59)
-
+        # ... (rest of __main__ block from previous full agent.py) ...
         try:
-            test_executor = get_agent_executor()
+            test_executor = get_agent_executor()
         except ValueError as e_init:
             print(f"⚠️ Agent initialization failed during test startup: {e_init}")
             print("Ensure your API key is correctly configured.")
-            exit()
+            exit()
 
         current_chat_history_for_test_run = []
-
         test_patient_context_summary_str = (
-            "Age:
+            "Age: 62; Gender: Female; Chief Complaint: Recent onset of blurry vision and fatigue; "
             "Key Medical History: Prediabetes, Mild dyslipidemia; "
-            "Current Medications: None; Allergies:
+            "Current Medications: None reported; Allergies: Sulfa drugs."
         )
         print(f"ℹ️ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")
 
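For context, the getter above is intended for the Streamlit app, which is not included in this diff. A rough sketch of how such a caller could wire it up (file name, widgets, and session keys are assumptions):

```python
# app.py -- hypothetical caller sketch; the actual Streamlit app is not in this diff.
import streamlit as st
from langchain_core.messages import AIMessage, HumanMessage

from agent import get_agent_executor

executor = get_agent_executor()

if "history" not in st.session_state:
    st.session_state.history = []
if "patient_context" not in st.session_state:
    st.session_state.patient_context = "No patient context provided."

if user_msg := st.chat_input("Ask the Quantum Health Navigator..."):
    result = executor.invoke({
        "input": user_msg,
        "chat_history": st.session_state.history,
        "patient_context": st.session_state.patient_context,
    })
    answer = result.get("output", "No output produced.")
    st.session_state.history += [HumanMessage(content=user_msg), AIMessage(content=answer)]
    st.chat_message("assistant").write(answer)
```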
@@ -206,7 +170,6 @@ if __name__ == "__main__":
                 break
             if not user_input_str:
                 continue
-
             try:
                 app_logger.info(f"__main__ test: Invoking agent with input: '{user_input_str}'")
                 response_dict = test_executor.invoke({
@@ -214,16 +177,12 @@ if __name__ == "__main__":
                     "chat_history": current_chat_history_for_test_run,
                     "patient_context": test_patient_context_summary_str
                 })
-
                 ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
                 print(f"🤖 Agent: {ai_output_str}")
-
                 current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
                 current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
-
                 if len(current_chat_history_for_test_run) > 10:
                     current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
-
             except Exception as e:
                 print(f"⚠️ Error during agent invocation: {e}")
                 app_logger.error(f"Error in __main__ agent test invocation: {e}", exc_info=True)
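The comment removed in the executor hunk mentioned `return_intermediate_steps`; for debugging, enabling it changes the result shape roughly as follows (a sketch using the objects defined in this module, not code from the commit):

```python
# Debugging variant (not enabled in this commit): with return_intermediate_steps=True
# the invoke() result also carries the (AgentAction, observation) pairs.
debug_executor = AgentExecutor(
    agent=agent,
    tools=tools_list,
    handle_parsing_errors=True,
    return_intermediate_steps=True,
)
result = debug_executor.invoke({
    "input": "Define myocardial infarction.",
    "chat_history": [],
    "patient_context": "No patient context provided.",
})
for action, observation in result["intermediate_steps"]:
    print(action.tool, action.tool_input, "->", str(observation)[:120])
```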