mgbam committed on
Commit
93406ed
Β·
verified Β·
1 Parent(s): 1aff0c6

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +162 -154
agent.py CHANGED
@@ -1,165 +1,173 @@
 
1
  import os
2
- import sys
3
- from typing import List, Union
4
-
5
- from langchain.prompts import ChatPromptTemplate
6
- from langchain.prompts.chat import MessagesPlaceholder
7
- from langchain.schema import BaseMessage, AIMessage, HumanMessage, SystemMessage
8
- from langchain.agents import AgentExecutor, create_structured_chat_agent
9
- from langchain_google_genai import ChatGoogleGenerativeAI
10
-
11
- from config.settings import settings
 
 
 
 
 
 
12
  from services.logger import app_logger
13
- from tools import BioPortalLookupTool, UMLSLookupTool, QuantumTreatmentOptimizerTool
14
 
15
# -----------------------------------------------------------------------------
# 1. Initialize the Gemini LLM
# -----------------------------------------------------------------------------
def _init_llm() -> ChatGoogleGenerativeAI:
    """
    Build and return the Gemini chat model used by the agent.

    The API key is taken from settings.GEMINI_API_KEY first, falling back to
    the GOOGLE_API_KEY environment variable.

    Raises:
        ValueError: when no key is available or model construction fails.
    """
    key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
    if not key:
        message = "Gemini API key not found: set GEMINI_API_KEY in settings or GOOGLE_API_KEY in env"
        app_logger.error(message)
        raise ValueError(message)

    try:
        model = ChatGoogleGenerativeAI(
            model="gemini-1.5-pro-latest",
            temperature=0.2,  # low temperature for more deterministic output
            google_api_key=key,
            convert_system_message_to_human=True,
        )
        app_logger.info(f"Gemini LLM initialized ({model.model})")
        return model
    except Exception as exc:
        message = f"Failed to initialize Gemini LLM: {exc}"
        app_logger.error(message, exc_info=True)
        raise ValueError(message)
43
# -----------------------------------------------------------------------------
# 2. Build the structured chat prompt
# -----------------------------------------------------------------------------
def _build_prompt_template(tool_names: List[str], tools) -> ChatPromptTemplate:
    """
    Assemble the ChatPromptTemplate used by the structured chat agent.

    The template is made of a system instruction block, a chat_history
    placeholder (List[BaseMessage]), the current human input, and an
    agent_scratchpad placeholder that the agent uses for tool-call bookkeeping.
    """
    instructions = (
        "You are Quantum Health Navigator, an AI assistant for healthcare professionals.\n\n"
        "β€’ Disclaim: you are an AI, not a substitute for clinical judgment.\n"
        "β€’ Patient context: {patient_context}\n"
        "β€’ Available tools: {tool_names}\n"
        "{tools}\n\n"
        "To call a tool, reply *only* with a JSON code block:\n"
        "{{\"action\": \"<tool_name>\", \"action_input\": <input>}}\n\n"
        "After you receive the tool’s output, craft a full answer for the user, citing any tools used."
    )
    messages = [
        ("system", instructions),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
    return ChatPromptTemplate.from_messages(messages)
71
-
72
# -----------------------------------------------------------------------------
# 3. Lazily build and return the AgentExecutor singleton
# -----------------------------------------------------------------------------
def get_agent_executor() -> AgentExecutor:
    """
    Returns a singleton AgentExecutor, creating it on first call.
    Sets up LLM, tools, prompt, and executor params.
    """
    global _agent_executor_instance
    if "_agent_executor_instance" not in globals():
        # First call: wire up LLM, tools, prompt, and executor.
        gemini_llm = _init_llm()

        toolbox = [
            UMLSLookupTool(),
            BioPortalLookupTool(),
            QuantumTreatmentOptimizerTool(),
        ]
        app_logger.info(f"Loaded tools: {[t.name for t in toolbox]}")

        chat_prompt = _build_prompt_template(
            tool_names=[t.name for t in toolbox],
            tools=toolbox,
        )
        app_logger.info("Prompt template built")

        structured_agent = create_structured_chat_agent(
            llm=gemini_llm,
            tools=toolbox,
            prompt=chat_prompt,
        )
        app_logger.info("Structured chat agent created")

        executor = AgentExecutor(
            agent=structured_agent,
            tools=toolbox,
            verbose=True,
            handle_parsing_errors=True,  # retry on malformed JSON actions
            max_iterations=10,           # hard stop against tool-call loops
            early_stopping_method="generate",
        )
        app_logger.info("AgentExecutor initialized")
        _agent_executor_instance = executor

    return _agent_executor_instance
121
 
122
# -----------------------------------------------------------------------------
# 4. Optional REPL for local testing
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    try:
        executor = get_agent_executor()
    except Exception as e:
        print(f"❌ Initialization failed: {e}")
        sys.exit(1)

    # Hard-coded patient context used only for this local test session.
    patient_context = (
        "Age: 58; Gender: Female; Chief Complaint: Blurry vision & fatigue; "
        "History: Prediabetes, mild dyslipidemia; Medications: None."
    )
    chat_history: List[Union[SystemMessage, HumanMessage, AIMessage]] = []

    print("πŸš€ Quantum Health Navigator Console (type 'exit' to quit)")
    while True:
        user_input = input("πŸ‘€ You: ").strip()
        if user_input.lower() in {"exit", "quit"}:
            print("πŸ‘‹ Goodbye!")
            break
        if not user_input:
            continue

        try:
            response = executor.invoke({
                "input": user_input,
                "chat_history": chat_history,
                "patient_context": patient_context,
            })
            answer = response.get("output", "")
            print(f"πŸ€– Agent: {answer}\n")

            # Record the turn, then trim to the last 20 messages so the
            # prompt does not grow without bound.
            chat_history.append(HumanMessage(content=user_input))
            chat_history.append(AIMessage(content=answer))
            if len(chat_history) > 20:
                chat_history = chat_history[-20:]
        except Exception as err:
            print(f"⚠️ Inference error: {err}")
            app_logger.error("Runtime error in REPL", exc_info=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /home/user/app/agent.py
2
  import os
3
+ from langchain_openai import ChatOpenAI # For OpenAI models
4
+ from langchain.agents import AgentExecutor, create_openai_functions_agent # Agent optimized for OpenAI function calling
5
+
6
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
7
+ from langchain_core.messages import AIMessage, HumanMessage # SystemMessage not always explicitly needed in prompt list for this agent
8
+
9
+ # --- Import your defined tools FROM THE 'tools' PACKAGE ---
10
+ from tools import (
11
+ BioPortalLookupTool,
12
+ UMLSLookupTool,
13
+ QuantumTreatmentOptimizerTool,
14
+ # QuantumOptimizerInput, # Import if needed for type hints directly in this file
15
+ # GeminiTool, # Not needed if primary LLM is OpenAI
16
+ )
17
+
18
+ from config.settings import settings # This loads your HF secrets into the settings object
19
  from services.logger import app_logger
 
20
 
21
# --- Initialize LLM (OpenAI) ---
# Built at import time; a failure here aborts module import with ValueError.
llm = None
try:
    if not settings.OPENAI_API_KEY:
        app_logger.error("CRITICAL: OPENAI_API_KEY not found in settings. Agent cannot initialize.")
        raise ValueError("OpenAI API Key not configured. Please set it in Hugging Face Space secrets as OPENAI_API_KEY.")

    # gpt-3.5-turbo-0125 balances cost and function-calling capability;
    # "gpt-4-turbo-preview" / "gpt-4" are more capable but more expensive.
    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo-0125",
        temperature=0.2,  # low temperature keeps tool selection predictable
        openai_api_key=settings.OPENAI_API_KEY
    )
    app_logger.info(f"ChatOpenAI ({llm.model_name}) initialized successfully for agent.")

except Exception as e:
    detail = str(e)
    friendly = f"OpenAI LLM initialization failed: {detail}. Check API key and model name."
    lowered = detail.lower()
    if "api_key" in lowered or "authenticate" in lowered:
        # Auth-style failures get a tailored message and no traceback spam.
        friendly = "OpenAI LLM initialization failed: API key issue. Ensure OPENAI_API_KEY is correctly set in Hugging Face Secrets and is valid."
        app_logger.error(friendly + f" Original: {detail}", exc_info=False)
    else:
        app_logger.error(friendly, exc_info=True)
    raise ValueError(friendly)
47
+
48
+
49
# --- Initialize Tools List ---
# One instance of each tool the agent is allowed to call.
tools_list = [
    UMLSLookupTool(),
    BioPortalLookupTool(),
    QuantumTreatmentOptimizerTool(),
]
app_logger.info(f"Agent tools initialized: {[t.name for t in tools_list]}")
56
+
57
+
58
# --- Agent Prompt (for OpenAI Functions Agent) ---
# Tool selection is delegated to OpenAI's function-calling mechanism (which
# create_openai_functions_agent leverages and which sees the tool
# descriptions), so the prompt only needs system instructions plus
# placeholders for chat history, the human input, and the agent_scratchpad.
# {patient_context} must be supplied in every invoke() call.
OPENAI_SYSTEM_PROMPT_TEXT = (
    "You are 'Quantum Health Navigator', an AI assistant for healthcare professionals. "
    "Your goal is to assist with medical information lookup, treatment optimization queries, and general medical Q&A. "
    "You have access to a set of tools to help you. Use them when appropriate.\n"
    "Disclaimers: Always state that you are for informational support and not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases without using the 'quantum_treatment_optimizer' tool if relevant.\n"
    "Patient Context for this session (if provided by the user earlier): {patient_context}\n"
    "When using the 'quantum_treatment_optimizer' tool, ensure you populate its 'patient_data' argument using the available {patient_context}.\n"
    "Always be clear and concise. Cite tools if their output forms a key part of your answer."
)

prompt = ChatPromptTemplate.from_messages([
    ("system", OPENAI_SYSTEM_PROMPT_TEXT),
    MessagesPlaceholder(variable_name="chat_history"),
    ("human", "{input}"),
    # agent_scratchpad is mandatory for the OpenAI Functions agent to record
    # intermediate tool calls and observations.
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])
app_logger.info("Agent prompt template created for OpenAI Functions agent.")
85
+
86
# --- Create Agent ---
# Defensive guard: the LLM init above raises on failure, so llm should never
# be None here; bail out loudly if it somehow is.
if llm is None:
    app_logger.critical("LLM object is None at agent creation stage (OpenAI). Application cannot proceed.")
    raise SystemExit("Agent LLM failed to initialize. Application cannot start.")

try:
    agent = create_openai_functions_agent(llm=llm, tools=tools_list, prompt=prompt)
    app_logger.info("OpenAI Functions agent created successfully.")
except Exception as creation_err:
    app_logger.error(f"Failed to create OpenAI Functions agent: {creation_err}", exc_info=True)
    raise ValueError(f"OpenAI agent creation failed: {creation_err}")
97
+
98
+
99
# --- Create Agent Executor ---
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools_list,
    verbose=True,                # log each reasoning/tool step
    handle_parsing_errors=True,  # recover when the function-call output is malformed
    max_iterations=10,           # hard stop against tool-call loops
    # return_intermediate_steps=True,  # enable when debugging tool usage
)
app_logger.info("AgentExecutor with OpenAI agent created successfully.")
109
+
110
+
111
# --- Getter Function for Streamlit App ---
_agent_executor_instance = agent_executor

def get_agent_executor():
    """
    Return the module-level AgentExecutor that was built at import time.

    Raises:
        RuntimeError: if the executor was never initialized — that indicates
            a startup failure; check the application logs.
    """
    global _agent_executor_instance
    if _agent_executor_instance is not None:
        # Happy path: executor was wired up during module import.
        # (An OPENAI_API_KEY re-check is redundant here; LLM init enforced it.)
        return _agent_executor_instance
    app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called (OpenAI).")
    raise RuntimeError("Agent executor (OpenAI) was not properly initialized. Check startup logs.")
126
 
127
# --- Example Usage (for local testing) ---
if __name__ == "__main__":
    # Interactive smoke-test console for the OpenAI-backed agent.
    # Requires a valid OPENAI_API_KEY; conversation state is kept in-process.
    if not settings.OPENAI_API_KEY:
        print("🚨 Please set your OPENAI_API_KEY in .env file or as an environment variable to run the test.")
    else:
        print("\nπŸš€ Quantum Health Navigator (OpenAI Agent Test Console) πŸš€")
        print("-----------------------------------------------------------")
        try:
            test_executor = get_agent_executor()
        except (ValueError, RuntimeError) as e_init:
            # get_agent_executor raises RuntimeError when the executor is
            # missing; module-level init raises ValueError — catch both.
            # (The previous `except ValueError` let RuntimeError escape.)
            print(f"⚠️ Agent initialization failed: {e_init}")
            # exit() comes from the site module and may not exist under
            # `python -S`; SystemExit is always available.
            raise SystemExit(1)

        # Rolling chat history (list of BaseMessage), newest messages last.
        current_chat_history_for_test_run = []
        # Simulated patient context; interpolated into the system prompt.
        test_patient_context_summary_str = (
            "Age: 70; Gender: Male; Chief Complaint: Shortness of breath on exertion; "
            "Key Medical History: COPD, Atrial Fibrillation; "
            "Current Medications: Tiotropium inhaler, Apixaban 5mg BID; Allergies: Penicillin."
        )
        print(f"ℹ️ Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")

        while True:
            user_input_str = input("πŸ‘€ You: ").strip()
            if user_input_str.lower() in ["exit", "quit"]:
                print("πŸ‘‹ Exiting.")
                break
            if not user_input_str:
                continue
            try:
                app_logger.info(f"__main__ test (OpenAI): Invoking with: '{user_input_str}'")
                # invoke() needs "input", "chat_history", and every other
                # variable referenced by the prompt (here: "patient_context").
                response_dict = test_executor.invoke({
                    "input": user_input_str,
                    "chat_history": current_chat_history_for_test_run,
                    "patient_context": test_patient_context_summary_str
                })
                ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
                print(f"πŸ€– Agent: {ai_output_str}")
                current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
                current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
                # Trim to the last 10 messages so the prompt stays bounded.
                if len(current_chat_history_for_test_run) > 10:
                    current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]
            except Exception as e:
                print(f"⚠️ Error during agent invocation: {e}")
                app_logger.error(f"Error in __main__ OpenAI agent test invocation: {e}", exc_info=True)