mgbam committed on
Commit 5a1303c · verified · 1 Parent(s): 4a3a03e

Update agent.py

Files changed (1)
  1. agent.py +50 -50
agent.py CHANGED
@@ -1,35 +1,30 @@
-# /home/user/app/agent.py
 import os
 import sys
-from typing import List
+from typing import List, Union
 
-from langchain.agents import AgentExecutor, create_structured_chat_agent
 from langchain.prompts import ChatPromptTemplate
 from langchain.prompts.chat import MessagesPlaceholder
-from langchain.messages import AIMessage, HumanMessage, SystemMessage
+from langchain.schema import BaseMessage, AIMessage, HumanMessage, SystemMessage
+from langchain.agents import AgentExecutor, create_structured_chat_agent
 from langchain_google_genai import ChatGoogleGenerativeAI
 
 from config.settings import settings
 from services.logger import app_logger
-from tools import (
-    BioPortalLookupTool,
-    UMLSLookupTool,
-    QuantumTreatmentOptimizerTool,
-)
+from tools import BioPortalLookupTool, UMLSLookupTool, QuantumTreatmentOptimizerTool
 
 # -----------------------------------------------------------------------------
-# 1. Initialize Gemini LLM
+# 1. Initialize the Gemini LLM
 # -----------------------------------------------------------------------------
 def _init_llm() -> ChatGoogleGenerativeAI:
     """
-    Initialize the Google Gemini LLM client with the configured API key.
+    Initialize the Google Gemini LLM with the configured API key.
     Raises ValueError if no key is found or initialization fails.
     """
     api_key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
     if not api_key:
-        msg = "Gemini API key missing: set GEMINI_API_KEY in settings or GOOGLE_API_KEY env var"
-        app_logger.error(msg)
-        raise ValueError(msg)
+        err = "Gemini API key not found: set GEMINI_API_KEY in settings or GOOGLE_API_KEY in env"
+        app_logger.error(err)
+        raise ValueError(err)
 
     try:
         llm = ChatGoogleGenerativeAI(
@@ -38,54 +33,59 @@ def _init_llm() -> ChatGoogleGenerativeAI:
             google_api_key=api_key,
             convert_system_message_to_human=True,
         )
-        app_logger.info(f"Initialized Gemini LLM ({llm.model}) successfully.")
+        app_logger.info(f"Gemini LLM initialized ({llm.model})")
         return llm
     except Exception as e:
-        msg = f"Failed to initialize Gemini LLM: {e}"
-        app_logger.error(msg, exc_info=True)
-        raise ValueError(msg)
+        err = f"Failed to initialize Gemini LLM: {e}"
+        app_logger.error(err, exc_info=True)
+        raise ValueError(err)
 
 # -----------------------------------------------------------------------------
-# 2. Build Prompt Template
+# 2. Build the structured chat prompt
 # -----------------------------------------------------------------------------
 def _build_prompt_template(tool_names: List[str], tools) -> ChatPromptTemplate:
     """
-    Build a structured chat prompt template including system instructions,
-    chat history, user input, and an agent scratchpad for intermediate tool calls.
+    Construct a ChatPromptTemplate that includes:
+      - a system instruction block,
+      - a placeholder for chat_history (List[BaseMessage]),
+      - the current human input,
+      - a placeholder for agent_scratchpad (List[BaseMessage]) to manage tool calls.
     """
     system_text = """
 You are Quantum Health Navigator, an AI assistant for healthcare professionals.
-• Always disclose you are an AI and not a substitute for clinical judgment.
-• Patient context: {patient_context}
-• Tools available: {tool_names}
+
+• Disclaim: you are an AI, not a substitute for clinical judgment.
+• Patient context is provided as: {patient_context}
+• Available tools: {tool_names}
 {tools}
 
-To invoke a tool, respond only with a JSON code block containing:
+To call a tool, reply *only* with a JSON block:
 {"action": "<tool_name>", "action_input": <input>}
 
-After the tool observation, craft a user-facing answer, citing the tool when used.
+After receiving the tool’s output, craft a full answer for the user, citing any tools used.
 """.strip()
 
     return ChatPromptTemplate.from_messages([
         ("system", system_text),
-        MessagesPlaceholder(variable_name="chat_history"),  # List[BaseMessage]
-        ("human", "{input}"),  # Current user input
-        MessagesPlaceholder(variable_name="agent_scratchpad"),  # List[BaseMessage]
+        MessagesPlaceholder(variable_name="chat_history"),
+        ("human", "{input}"),
+        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])
 
 # -----------------------------------------------------------------------------
-# 3. Assemble Agent and Executor
+# 3. Lazily build and return the AgentExecutor singleton
 # -----------------------------------------------------------------------------
 def get_agent_executor() -> AgentExecutor:
     """
-    Lazily initialize and return the singleton AgentExecutor.
+    Returns a singleton AgentExecutor, creating it on first call.
+    Sets up LLM, tools, prompt, and executor params.
     """
     global _agent_executor_instance
-    if '_agent_executor_instance' not in globals():
-        # Initialize LLM
+    if "_agent_executor_instance" not in globals():
+        # 3.1 Initialize LLM
         llm = _init_llm()
 
-        # Prepare tools
+        # 3.2 Prepare tools
         tools_list = [
             UMLSLookupTool(),
             BioPortalLookupTool(),
@@ -93,22 +93,22 @@ def get_agent_executor() -> AgentExecutor:
         ]
         app_logger.info(f"Loaded tools: {[t.name for t in tools_list]}")
 
-        # Build prompt
+        # 3.3 Build prompt
         prompt = _build_prompt_template(
             tool_names=[t.name for t in tools_list],
             tools=tools_list
         )
-        app_logger.info("Prompt template built successfully.")
+        app_logger.info("Prompt template built")
 
-        # Create structured agent
+        # 3.4 Create the structured agent
         agent = create_structured_chat_agent(
             llm=llm,
             tools=tools_list,
             prompt=prompt
         )
-        app_logger.info("Structured chat agent created.")
+        app_logger.info("Structured chat agent created")
 
-        # Create executor
+        # 3.5 Create the executor
         executor = AgentExecutor(
             agent=agent,
             tools=tools_list,
@@ -117,27 +117,27 @@
             max_iterations=10,
             early_stopping_method="generate",
         )
-        app_logger.info("AgentExecutor initialized.")
+        app_logger.info("AgentExecutor initialized")
         _agent_executor_instance = executor
 
     return _agent_executor_instance
 
 # -----------------------------------------------------------------------------
-# 4. If run as script, provide a simple REPL for testing
+# 4. Optional REPL for local testing
 # -----------------------------------------------------------------------------
 if __name__ == "__main__":
     try:
         executor = get_agent_executor()
     except Exception as e:
-        print(f"❌ Initialization error: {e}")
+        print(f"❌ Initialization failed: {e}")
         sys.exit(1)
 
-    # Example patient context
+    # Sample patient context for testing
    patient_context = (
         "Age: 58; Gender: Female; Chief Complaint: Blurry vision & fatigue; "
-        "Key History: Prediabetes, mild dyslipidemia; Medications: None."
+        "History: Prediabetes, mild dyslipidemia; Medications: None."
     )
-    chat_history: List[SystemMessage | HumanMessage | AIMessage] = []
+    chat_history: List[Union[SystemMessage, HumanMessage, AIMessage]] = []
 
     print("🚀 Quantum Health Navigator Console (type 'exit' to quit)")
     while True:
@@ -148,7 +148,6 @@ if __name__ == "__main__":
         if not user_input:
             continue
 
-        # Invoke the agent
         try:
             result = executor.invoke({
                 "input": user_input,
@@ -157,12 +156,13 @@
             })
             reply = result.get("output", "")
             print(f"🤖 Agent: {reply}\n")
-            # Update local history
+
+            # Update history
             chat_history.append(HumanMessage(content=user_input))
             chat_history.append(AIMessage(content=reply))
-            # Keep history manageable
+            # Trim to last 20 messages
             if len(chat_history) > 20:
                 chat_history = chat_history[-20:]
         except Exception as err:
-            print(f"⚠️ Error during inference: {err}")
-            app_logger.error("Inference error", exc_info=True)
+            print(f"⚠️ Inference error: {err}")
+            app_logger.error("Runtime error in REPL", exc_info=True)
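
For reference, a minimal sketch of how another module in this Space might consume the executor above. It is illustrative only and not part of this commit: it assumes agent.py is importable as `agent`, that `chat_history` and `patient_context` are supplied at invoke time (as the prompt template's placeholders imply), and the helper name `ask_navigator` is hypothetical.

# Hypothetical caller, not part of this commit: shows the invoke contract used by the REPL.
from langchain.schema import AIMessage, HumanMessage

from agent import get_agent_executor


def ask_navigator(question: str, patient_context: str, chat_history: list) -> str:
    """Run one turn through the agent and record the exchange in chat_history."""
    executor = get_agent_executor()  # singleton: built on first call, reused afterwards
    result = executor.invoke({
        "input": question,
        "chat_history": chat_history,
        "patient_context": patient_context,
    })
    reply = result.get("output", "")
    chat_history.append(HumanMessage(content=question))
    chat_history.append(AIMessage(content=reply))
    return reply

Because get_agent_executor caches the executor in a module-level global, repeated calls from a UI callback reuse the same LLM client and tools rather than rebuilding them on every request.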