Update agent.py
Browse files
agent.py
CHANGED
@@ -1,35 +1,30 @@
|
|
1 |
-
# /home/user/app/agent.py
|
2 |
import os
|
3 |
import sys
|
4 |
-
from typing import List
|
5 |
|
6 |
-
from langchain.agents import AgentExecutor, create_structured_chat_agent
|
7 |
from langchain.prompts import ChatPromptTemplate
|
8 |
from langchain.prompts.chat import MessagesPlaceholder
|
9 |
-
from langchain.
|
|
|
10 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
11 |
|
12 |
from config.settings import settings
|
13 |
from services.logger import app_logger
|
14 |
-
from tools import
|
15 |
-
BioPortalLookupTool,
|
16 |
-
UMLSLookupTool,
|
17 |
-
QuantumTreatmentOptimizerTool,
|
18 |
-
)
|
19 |
|
20 |
# -----------------------------------------------------------------------------
|
21 |
-
# 1. Initialize Gemini LLM
|
22 |
# -----------------------------------------------------------------------------
|
23 |
def _init_llm() -> ChatGoogleGenerativeAI:
|
24 |
"""
|
25 |
-
Initialize the Google Gemini LLM
|
26 |
Raises ValueError if no key is found or initialization fails.
|
27 |
"""
|
28 |
api_key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
|
29 |
if not api_key:
|
30 |
-
|
31 |
-
app_logger.error(
|
32 |
-
raise ValueError(
|
33 |
|
34 |
try:
|
35 |
llm = ChatGoogleGenerativeAI(
|
@@ -38,54 +33,59 @@ def _init_llm() -> ChatGoogleGenerativeAI:
|
|
38 |
google_api_key=api_key,
|
39 |
convert_system_message_to_human=True,
|
40 |
)
|
41 |
-
app_logger.info(f"
|
42 |
return llm
|
43 |
except Exception as e:
|
44 |
-
|
45 |
-
app_logger.error(
|
46 |
-
raise ValueError(
|
47 |
|
48 |
# -----------------------------------------------------------------------------
|
49 |
-
# 2. Build
|
50 |
# -----------------------------------------------------------------------------
|
51 |
def _build_prompt_template(tool_names: List[str], tools) -> ChatPromptTemplate:
|
52 |
"""
|
53 |
-
|
54 |
-
|
|
|
|
|
|
|
55 |
"""
|
56 |
system_text = """
|
57 |
You are Quantum Health Navigator, an AI assistant for healthcare professionals.
|
58 |
-
|
59 |
-
β’
|
60 |
-
β’
|
|
|
61 |
{tools}
|
62 |
|
63 |
-
To
|
64 |
{"action": "<tool_name>", "action_input": <input>}
|
65 |
|
66 |
-
After the tool
|
67 |
""".strip()
|
68 |
|
69 |
return ChatPromptTemplate.from_messages([
|
70 |
("system", system_text),
|
71 |
-
MessagesPlaceholder(variable_name="chat_history"),
|
72 |
-
("human", "{input}"),
|
73 |
-
MessagesPlaceholder(variable_name="agent_scratchpad"),
|
74 |
])
|
75 |
|
76 |
# -----------------------------------------------------------------------------
|
77 |
-
# 3.
|
78 |
# -----------------------------------------------------------------------------
|
79 |
def get_agent_executor() -> AgentExecutor:
|
80 |
"""
|
81 |
-
|
|
|
82 |
"""
|
83 |
global _agent_executor_instance
|
84 |
-
if
|
85 |
-
# Initialize LLM
|
86 |
llm = _init_llm()
|
87 |
|
88 |
-
# Prepare tools
|
89 |
tools_list = [
|
90 |
UMLSLookupTool(),
|
91 |
BioPortalLookupTool(),
|
@@ -93,22 +93,22 @@ def get_agent_executor() -> AgentExecutor:
|
|
93 |
]
|
94 |
app_logger.info(f"Loaded tools: {[t.name for t in tools_list]}")
|
95 |
|
96 |
-
# Build prompt
|
97 |
prompt = _build_prompt_template(
|
98 |
tool_names=[t.name for t in tools_list],
|
99 |
tools=tools_list
|
100 |
)
|
101 |
-
app_logger.info("Prompt template built
|
102 |
|
103 |
-
# Create structured agent
|
104 |
agent = create_structured_chat_agent(
|
105 |
llm=llm,
|
106 |
tools=tools_list,
|
107 |
prompt=prompt
|
108 |
)
|
109 |
-
app_logger.info("Structured chat agent created
|
110 |
|
111 |
-
# Create executor
|
112 |
executor = AgentExecutor(
|
113 |
agent=agent,
|
114 |
tools=tools_list,
|
@@ -117,27 +117,27 @@ def get_agent_executor() -> AgentExecutor:
|
|
117 |
max_iterations=10,
|
118 |
early_stopping_method="generate",
|
119 |
)
|
120 |
-
app_logger.info("AgentExecutor initialized
|
121 |
_agent_executor_instance = executor
|
122 |
|
123 |
return _agent_executor_instance
|
124 |
|
125 |
# -----------------------------------------------------------------------------
|
126 |
-
# 4.
|
127 |
# -----------------------------------------------------------------------------
|
128 |
if __name__ == "__main__":
|
129 |
try:
|
130 |
executor = get_agent_executor()
|
131 |
except Exception as e:
|
132 |
-
print(f"β Initialization
|
133 |
sys.exit(1)
|
134 |
|
135 |
-
#
|
136 |
patient_context = (
|
137 |
"Age: 58; Gender: Female; Chief Complaint: Blurry vision & fatigue; "
|
138 |
-
"
|
139 |
)
|
140 |
-
chat_history: List[SystemMessage
|
141 |
|
142 |
print("π Quantum Health Navigator Console (type 'exit' to quit)")
|
143 |
while True:
|
@@ -148,7 +148,6 @@ if __name__ == "__main__":
|
|
148 |
if not user_input:
|
149 |
continue
|
150 |
|
151 |
-
# Invoke the agent
|
152 |
try:
|
153 |
result = executor.invoke({
|
154 |
"input": user_input,
|
@@ -157,12 +156,13 @@ if __name__ == "__main__":
|
|
157 |
})
|
158 |
reply = result.get("output", "")
|
159 |
print(f"π€ Agent: {reply}\n")
|
160 |
-
|
|
|
161 |
chat_history.append(HumanMessage(content=user_input))
|
162 |
chat_history.append(AIMessage(content=reply))
|
163 |
-
#
|
164 |
if len(chat_history) > 20:
|
165 |
chat_history = chat_history[-20:]
|
166 |
except Exception as err:
|
167 |
-
print(f"β οΈ
|
168 |
-
app_logger.error("
|
|
|
|
|
1 |
import os
|
2 |
import sys
|
3 |
+
from typing import List, Union
|
4 |
|
|
|
5 |
from langchain.prompts import ChatPromptTemplate
|
6 |
from langchain.prompts.chat import MessagesPlaceholder
|
7 |
+
from langchain.schema import BaseMessage, AIMessage, HumanMessage, SystemMessage
|
8 |
+
from langchain.agents import AgentExecutor, create_structured_chat_agent
|
9 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
10 |
|
11 |
from config.settings import settings
|
12 |
from services.logger import app_logger
|
13 |
+
from tools import BioPortalLookupTool, UMLSLookupTool, QuantumTreatmentOptimizerTool
|
|
|
|
|
|
|
|
|
14 |
|
15 |
# -----------------------------------------------------------------------------
|
16 |
+
# 1. Initialize the Gemini LLM
|
17 |
# -----------------------------------------------------------------------------
|
18 |
def _init_llm() -> ChatGoogleGenerativeAI:
|
19 |
"""
|
20 |
+
Initialize the Google Gemini LLM with the configured API key.
|
21 |
Raises ValueError if no key is found or initialization fails.
|
22 |
"""
|
23 |
api_key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
|
24 |
if not api_key:
|
25 |
+
err = "Gemini API key not found: set GEMINI_API_KEY in settings or GOOGLE_API_KEY in env"
|
26 |
+
app_logger.error(err)
|
27 |
+
raise ValueError(err)
|
28 |
|
29 |
try:
|
30 |
llm = ChatGoogleGenerativeAI(
|
|
|
33 |
google_api_key=api_key,
|
34 |
convert_system_message_to_human=True,
|
35 |
)
|
36 |
+
app_logger.info(f"Gemini LLM initialized ({llm.model})")
|
37 |
return llm
|
38 |
except Exception as e:
|
39 |
+
err = f"Failed to initialize Gemini LLM: {e}"
|
40 |
+
app_logger.error(err, exc_info=True)
|
41 |
+
raise ValueError(err)
|
42 |
|
43 |
# -----------------------------------------------------------------------------
|
44 |
+
# 2. Build the structured chat prompt
|
45 |
# -----------------------------------------------------------------------------
|
46 |
def _build_prompt_template(tool_names: List[str], tools) -> ChatPromptTemplate:
    """
    Construct the structured-chat prompt used by the agent.

    The resulting template contains:
    - a system instruction block (role, patient context, tool list, tool-call protocol),
    - a MessagesPlaceholder for ``chat_history`` (a list of BaseMessage),
    - the current human ``{input}``,
    - a MessagesPlaceholder for ``agent_scratchpad`` that the structured-chat
      agent uses to track intermediate tool calls.

    Args:
        tool_names: Names of the available tools, rendered via ``{tool_names}``.
        tools: The tool instances themselves, rendered via ``{tools}``.

    Returns:
        A ChatPromptTemplate whose input variables include ``patient_context``,
        ``tool_names``, ``tools``, ``input``, ``chat_history`` and
        ``agent_scratchpad``.
    """
    # NOTE: the literal braces of the JSON example below are doubled ({{ }}).
    # ChatPromptTemplate uses f-string formatting, so single braces would be
    # parsed as template variables and raise at format time.
    system_text = """
You are Quantum Health Navigator, an AI assistant for healthcare professionals.

• Disclaim: you are an AI, not a substitute for clinical judgment.
• Patient context is provided as: {patient_context}
• Available tools: {tool_names}
{tools}

To call a tool, reply *only* with a JSON block:
{{"action": "<tool_name>", "action_input": <input>}}

After receiving the tool's output, craft a full answer for the user, citing any tools used.
""".strip()

    return ChatPromptTemplate.from_messages([
        ("system", system_text),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])
|
74 |
|
75 |
# -----------------------------------------------------------------------------
|
76 |
+
# 3. Lazily build and return the AgentExecutor singleton
|
77 |
# -----------------------------------------------------------------------------
|
78 |
def get_agent_executor() -> AgentExecutor:
|
79 |
"""
|
80 |
+
Returns a singleton AgentExecutor, creating it on first call.
|
81 |
+
Sets up LLM, tools, prompt, and executor params.
|
82 |
"""
|
83 |
global _agent_executor_instance
|
84 |
+
if "_agent_executor_instance" not in globals():
|
85 |
+
# 3.1 Initialize LLM
|
86 |
llm = _init_llm()
|
87 |
|
88 |
+
# 3.2 Prepare tools
|
89 |
tools_list = [
|
90 |
UMLSLookupTool(),
|
91 |
BioPortalLookupTool(),
|
|
|
93 |
]
|
94 |
app_logger.info(f"Loaded tools: {[t.name for t in tools_list]}")
|
95 |
|
96 |
+
# 3.3 Build prompt
|
97 |
prompt = _build_prompt_template(
|
98 |
tool_names=[t.name for t in tools_list],
|
99 |
tools=tools_list
|
100 |
)
|
101 |
+
app_logger.info("Prompt template built")
|
102 |
|
103 |
+
# 3.4 Create the structured agent
|
104 |
agent = create_structured_chat_agent(
|
105 |
llm=llm,
|
106 |
tools=tools_list,
|
107 |
prompt=prompt
|
108 |
)
|
109 |
+
app_logger.info("Structured chat agent created")
|
110 |
|
111 |
+
# 3.5 Create the executor
|
112 |
executor = AgentExecutor(
|
113 |
agent=agent,
|
114 |
tools=tools_list,
|
|
|
117 |
max_iterations=10,
|
118 |
early_stopping_method="generate",
|
119 |
)
|
120 |
+
app_logger.info("AgentExecutor initialized")
|
121 |
_agent_executor_instance = executor
|
122 |
|
123 |
return _agent_executor_instance
|
124 |
|
125 |
# -----------------------------------------------------------------------------
|
126 |
+
# 4. Optional REPL for local testing
|
127 |
# -----------------------------------------------------------------------------
|
128 |
if __name__ == "__main__":
|
129 |
try:
|
130 |
executor = get_agent_executor()
|
131 |
except Exception as e:
|
132 |
+
print(f"β Initialization failed: {e}")
|
133 |
sys.exit(1)
|
134 |
|
135 |
+
# Sample patient context for testing
|
136 |
patient_context = (
|
137 |
"Age: 58; Gender: Female; Chief Complaint: Blurry vision & fatigue; "
|
138 |
+
"History: Prediabetes, mild dyslipidemia; Medications: None."
|
139 |
)
|
140 |
+
chat_history: List[Union[SystemMessage, HumanMessage, AIMessage]] = []
|
141 |
|
142 |
print("π Quantum Health Navigator Console (type 'exit' to quit)")
|
143 |
while True:
|
|
|
148 |
if not user_input:
|
149 |
continue
|
150 |
|
|
|
151 |
try:
|
152 |
result = executor.invoke({
|
153 |
"input": user_input,
|
|
|
156 |
})
|
157 |
reply = result.get("output", "")
|
158 |
print(f"π€ Agent: {reply}\n")
|
159 |
+
|
160 |
+
# Update history
|
161 |
chat_history.append(HumanMessage(content=user_input))
|
162 |
chat_history.append(AIMessage(content=reply))
|
163 |
+
# Trim to last 20 messages
|
164 |
if len(chat_history) > 20:
|
165 |
chat_history = chat_history[-20:]
|
166 |
except Exception as err:
|
167 |
+
print(f"β οΈ Inference error: {err}")
|
168 |
+
app_logger.error("Runtime error in REPL", exc_info=True)
|