import os

from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.agents import AgentExecutor, create_structured_chat_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

from tools.bioportal_tool import BioPortalLookupTool, BioPortalInput
from tools.gemini_tool import GeminiTool, GeminiInput
from tools.umls_tool import UMLSLookupTool, UMLSInput
from tools.quantum_treatment_optimizer_tool import QuantumTreatmentOptimizerTool, QuantumOptimizerInput

from config.settings import settings
from services.logger import app_logger
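
# --- Gemini LLM initialization ---
# Fail fast if no API key is available, then build the chat model used by the agent.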
try:
    if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
        raise ValueError("GOOGLE_API_KEY (for Gemini) not found in settings or environment.")

    llm = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro-latest",
        temperature=0.3,
        # Pass the key explicitly so settings.GEMINI_API_KEY is honored even when
        # GOOGLE_API_KEY is not exported in the environment.
        google_api_key=settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY"),
        # Folds the system prompt into the first human turn, for Gemini model versions
        # that reject a standalone system message.
        convert_system_message_to_human=True,
    )
    app_logger.info(f"ChatGoogleGenerativeAI ({llm.model}) initialized successfully.")
except Exception as e:
    app_logger.error(f"Failed to initialize ChatGoogleGenerativeAI: {e}", exc_info=True)
    raise ValueError(f"Gemini LLM initialization failed: {e}. Check the API key and configuration in HF Secrets.")
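
# --- Agent tools ---
# Tools exposed to the agent. GeminiTool is imported above but not registered here;
# the system prompt below treats a `google_gemini_chat` tool (presumably GeminiTool)
# as optional, so append GeminiTool() to this list to enable it.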
tools = [
    UMLSLookupTool(),
    BioPortalLookupTool(),
    QuantumTreatmentOptimizerTool(),
]
app_logger.info(f"Tools initialized: {[tool.name for tool in tools]}")
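
# --- Agent prompt ---
# create_structured_chat_agent requires the prompt to expose {tools}, {tool_names},
# and {agent_scratchpad}. This template also carries the app-specific
# {patient_context}, {chat_history}, and {input} variables, which are supplied at
# invoke() time.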
SYSTEM_PROMPT_TEMPLATE = (
    "You are 'Quantum Health Navigator', an advanced AI assistant for healthcare professionals. "
    "Your primary goal is to provide accurate information and insights based on user queries and available tools. "
    "You must adhere to the following guidelines:\n"
    "1. Disclaimers: Always remind the user that you are an AI, not a human medical professional, and that your information "
    "is for support, not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases "
    "unless it is the direct output of a specialized tool like 'quantum_treatment_optimizer'.\n"
    "2. Patient Context: The user may provide patient context at the start of the session. This context is available as: {patient_context}. "
    "You MUST consider this context when it is relevant to the query, especially for the 'quantum_treatment_optimizer' tool.\n"
    "3. Tool Usage: You have access to the following tools:\n{tools}\n"
    "   The valid tool names are: {tool_names}.\n"
    "   To use a tool, respond with a JSON markdown code block like this:\n"
    "   ```json\n"
    "   {{\n"
    '     "action": "tool_name",\n'
    '     "action_input": "query string for the tool" OR {{"arg1": "value1", "arg2": "value2", ...}}\n'
    "   }}\n"
    "   ```\n"
    "   When you have the final answer for the user, respond with the same JSON format, "
    'using "Final Answer" as the action and your complete response as the action_input.\n'
    "   - For `umls_lookup` and `google_gemini_chat`, `action_input` is a single string (the 'term' or 'query').\n"
    "   - For `bioportal_lookup`, `action_input` is a dictionary like `{{\"term\": \"search_term\", \"ontology\": \"ONTOLOGY_CODE\"}}`. If the ontology is not specified by the user, you can default to SNOMEDCT or ask.\n"
    "   - For `quantum_treatment_optimizer`, `action_input` is a dictionary like `{{\"patient_data\": {{...patient details...}}, \"current_treatments\": [\"med1\"], \"conditions\": [\"cond1\"]}}`. You MUST populate 'patient_data' using the overall {patient_context} if available and relevant.\n"
    "4. Responding to the User: After using a tool, you will receive an observation. Use this observation and your knowledge to formulate a comprehensive answer. Cite the tool if you used one (e.g., 'According to UMLS Lookup...').\n"
    "5. Specific Tool Guidance:\n"
    "   - If asked about treatment optimization for a specific patient (especially if context is provided), you MUST use the `quantum_treatment_optimizer` tool.\n"
    "   - For definitions, codes, or general medical concepts, `umls_lookup` or `bioportal_lookup` are appropriate.\n"
    "   - If the query is very general, complex, or creative beyond simple lookups, you might consider using `google_gemini_chat` (if enabled) or answering directly if confident.\n"
    "6. Conversation Flow: Refer to the `Previous conversation history` to maintain context.\n\n"
    "Begin!\n\n"
    "Previous conversation history:\n"
    "{chat_history}\n\n"
    "New human question: {input}\n"
    "{agent_scratchpad}"
)
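
# chat_history, input, and agent_scratchpad are interpolated as plain text inside the
# system template above, so no MessagesPlaceholder is needed here:
# create_structured_chat_agent supplies the scratchpad as a string, not as messages.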
prompt = ChatPromptTemplate.from_messages([
    ("system", SYSTEM_PROMPT_TEMPLATE),
])
app_logger.info("Agent prompt template created.")
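
# --- Agent construction ---
# create_structured_chat_agent fills {tools} and {tool_names} from the tool list and
# returns a runnable that emits the JSON action blocks described in the system prompt.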
try:
    agent = create_structured_chat_agent(llm=llm, tools=tools, prompt=prompt)
    app_logger.info("Structured chat agent created successfully with Gemini LLM.")
except Exception as e:
    app_logger.error(f"Failed to create structured chat agent: {e}", exc_info=True)
    raise ValueError(f"Gemini agent creation failed: {e}")
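
# --- Agent executor ---
# handle_parsing_errors feeds malformed JSON back to the model instead of raising,
# and max_iterations bounds the tool-call loop.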
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True,
    max_iterations=10,
    # Runnable-based agents created by create_structured_chat_agent only support "force"
    # early stopping; "generate" raises once the iteration limit is reached.
    early_stopping_method="force",
)
app_logger.info("AgentExecutor with Gemini agent created successfully.")
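
# Rough usage sketch for callers (the exact front end, e.g. a Streamlit/Gradio app on
# HF Spaces, is an assumption; the input keys must match the prompt variables above):
#
#     executor = get_agent_executor()
#     result = executor.invoke({
#         "input": "What is hypertension?",
#         "chat_history": [],  # list of HumanMessage/AIMessage
#         "patient_context": "Age: 45; ...",
#     })
#     print(result["output"])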
def get_agent_executor():
    """Returns the configured agent executor for Gemini."""
    if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
        app_logger.error("CRITICAL: GOOGLE_API_KEY (for Gemini) is not available at get_agent_executor call.")
        raise ValueError("Google API Key for Gemini not configured. Agent cannot function.")
    return agent_executor
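
# Manual test console: run this module directly to chat with the agent using a
# simulated patient context.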
if __name__ == "__main__":
    if not (settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")):
        print("Please set your GOOGLE_API_KEY in your .env file or as an environment variable.")
    else:
        print("\nQuantum Health Navigator (Gemini Agent Test Console)")
        print("Type 'exit' or 'quit' to stop.")
        print("Example queries:")
        print(" - What is hypertension?")
        print(" - Lookup 'myocardial infarction' in UMLS.")
        print(" - Search for 'diabetes mellitus type 2' in BioPortal using SNOMEDCT ontology.")
        print(" - Optimize treatment for a patient (context will be simulated).")
        print("-" * 30)

        executor = get_agent_executor()
        current_chat_history_for_test = []

        test_patient_context_summary = (
            "Age: 45; Gender: Male; Chief Complaint: Intermittent chest pain; "
            "Key Medical History: Hyperlipidemia; Current Medications: Atorvastatin 20mg."
        )

        while True:
            user_input_str = input("\n👤 You: ")
            if user_input_str.lower() in ["exit", "quit"]:
                print("Exiting test console.")
                break

            try:
                app_logger.info(f"__main__ test: Invoking agent with input: '{user_input_str}'")
                response = executor.invoke({
                    "input": user_input_str,
                    "chat_history": current_chat_history_for_test,
                    "patient_context": test_patient_context_summary
                })

                ai_output_str = response.get('output', "Agent did not produce an output.")
                print(f"🤖 Agent: {ai_output_str}")

                current_chat_history_for_test.append(HumanMessage(content=user_input_str))
                current_chat_history_for_test.append(AIMessage(content=ai_output_str))

            except Exception as e:
                print(f"⚠️ Error during agent invocation: {e}")
                app_logger.error(f"Error in __main__ agent test invocation: {e}", exc_info=True)