import os

from dotenv import load_dotenv
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_groq import ChatGroq
from langgraph.prebuilt import create_react_agent

# Load GROQ_API_KEY and TAVILY_API_KEY from a local .env file
load_dotenv()
def get_response_from_ai_agent(llm_id, query, allow_search, system_prompt):
    """
    Create a ReAct agent around the requested Groq model and return its
    reply to `query`, optionally giving it access to Tavily web search.
    """
    try:
        # Initialize the Groq-hosted LLM
        llm = ChatGroq(
            api_key=os.environ.get("GROQ_API_KEY"),
            model_name=llm_id,
        )

        # Attach the Tavily search tool only when the caller allows it.
        # TavilySearchResults reads TAVILY_API_KEY from the environment
        # (loaded above via load_dotenv).
        tools = [TavilySearchResults(max_results=2)] if allow_search else []

        # Build the prebuilt ReAct agent from the model and tools
        agent = create_react_agent(
            model=llm,
            tools=tools,
        )

        # Seed the conversation with the system prompt and the user query
        initial_messages = [
            SystemMessage(content=system_prompt),
            HumanMessage(content=query),
        ]

        # Invoke the agent; the graph state only needs a "messages" key
        response = agent.invoke({"messages": initial_messages})

        # The agent returns a dict whose "messages" list holds the full
        # exchange; the answer is the content of the last AI message.
        if isinstance(response, dict) and "messages" in response:
            ai_messages = [
                msg for msg in response["messages"] if isinstance(msg, AIMessage)
            ]
            if ai_messages:
                return ai_messages[-1].content

        return "I apologize, but I couldn't generate a proper response. Please try again."

    except Exception as e:
        print(f"Debug - Error in get_response_from_ai_agent: {str(e)}")
        raise Exception(f"Agent error: {str(e)}")
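

# Example usage: a minimal sketch showing how the function above might be
# called when this module is run directly. The model id
# "llama-3.3-70b-versatile" and the query text are illustrative assumptions,
# not values fixed anywhere in this module.
if __name__ == "__main__":
    answer = get_response_from_ai_agent(
        llm_id="llama-3.3-70b-versatile",  # assumed example Groq model id
        query="What is LangGraph used for?",
        allow_search=True,
        system_prompt="You are a helpful, concise assistant.",
    )
    print(answer)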