import os

from typing import TypedDict, Annotated

from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition

import gradio as gr

from tools import search_tool, hub_stats_tool, weather_info_tool
from retriever import guest_info_tool
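

# Chat model used by the agent (Gemini 2.0 Flash); the API key is read from GOOGLE_API_KEY.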
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
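
# Tools the agent can call; bind_tools exposes their schemas to the model.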
tools = [search_tool, hub_stats_tool, guest_info_tool, weather_info_tool]
chat_with_tools = llm.bind_tools(tools)
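
# System prompt steering the agent through a THINK → TOOL → THINK → RESPOND loop.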
sys_msg = SystemMessage(content="""
Role:
You are a helpful agent hosting a party.

STRICT RULES:
1. Follow a THINK → TOOL → THINK → RESPOND approach:
   - THINK: Analyze the request and decide whether a tool call is required or whether it can be answered directly.
   - TOOL: Perform only the necessary tool calls and collect their responses.
   - THINK: Re-evaluate the tool responses and determine the next step.
   - RESPOND: Provide the final answer, repeating THINK/TOOL/THINK as many times as needed first.
2. If no relevant tool exists, inform the user and provide guidance instead of making assumptions.
""")


class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], add_messages]


def assistant(state: AgentState):
    # Keep the prompt short: once the history exceeds 7 messages, send only the last 6.
    if len(state["messages"]) > 7:
        return {"messages": chat_with_tools.invoke([sys_msg] + state["messages"][-6:])}
    return {"messages": chat_with_tools.invoke([sys_msg] + state["messages"])}
builder = StateGraph(AgentState)

builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    # Route to "tools" when the last AI message contains tool calls, otherwise end.
    tools_condition,
)
builder.add_edge("tools", "assistant")

# In-memory checkpointer so the conversation persists across turns of the same thread.
memory = MemorySaver()
alfred = builder.compile(checkpointer=memory)
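
# All Gradio requests share a single conversation thread in the checkpointer.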
config = {"configurable": {"thread_id": "7"}}


def alfred_response(question):
    messages = [HumanMessage(content=question)]
    response = alfred.invoke({"messages": messages}, config)
    return response['messages'][-1].content
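

# Minimal Gradio UI: wires alfred_response to a text-in / text-out interface.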
gr.Interface(
    fn=alfred_response,
    inputs="text",
    outputs="text",
    title="Party Organizer Assistant",
    description="Helps you with different requests while hosting the party.",
    examples=[["What's the weather now in Bangalore?"]],
).launch()
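
# launch() starts a local Gradio server; pass share=True for a temporary public link if needed.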