File size: 2,967 Bytes
8618f46
 
 
 
 
 
 
 
 
 
01ff992
8618f46
 
 
 
 
 
 
 
 
 
01ff992
8618f46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0047b94
 
01ff992
8618f46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
036f732
 
01ff992
bdc3def
4502e29
01ff992
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
# App Section
# Wires a LangGraph ReAct-style agent (Gemini + four tools) to a Gradio UI.

import os
from typing import TypedDict, Annotated
from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
from langgraph.prebuilt import ToolNode
from langgraph.graph import START, StateGraph
from langgraph.prebuilt import tools_condition
from langgraph.checkpoint.memory import MemorySaver
from tools import search_tool, hub_stats_tool, weather_info_tool
from retriever import guest_info_tool
import gradio as gr

from langchain_google_genai import ChatGoogleGenerativeAI


# Generate the chat interface, including the tools
#llm = ChatGroq(model="qwen-2.5-coder-32b")
# NOTE(review): presumably requires a Google API key in the environment — confirm deployment config.
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")

# Tools exposed to the model; `bind_tools` lets the LLM emit tool calls for them.
tools = [search_tool, hub_stats_tool, guest_info_tool, weather_info_tool]
chat_with_tools = llm.bind_tools(tools)

# System message
# Prepended to every model call in `assistant`; defines the agent's persona
# (party host) and enforces a THINK → TOOL → THINK → RESPOND loop.
# The content below is runtime prompt text — do not edit casually.
sys_msg = SystemMessage(content="""
Role:
You are a helpful agent and hosting a party.
STRICT RULES:
1. Follow a THINK → TOOL → THINK → RESPOND approach:
    - THINK: Analyze the request and decide if any tool call is required or if it can be answered without a tool.
    - TOOL: Perform only the necessary tool calls and collect responses.
    - THINK: Re-evaluate tool response and determine the next step.
    - RESPOND: Repeat THINK/TOOL/THINK as many times as required before providing a final answer.
2. If no relevant tool exists, inform the user and provide guidance instead of making assumptions.
""")

# Generate the AgentState and Agent graph
class AgentState(TypedDict):
    """Graph state: the running conversation for one thread.

    The `add_messages` reducer appends new messages returned by nodes
    instead of overwriting the list.
    """

    # Full message history (human, AI, tool messages) for the current thread.
    messages: Annotated[list[AnyMessage], add_messages]

# Sliding-window history settings: once the thread grows past the limit,
# only the most recent KEEP_LAST messages are sent (plus the system prompt).
_HISTORY_LIMIT = 7
_KEEP_LAST = 6

def assistant(state: AgentState):
    """LLM node: invoke the tool-bound chat model on the conversation.

    Args:
        state: Current graph state; ``state["messages"]`` is the thread history.

    Returns:
        A partial state update — ``{"messages": <AIMessage>}`` — which the
        ``add_messages`` reducer appends to the history.
    """
    history = state["messages"]
    # Keep the prompt small on long threads. NOTE(review): a raw slice can
    # split a tool-call / tool-result pair, which some providers reject —
    # verify against the model's API before relying on long conversations.
    if len(history) > _HISTORY_LIMIT:
        history = history[-_KEEP_LAST:]
    # System prompt is re-prepended on every call; it is never stored in state.
    return {"messages": chat_with_tools.invoke([sys_msg] + history)}

## The graph
builder = StateGraph(AgentState)

# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))  # executes any tool calls the LLM emits

# Define edges: these determine how the control flow moves
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    # If the latest message requires a tool, route to tools
    # Otherwise, provide a direct response
    tools_condition,
)
# After tools run, control returns to the assistant to interpret the results.
builder.add_edge("tools", "assistant")
# In-memory checkpointer: conversation state survives across invocations
# keyed by thread_id, but is lost on process restart.
memory = MemorySaver()
alfred = builder.compile(checkpointer=memory)

# Single fixed thread — every caller shares one conversation history.
config = {"configurable": {"thread_id": "7"}}
#alfred

def alfred_response(question):
    """Run one user turn through the agent and return its final answer.

    Args:
        question: Free-text user question from the UI.

    Returns:
        The text content of the last message the graph produced (the
        model's final reply for this turn).
    """
    # All calls reuse the module-level `config` thread, so conversational
    # memory persists across Gradio submissions (shared by every user).
    messages = [HumanMessage(content=question)]
    response = alfred.invoke({"messages": messages}, config)
    return response['messages'][-1].content

# Gradio

# Minimal single-turn UI: one text box in, one text box out. `launch()`
# blocks and serves the app. Because `alfred_response` reuses one fixed
# thread_id, all browser sessions share the same conversation memory.
gr.Interface(
    fn=alfred_response, 
    inputs="text", 
    outputs="text", 
    title="Party Organizer Assistant", 
    description="Helps you answer with different asks during Party",
    examples=[["Whats weather now in Bangalore?"]],
).launch()