SARKAR Anupom OBS/GDO committed
Commit 8618f46 · 1 Parent(s): 437e3a5

Initial_Commit_05-04-2025

Files changed (3)
  1. app.py +94 -0
  2. retriever.py +58 -0
  3. tools.py +49 -0
app.py ADDED
@@ -0,0 +1,94 @@
+ # App Section
+
+ import os
+ from typing import TypedDict, Annotated
+ from langgraph.graph.message import add_messages
+ from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
+ from langgraph.prebuilt import ToolNode
+ from langgraph.graph import START, StateGraph
+ from langgraph.prebuilt import tools_condition
+ from langgraph.checkpoint.memory import MemorySaver
+ from tools import search_tool, weather_info_tool, hub_stats_tool
+ from retriever import guest_info_tool
+ import gradio as gr
+
+ from langchain_groq import ChatGroq
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+ # Generate the chat interface, including the tools
+ #llm = ChatGroq(model="qwen-2.5-coder-32b")
+ llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
+
+ tools = [search_tool, weather_info_tool, hub_stats_tool, guest_info_tool]
+ chat_with_tools = llm.bind_tools(tools)
+
+ # System message
+ sys_msg = SystemMessage(content="""
+ Role:
+ You are a helpful agent hosting a party.
+
+ STRICT RULES:
+ 1. Follow a THINK → TOOL → THINK → RESPOND approach:
+    - THINK: Analyze the request and decide whether a tool call is required or the question can be answered directly.
+    - TOOL: Perform only the necessary tool calls and collect the responses.
+    - THINK: Re-evaluate the tool response and determine the next step.
+    - RESPOND: Repeat THINK/TOOL/THINK as many times as required before giving a final answer.
+ 2. If no relevant tool exists, inform the user and provide guidance instead of making assumptions.
+ """)
+
+ # Define the AgentState and the agent graph
+ class AgentState(TypedDict):
+     messages: Annotated[list[AnyMessage], add_messages]
+
+ def assistant(state: AgentState):
+     return {
+         "messages": [chat_with_tools.invoke(state["messages"])],
+     }
+
+ ## The graph
+ builder = StateGraph(AgentState)
+
+ # Define nodes: these do the work
+ builder.add_node("assistant", assistant)
+ builder.add_node("tools", ToolNode(tools))
+
+ # Define edges: these determine how the control flow moves
+ builder.add_edge(START, "assistant")
+ builder.add_conditional_edges(
+     "assistant",
+     # If the latest message requires a tool, route to tools;
+     # otherwise, provide a direct response
+     tools_condition,
+ )
+ builder.add_edge("tools", "assistant")
+ memory = MemorySaver()
+ alfred = builder.compile(checkpointer=memory)
+
+ config = {"configurable": {"thread_id": "7"}}
+
+ def alfred_response(question):
+     messages = [HumanMessage(content=question)]
+     response = alfred.invoke({"messages": messages}, config)
+     return response['messages'][-1].content
+
+ # Gradio interface
+ input_textbox = gr.Textbox(label="Type your query here:", placeholder="Hi", lines=5)
+ output_textbox = gr.Textbox(label="Alfred's response:", lines=5)
+ gr.Interface(
+     fn=alfred_response,
+     inputs=input_textbox,
+     outputs=output_textbox,
+     title="Party Organizer Helper",
+     description="Helps you answer different questions during the party",
+     theme="peach",
+     examples=[["What's the weather now in Bangalore?"], ["What is the most downloaded model by google?"]],
+     live=True
+ ).launch(share=False)
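
Because the graph is compiled with a `MemorySaver` checkpointer and every call reuses `thread_id` "7", consecutive calls to `alfred_response` share conversation history. A minimal smoke test, assuming the code above is loaded without the `.launch()` call; the follow-up question is illustrative:

```python
# Two calls on the same thread: the checkpointer carries the first
# exchange into the second, so a follow-up can refer back to it.
print(alfred_response("What's the weather now in Bangalore?"))
print(alfred_response("Will that weather be a problem for the party?"))  # illustrative follow-up
```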
retriever.py ADDED
@@ -0,0 +1,58 @@
+ # Retriever Section
+
+ import datasets
+ from langchain.docstore.document import Document
+ from langchain.tools import Tool
+ from transformers import AutoTokenizer, TFAutoModel
+
+ # Load the dataset
+ guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")
+
+ def concatenate_text(examples):
+     return {
+         "text": "metadata={name:" + examples["name"] + "}," +
+                 "page_content=Name:" + examples["name"] + "\n" +
+                 "Relation:" + examples["relation"] + "\n" +
+                 "Description:" + examples["description"] + "\n" +
+                 "Email:" + examples["email"]
+     }
+
+ docs = guest_dataset.map(concatenate_text)
+
+ model_ckpt = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
+ tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
+ model = TFAutoModel.from_pretrained(model_ckpt, from_pt=True)
+
+ def cls_pooling(model_output):
+     # Use the [CLS] token embedding as the sentence representation
+     return model_output.last_hidden_state[:, 0]
+
+ def get_embeddings(text_list):
+     encoded_input = tokenizer(
+         text_list, padding=True, truncation=True, return_tensors="tf"
+     )
+     model_output = model(**encoded_input)
+     return cls_pooling(model_output)
+
+ embeddings_dataset = docs.map(
+     lambda x: {"embeddings": get_embeddings(x["text"]).numpy()[0]}
+ )
+
+ embeddings_dataset.add_faiss_index(column="embeddings")
+
+ def extract_text(query: str) -> str:
+     """Retrieves detailed information about gala guests based on their name or relation."""
+     query_embedding = get_embeddings([query]).numpy()
+     scores, samples = embeddings_dataset.get_nearest_examples(
+         "embeddings", query_embedding, k=2
+     )
+     if samples["text"]:
+         return "\n\n".join(samples["text"])
+     else:
+         return "No matching guest information found."
+
+ guest_info_tool = Tool(
+     name="guest_info_retriever",
+     func=extract_text,
+     description="Retrieves detailed information about gala guests based on their name or relation."
+ )
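
A quick way to sanity-check the retriever is to call it directly, bypassing the agent. A sketch assuming the index above has been built; the query strings are illustrative:

```python
# The Tool wrapper and the raw function share the same code path:
# embed the query, then pull the k=2 nearest guest entries from FAISS.
print(guest_info_tool.run("best friend"))  # through the LangChain Tool interface
print(extract_text("Ada"))                 # direct call with a hypothetical guest name
```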
tools.py ADDED
@@ -0,0 +1,49 @@
+ # Tool Section
+
+ from langchain_community.tools import DuckDuckGoSearchRun
+ from langchain.tools import Tool
+ from huggingface_hub import list_models
+ import random
+
+ # Internet Search Tool
+ search_tool = DuckDuckGoSearchRun()
+
+ def get_weather_info(location: str) -> str:
+     """Fetches dummy weather information for a given location."""
+     # Dummy weather data
+     weather_conditions = [
+         {"condition": "Rainy", "temp_c": 15},
+         {"condition": "Clear", "temp_c": 25},
+         {"condition": "Windy", "temp_c": 20}
+     ]
+     # Randomly select a weather condition
+     data = random.choice(weather_conditions)
+     return f"Weather in {location}: {data['condition']}, {data['temp_c']}°C"
+
+ # Initialize the tool
+ weather_info_tool = Tool(
+     name="get_weather_info",
+     func=get_weather_info,
+     description="Fetches dummy weather information for a given location."
+ )
+
+ def get_hub_stats(author: str) -> str:
+     """Fetches the most downloaded model from a specific author on the Hugging Face Hub."""
+     try:
+         # List models from the specified author, sorted by downloads
+         models = list(list_models(author=author, sort="downloads", direction=-1, limit=1))
+
+         if models:
+             model = models[0]
+             return f"The most downloaded model by {author} is {model.id} with {model.downloads:,} downloads."
+         else:
+             return f"No models found for author {author}."
+     except Exception as e:
+         return f"Error fetching models for {author}: {str(e)}"
+
+ # Initialize the tool
+ hub_stats_tool = Tool(
+     name="get_hub_stats",
+     func=get_hub_stats,
+     description="Fetches the most downloaded model from a specific author on the Hugging Face Hub."
+ )
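
Each tool can also be exercised on its own before wiring it into the agent. A short check, assuming the definitions above; the author name and queries are placeholders:

```python
print(get_weather_info("Bangalore"))            # dummy data; result is random on each call
print(get_hub_stats("google"))                  # live Hub query; needs network access
print(search_tool.run("weather in Bangalore"))  # DuckDuckGo web search
```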