# """LangGraph Agent"""
# import os
# from dotenv import load_dotenv
# from langgraph.graph import START, StateGraph, MessagesState
# from langgraph.prebuilt import tools_condition
# from langgraph.prebuilt import ToolNode
# from langchain_google_genai import ChatGoogleGenerativeAI
# from langchain_groq import ChatGroq
# from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
# from langchain_community.tools.tavily_search import TavilySearchResults
# from langchain_community.document_loaders import WikipediaLoader
# from langchain_community.document_loaders import ArxivLoader
# from langchain_community.vectorstores import SupabaseVectorStore
# from langchain_core.messages import SystemMessage, HumanMessage
# from langchain_core.tools import tool
# from langchain.tools.retriever import create_retriever_tool
# from supabase.client import Client, create_client
# from langchain_core.documents import Document
# #load_dotenv()
# load_dotenv(".env")
# @tool
# def multiply(a: int, b: int) -> int:
# """Multiply two numbers.
# Args:
# a: first int
# b: second int
# """
# return a * b
# @tool
# def add(a: int, b: int) -> int:
# """Add two numbers.
# Args:
# a: first int
# b: second int
# """
# return a + b
# @tool
# def subtract(a: int, b: int) -> int:
# """Subtract two numbers.
# Args:
# a: first int
# b: second int
# """
# return a - b
# @tool
# def divide(a: int, b: int) -> int:
# """Divide two numbers.
# Args:
# a: first int
# b: second int
# """
# if b == 0:
# raise ValueError("Cannot divide by zero.")
# return a / b
# @tool
# def modulus(a: int, b: int) -> int:
# """Get the modulus of two numbers.
# Args:
# a: first int
# b: second int
# """
# return a % b
# @tool
# def wiki_search(query: str) -> str:
# """Search Wikipedia for a query and return maximum 2 results.
# Args:
# query: The search query."""
# search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
# formatted_search_docs = "\n\n---\n\n".join(
# [
# f'\n{doc.page_content}\n'
# for doc in search_docs
# ])
# return {"wiki_results": formatted_search_docs}
# @tool
# def web_search(query: str) -> str:
# """Search Tavily for a query and return maximum 3 results.
# Args:
# query: The search query."""
# search_docs = TavilySearchResults(max_results=3).invoke(query=query)
# formatted_search_docs = "\n\n---\n\n".join(
# [
# f'\n{doc.page_content}\n'
# for doc in search_docs
# ])
# return {"web_results": formatted_search_docs}
# @tool
# def arvix_search(query: str) -> str:
# """Search Arxiv for a query and return maximum 3 result.
# Args:
# query: The search query."""
# search_docs = ArxivLoader(query=query, load_max_docs=3).load()
# formatted_search_docs = "\n\n---\n\n".join(
# [
# f'\n{doc.page_content[:1000]}\n'
# for doc in search_docs
# ])
# return {"arvix_results": formatted_search_docs}
# # load the system prompt from the file
# with open("system_prompt.txt", "r", encoding="utf-8") as f:
# system_prompt = f.read()
# # System message
# sys_msg = SystemMessage(content=system_prompt)
# # build a retriever
# embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2") # dim=768
# # supabase: Client = create_client(
# # os.environ.get("SUPABASE_URL"),
# # os.environ.get("SUPABASE_SERVICE_KEY"))
# supabase_url = os.getenv("SUPABASE_URL")
# supabase_key = os.getenv("SUPABASE_KEY")
# if not supabase_url or not supabase_key:
# raise ValueError("SUPABASE_URL and SUPABASE_KEY must be set in environment variables.")
# supabase: Client = create_client(supabase_url, supabase_key)
# docs = [Document(page_content="This is a test about AI.")]
# vector_store = SupabaseVectorStore(
# client=supabase, # should be your `supabase` client instance
# embedding=embeddings,
# table_name="documents",
# query_name="match_documents_langchain",
# )
# # Add documents
# vector_store.add_documents(docs)
# print("š Testing similarity_search with: 'What is AI?'")
# results = vector_store.similarity_search("What is AI?")
# print(f"ā
Got {len(results)} results.")
# if results:
# print("First result content:\n", results[0].page_content)
# create_retriever_tool = create_retriever_tool(
# retriever=vector_store.as_retriever(),
# name="Question Search",
# description="A tool to retrieve similar questions from a vector store.",
# )
# tools = [
# multiply,
# add,
# subtract,
# divide,
# modulus,
# wiki_search,
# web_search,
# arvix_search,
# ]
# # Build graph function
# def build_graph(provider: str = "groq"):
# """Build the graph"""
# # Load environment variables from .env file
# if provider == "google":
# # Google Gemini
# llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
# elif provider == "groq":
# # Groq https://console.groq.com/docs/models
# llm = ChatGroq(model="qwen-qwq-32b", temperature=0) # optional : qwen-qwq-32b gemma2-9b-it
# elif provider == "huggingface":
# # TODO: Add huggingface endpoint
# llm = ChatHuggingFace(
# llm=HuggingFaceEndpoint(
# url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
# temperature=0,
# ),
# )
# else:
# raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")
# # Bind tools to LLM
# llm_with_tools = llm.bind_tools(tools)
# def assistant(state: MessagesState):
# """Assistant node"""
# print("\nš§ Final prompt to model:")
# for m in state["messages"]:
# print(f"{m.type.upper()}: {m.content[:300]}...\n") # truncate for readability
# response = llm_with_tools.invoke(state["messages"])
# print("š¬ Model response:", response.content[:500], "\n")
# return {"messages": [response]}
# # Node
# # def assistant(state: MessagesState):
# # """Assistant node"""
# # return {"messages": [llm_with_tools.invoke(state["messages"])]}
# # def retriever(state: MessagesState):
# # """Retriever node"""
# # similar_question = vector_store.similarity_search(state["messages"][0].content)
# # example_msg = HumanMessage(
# # content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
# # )
# # return {"messages": [sys_msg] + state["messages"] + [example_msg]}
# def retriever(state: MessagesState):
# """Retriever node"""
# messages = state.get("messages", [])
# if not messages:
# print("ā ļø No messages received in retriever node.")
# return {"messages": []}
# query = messages[0].content
# print(f"\nš Query to vector store: {query}")
# try:
# similar_question = vector_store.similarity_search(query)
# except Exception as e:
# print(f"ā similarity_search failed: {e}")
# return {"messages": messages}
# if not similar_question:
# print("ā ļø No similar questions found.")
# return {"messages": messages}
# print(f"ā
Found {len(similar_question)} similar question(s).")
# print("š First retrieved doc:\n", similar_question[0].page_content)
# example_msg = HumanMessage(
# content=f"Here I provide a similar question and answer for reference:\n\n{similar_question[0].page_content}"
# )
# return {"messages": [sys_msg] + messages + [example_msg]}
# builder = StateGraph(MessagesState)
# builder.add_node("retriever", retriever)
# builder.add_node("assistant", assistant)
# builder.add_node("tools", ToolNode(tools))
# builder.add_edge(START, "retriever")
# builder.add_edge("retriever", "assistant")
# builder.add_conditional_edges(
# "assistant",
# tools_condition,
# )
# builder.add_edge("tools", "assistant")
# # Compile graph
# return builder.compile()
# # test
# if __name__ == "__main__":
# question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
# # Build the graph
# graph = build_graph(provider="groq")
# # Run the graph
# messages = [HumanMessage(content=question)]
# messages = graph.invoke({"messages": messages})
# for m in messages["messages"]:
# m.pretty_print()
"""LangGraph Agent"""
import os
from dotenv import load_dotenv
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition
from langgraph.prebuilt import ToolNode
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.document_loaders import ArxivLoader
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain.tools.retriever import create_retriever_tool
from supabase.client import Client, create_client
load_dotenv()
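
# The providers and tools below read their credentials from the environment.
# Depending on which you use, this typically means GOOGLE_API_KEY (Gemini),
# GROQ_API_KEY (Groq), HUGGINGFACEHUB_API_TOKEN (Hugging Face) and
# TAVILY_API_KEY (Tavily), plus the Supabase variables checked below.
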
@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    return a * b

@tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int
    """
    return a + b

@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int
    """
    return a - b

@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b

@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int
    """
    return a % b

@tool
def wiki_search(query: str) -> dict:
    """Search Wikipedia for a query and return maximum 2 results.

    Args:
        query: The search query.
    """
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        f"\n{doc.page_content}\n" for doc in search_docs
    )
    return {"wiki_results": formatted_search_docs}

@tool
def web_search(query: str) -> dict:
    """Search Tavily for a query and return maximum 3 results.

    Args:
        query: The search query.
    """
    # TavilySearchResults returns a list of result dicts, not Document objects.
    search_docs = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        f"\n{doc['content']}\n" for doc in search_docs
    )
    return {"web_results": formatted_search_docs}

@tool
def arxiv_search(query: str) -> dict:
    """Search arXiv for a query and return maximum 3 results.

    Args:
        query: The search query.
    """
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    formatted_search_docs = "\n\n---\n\n".join(
        f"\n{doc.page_content[:1000]}\n" for doc in search_docs
    )
    return {"arxiv_results": formatted_search_docs}

# Load the system prompt from file.
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()

# System message
sys_msg = SystemMessage(content=system_prompt)

# Build a retriever backed by Supabase.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")  # dim=768

supabase_url = os.environ.get("SUPABASE_URL")
supabase_key = os.environ.get("SUPABASE_SERVICE_KEY")
if not supabase_url or not supabase_key:
    raise ValueError("SUPABASE_URL and SUPABASE_SERVICE_KEY must be set in environment variables.")
supabase: Client = create_client(supabase_url, supabase_key)
vector_store = SupabaseVectorStore(
    client=supabase,
    embedding=embeddings,
    table_name="documents",
    query_name="match_documents_langchain",
)

# Retriever tool over the vector store (exposed for reuse; not bound to the LLM below).
retriever_tool = create_retriever_tool(
    retriever=vector_store.as_retriever(),
    name="Question Search",
    description="A tool to retrieve similar questions from a vector store.",
)

tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arxiv_search,
]

# Build graph function
def build_graph(provider: str = "google"):
    """Build the graph."""
    if provider == "google":
        # Google Gemini
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    elif provider == "groq":
        # Groq, see https://console.groq.com/docs/models
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0)  # alternatives: qwen-qwq-32b, gemma2-9b-it
    elif provider == "huggingface":
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
                temperature=0,
            ),
        )
    else:
        raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")

    # Bind tools to the LLM.
    llm_with_tools = llm.bind_tools(tools)

    # Node. The tool-calling assistant is kept for reference, but the compiled
    # graph below only wires in the retriever node.
    def assistant(state: MessagesState):
        """Assistant node"""
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    def retriever(state: MessagesState):
        """Retriever node: answer directly from the closest vector-store match."""
        query = state["messages"][-1].content
        results = vector_store.similarity_search(query, k=1)
        if not results:
            return {"messages": [AIMessage(content="No similar question found in the vector store.")]}
        content = results[0].page_content
        # Stored documents may carry the reference answer after a "Final answer :" marker.
        if "Final answer :" in content:
            answer = content.split("Final answer :")[-1].strip()
        else:
            answer = content.strip()
        return {"messages": [AIMessage(content=answer)]}

    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever)
    # The retriever is both the entry point and the finish point.
    builder.set_entry_point("retriever")
    builder.set_finish_point("retriever")
    # Compile graph
    return builder.compile()
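
# Quick smoke test when run as a script.
if __name__ == "__main__":
    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
    # Build the graph
    graph = build_graph(provider="groq")
    # Run the graph
    messages = [HumanMessage(content=question)]
    result = graph.invoke({"messages": messages})
    for m in result["messages"]:
        m.pretty_print()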