|
import datasets
from langchain.docstore.document import Document
from typing import TypedDict, Annotated

from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.graph import START, StateGraph, MessagesState
from langchain_openai import ChatOpenAI

from other_tools import (
    wiki_search, arvix_search, web_search, vector_search,
    multiply, add, subtract, divide, modulus, power, square_root
)

import os
from dotenv import load_dotenv

load_dotenv()
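
# Environment assumption (not shown in this file): the local .env is expected to supply
# the OpenAI credential read via os.getenv("OPENAI_KEY") below, e.g.
#   OPENAI_KEY=<your OpenAI API key>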
|
|
|
# Deterministic decoding so answers stay in the strict format demanded by the system prompt.
llm = ChatOpenAI(temperature=0, model="gpt-4o-mini", openai_api_key=os.getenv("OPENAI_KEY"))
|
|
|
# Tools exposed to the model; vector_search is used directly by the retriever node below
# rather than being offered as a tool.
tools = [
    wiki_search, arvix_search, web_search,
    multiply, add, subtract, divide, modulus, power, square_root
]
chat_with_tools = llm.bind_tools(tools)
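
# Optional sanity check (illustrative sketch, not part of the original file; needs a valid
# OPENAI_KEY): the bound model should answer an arithmetic question with a tool call,
# typically to `multiply`, rather than with plain text.
#
#   reply = chat_with_tools.invoke([HumanMessage(content="What is 17 * 23?")])
#   print(reply.tool_calls)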
|
|
|
|
|
system_message = SystemMessage(content="""You are a helpful assistant tasked with answering questions using a set of tools and reference materials.

You may be provided with a reference set of questions and answers from a retriever.
If the current question is identical to or semantically equivalent to a reference question, or if a reference answer clearly applies, use that reference answer directly.

Otherwise, reason through the question as needed to determine the correct answer.

Your output must follow these formatting rules:
- If the answer is a number, do not use commas or units (unless specifically requested).
- If the answer is a string, do not use articles, abbreviations, or short forms. Write digits in full unless specified otherwise.
- If the answer is a comma-separated list, apply the above rules to each item and include exactly one space after each comma.
- If the question matches a reference question, return the reference answer exactly as it appears.

Do not include any explanation, prefix, or extra text; output only the final answer.
""")


def assistant(state: MessagesState):
    """Assistant node: run the tool-enabled model on the current message history."""
    return {
        "messages": [chat_with_tools.invoke(state["messages"])],
    }
|
|
|
def retriever(state: MessagesState):
    """Retriever node: look up a similar reference Q&A and add it, along with the system prompt, to the conversation."""
    similar_question = vector_search(state["messages"][0].content)

    if similar_question:
        example_msg = HumanMessage(
            content=f"Here I provide a similar question and answer for reference: \n\n{similar_question}",
        )
        print(f"Similar question found: {similar_question}")
        return {"messages": [system_message] + state["messages"] + [example_msg]}
    else:
        print("No similar question found.")
        return {"messages": [system_message] + state["messages"]}
|
|
|
|
|
# Wire the graph: START -> retriever -> assistant, with a tool-calling loop back to the assistant.
builder = StateGraph(MessagesState)

builder.add_node("assistant", assistant)
builder.add_node("retriever", retriever)
builder.add_node("tools", ToolNode(tools))

builder.add_edge(START, "retriever")
builder.add_edge("retriever", "assistant")
builder.add_conditional_edges(
    "assistant",
    # Route to the "tools" node when the model requested a tool call, otherwise finish.
    tools_condition,
)
builder.add_edge("tools", "assistant")

alfred = builder.compile()
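
# Example invocation (a minimal sketch added for illustration; the question is not from the
# original file). The __main__ guard keeps imports of this module from triggering a run.
if __name__ == "__main__":
    final_state = alfred.invoke(
        {"messages": [HumanMessage(content="What is 2 to the power of 10?")]}
    )
    print(final_state["messages"][-1].content)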