# The guest-info retrieval tool below is disabled; its imports are commented out
# with it so the module carries no unused dependencies.
# import datasets
# from langchain.docstore.document import Document

# Load the dataset
# guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")

# Convert dataset entries into Document objects
# docs = [
#     Document(
#         page_content="\n".join([
#             f"Name: {guest['name']}",
#             f"Relation: {guest['relation']}",
#             f"Description: {guest['description']}",
#             f"Email: {guest['email']}"
#         ]),
#         metadata={"name": guest["name"]}
#     )
#     for guest in guest_dataset
# ]

# from langchain_community.retrievers import BM25Retriever
# from langchain.tools import Tool
#
# bm25_retriever = BM25Retriever.from_documents(docs)
#
# def extract_text(query: str) -> str:
#     """Retrieves detailed information about gala guests based on their name or relation."""
#     results = bm25_retriever.invoke(query)
#     if results:
#         return "\n\n".join([doc.page_content for doc in results[:3]])
#     else:
#         return "No matching guest information found."
#
# guest_info_tool = Tool(
#     name="guest_info_retriever",
#     func=extract_text,
#     description="Retrieves detailed information about gala guests based on their name or relation."
# )
#######################################################################################################################################################
from typing import TypedDict, Annotated
from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.graph import START, StateGraph
from langchain_openai import ChatOpenAI
# from WebSearch import weather_info_tool
from other_tools import (
    wiki_search, arvix_search, web_search, vector_search,
    multiply, add, subtract, divide, modulus, power, square_root,
)
import os
from dotenv import load_dotenv
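# Load OPENAI_KEY (and any other secrets) from a local .env file, if present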
load_dotenv()
# Generate the chat interface, including the tools
llm = ChatOpenAI(
    temperature=0,
    model="gpt-4o-mini",
    openai_api_key=os.getenv("OPENAI_KEY"),
)
# vector_search is used directly by the retriever node below, not exposed as a tool
tools = [
    wiki_search, arvix_search, web_search,
    multiply, add, subtract, divide, modulus, power, square_root,
]
chat_with_tools = llm.bind_tools(tools)
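# Note: chat_with_tools.invoke(...) returns an AIMessage; when the model decides a
# tool is needed, the request surfaces on AIMessage.tool_calls, which the ToolNode
# below executes.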
# Set up the system prompt
sys_msg = SystemMessage(content="""You are a helpful assistant tasked with answering questions using a set of tools and reference materials.
You may be provided with a reference set of questions and answers from a retriever.
If the current question is identical to or semantically equivalent to a reference question, or if a reference answer clearly applies, use that reference answer directly.
Otherwise, reason through the question as needed to determine the correct answer.
Your output must follow these formatting rules:
- If the answer is a number, do not use commas or units (unless specifically requested).
- If the answer is a string, do not use articles, abbreviations, or short forms. Write digits in full unless specified otherwise.
- If the answer is a comma-separated list, apply the above rules to each item and include exactly one space after each comma.
- If the question matches a reference question, return the reference answer exactly as it appears.
Do not include any explanation, prefix, or extra text—output only the final answer.
""")
# Generate the AgentState and Agent graph
from langgraph.graph import MessagesState  # equivalent to the AgentState definition below
# class AgentState(TypedDict):
#     messages: Annotated[list[AnyMessage], add_messages]
def assistant(state: MessagesState):
    """Assistant node: run the tool-enabled model over the current message history."""
    return {
        "messages": [chat_with_tools.invoke(state["messages"])],
    }
def retriever(state: MessagesState):
    """Retriever node: inject the system prompt and, when the vector store holds a
    similar question, a reference question/answer pair into the conversation."""
    similar_question = vector_search(state["messages"][0].content)
    if similar_question:
        example_msg = HumanMessage(
            content=f"Here is a similar question and answer for reference:\n\n{similar_question}",
        )
        print(f"Similar question found: {similar_question}")
        return {"messages": [sys_msg] + state["messages"] + [example_msg]}
    else:
        # No similar question found; forward the original messages with the system prompt
        print("No similar question found.")
        return {"messages": [sys_msg] + state["messages"]}
## The graph
builder = StateGraph(MessagesState)
# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("retriever", retriever)
builder.add_node("tools", ToolNode(tools))
# Define edges: these determine how the control flow moves
builder.add_edge(START, "retriever")
builder.add_edge("retriever", "assistant")
builder.add_conditional_edges(
    "assistant",
    # If the latest message contains tool calls, route to "tools";
    # otherwise the assistant's reply is final and the graph ends
    tools_condition,
)
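# Tool results loop back to the assistant, which keeps calling tools until it can answer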
builder.add_edge("tools", "assistant")
alfred = builder.compile()
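# Optional: print the graph topology as Mermaid text (get_graph().draw_mermaid() is
# available on compiled LangGraph graphs)
# print(alfred.get_graph().draw_mermaid())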
# messages = [HumanMessage(content="When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?")]
# messages = [HumanMessage(content="What is the remainder of 30 divided by 7?")]
# response = alfred.invoke({"messages": messages})
# print(response["messages"][-1].content)
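if __name__ == "__main__":
    # Minimal smoke test; assumes OPENAI_KEY is set in the environment or .env
    demo = [HumanMessage(content="What is the remainder of 30 divided by 7?")]
    result = alfred.invoke({"messages": demo})
    print(result["messages"][-1].content)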