AdityaPandey commited on
Commit
3a60aaa
·
verified ·
1 Parent(s): 4598d02

Upload 4 files

Browse files
Files changed (4) hide show
  1. agent.py +67 -0
  2. prompts.py +5 -0
  3. retriever.py +44 -0
  4. tools.py +112 -0
agent.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """LangGraph Agent"""
2
+ import os
3
+ from dotenv import load_dotenv
4
+ from langgraph.graph import START, StateGraph, MessagesState
5
+ from langgraph.prebuilt import tools_condition
6
+ from langgraph.prebuilt import ToolNode
7
+ from langchain_core.messages import SystemMessage, HumanMessage
8
+ from prompts import SYS_PROMPT
9
+ from tools import tools
10
+ from retriever import vector_store
11
+ from langchain_openai import ChatOpenAI
12
+
13
# Load environment variables (e.g. OPENAI_API_KEY read later in build_graph)
# from a local .env file.
load_dotenv()


# System message built from SYS_PROMPT (prompts.py): forces the
# "FINAL ANSWER: ..." output template.
sys_msg = SystemMessage(content=SYS_PROMPT)
19
+
20
+
21
# Build graph function
def build_graph():
    """Construct and compile the agent graph.

    Topology: START -> retriever -> assistant, with a conditional hop
    assistant <-> tools driven by ``tools_condition``.

    Returns:
        The compiled LangGraph runnable.
    """
    model = ChatOpenAI(temperature=0.1, model="gpt-4o", openai_api_key=os.getenv("OPENAI_API_KEY"))
    # Expose every tool in `tools` to the model.
    model_with_tools = model.bind_tools(tools)

    def assistant(state: MessagesState):
        """Assistant node: have the tool-aware LLM respond to the running conversation."""
        response = model_with_tools.invoke(state["messages"])
        return {"messages": [response]}

    def retriever(state: MessagesState):
        """Retriever node: prepend the system prompt and append similar solved Q/A examples."""
        # Look up the 3 most similar stored questions to the user's first message.
        matches = vector_store.similarity_search(state["messages"][0].content, k=3)
        numbered = [f"{i+1}. {doc.page_content}" for i, doc in enumerate(matches)]
        similar_question_content = "\n".join(numbered)
        example_msg = HumanMessage(
            content=f"Here I provide some similar questions and answer for reference in case you can't find answer from tool result: \n\n{similar_question_content}",
        )
        return {"messages": [sys_msg] + state["messages"] + [example_msg]}

    graph = StateGraph(MessagesState)
    graph.add_node("retriever", retriever)
    graph.add_node("assistant", assistant)
    graph.add_node("tools", ToolNode(tools))
    graph.add_edge(START, "retriever")
    graph.add_edge("retriever", "assistant")
    # Route to "tools" when the assistant emitted tool calls, otherwise END.
    graph.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    graph.add_edge("tools", "assistant")

    # Compile graph
    return graph.compile()
56
+
57
# test
if __name__ == "__main__":
    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
    # Build the graph
    graph = build_graph()
    # Run the graph with the question as the sole initial message.
    messages = [HumanMessage(content=question)]
    messages = graph.invoke({"messages": messages})
    # Print the full message trace; the last message carries the FINAL ANSWER.
    # (Removed dead store `answer = messages['messages'][-1].content` — it was
    # never used.)
    for m in messages["messages"]:
        m.pretty_print()
prompts.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
# System prompt for the agent. It forces answers to end with the
# "FINAL ANSWER: ..." template so the final line can be parsed mechanically.
# NOTE: this string is sent to the model verbatim — do not reformat it.
SYS_PROMPT = """You are a helpful assistant tasked with answering questions using a set of tools.
Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
Your answer should only start with "FINAL ANSWER: ", then follows with the answer."""
retriever.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Build a Chroma vector store of solved Q/A examples loaded from metadata.jsonl.

Importing this module has side effects: it loads the embedding model, opens
(or creates) the persistent Chroma collection, and re-adds every record from
metadata.jsonl to it.
"""
from langchain_chroma import Chroma  # deduplicated: was imported twice
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_core.documents import Document
import json

from uuid import uuid4


print("Loading embedding model...")
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

vector_store = Chroma(
    collection_name="example_collection",
    embedding_function=embeddings,
    persist_directory="./chroma_langchain_db",  # Where to save data locally, remove if not necessary
)

# Load the metadata.jsonl file: one JSON record per line.
with open('metadata.jsonl', 'r') as jsonl_file:
    json_QA = [json.loads(line) for line in jsonl_file]

# Wrap each Q/A pair in a Document so it can be embedded and retrieved later.
docs = []
for sample in json_QA:
    content = f"Question: {sample['Question']}\n\nFinal answer: {sample['Final answer']}"
    doc = Document(
        page_content=content,
        metadata={
            "source": sample['task_id'],
        },
        id=str(uuid4()),
    )
    docs.append(doc)

# Add documents to the vector store
print("Adding documents to the vector store...")
vector_store.add_documents(documents=docs)
# Free the intermediates; importers only need `vector_store`.
del docs
del json_QA
tools.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_community.tools import DuckDuckGoSearchResults
2
+ from langchain_community.document_loaders import WikipediaLoader
3
+ from langchain_community.document_loaders import ArxivLoader
4
+
5
+ from langchain_core.documents import Document
6
+
7
+
8
# Separator placed between formatted <Document> entries in search-tool output.
SEP_CHAR = "\n\n---\n\n"
9
+
10
+
11
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    # Docstring layout fixed (blank line after summary) to match the sibling
    # tools (add/subtract/divide/modulus) — tool descriptions stay uniform.
    return a * b
18
+
19
+
20
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int
    """
    # Plain integer addition; named intermediate for readability.
    total = a + b
    return total
28
+
29
+
30
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int
    """
    # Difference of a minus b.
    difference = a - b
    return difference
38
+
39
+
40
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int

    Raises:
        ValueError: if b is zero.
    """
    # Guard against division by zero before performing the division.
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    # True division always yields a float (even for evenly divisible ints),
    # hence the corrected `-> float` return annotation (was `-> int`).
    return a / b
50
+
51
+
52
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int
    """
    # Remainder of a divided by b (Python's % semantics).
    remainder = a % b
    return remainder
60
+
61
+
62
def wiki_search(query: str) -> str:
    """Search Wikipedia for a query and return maximum 2 results.

    Args:
        query: The search query."""
    # Load up to 2 matching Wikipedia pages.
    search_docs: list[Document] = WikipediaLoader(query=query, load_max_docs=2).load()
    # Wrap each page in a pseudo-XML <Document> tag so the LLM can tell
    # results apart. Return annotation fixed: this returns a joined str,
    # not a dict.
    formatted_search_docs = SEP_CHAR.join(
        [
            f'<Document source="{doc.metadata["source"]}"/>\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ])
    return formatted_search_docs
74
+
75
+
76
def web_search(query: str) -> str:
    """Search Web for a query and return maximum 3 results.

    Args:
        query: The search query."""
    # output_format='list' makes the tool return a list of dicts with
    # 'link' / 'title' / 'snippet' keys.
    search_docs: list[dict] = DuckDuckGoSearchResults(num_results=3, output_format='list').invoke(input=query)
    # Return annotation fixed: this returns a joined str, not a dict.
    formatted_search_docs = SEP_CHAR.join(
        [
            f'<Document source="{doc["link"]}" title="{doc.get("title", "")}"/>\n{doc["snippet"]}\n</Document>'
            for doc in search_docs
        ])
    return formatted_search_docs
88
+
89
+
90
def arvix_search(query: str) -> str:
    """Search Arxiv for a query and return maximum 3 result.

    Args:
        query: The search query."""
    # Cap at 3 papers to match the documented contract — the original passed
    # no limit, so ArxivLoader's (much larger) default applied.
    search_docs: list[Document] = ArxivLoader(query=query, load_max_docs=3).load()
    # Return annotation fixed: this returns a joined str, not a dict.
    # Only the first 1000 chars of each paper are included to bound the prompt.
    formatted_search_docs = SEP_CHAR.join(
        [
            f'<Document title="{doc.metadata["Title"]}" authors="{doc.metadata.get("Authors", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
            for doc in search_docs
        ])
    return formatted_search_docs
102
+
103
# All callables exposed to the LLM (via bind_tools) and to the ToolNode.
tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arvix_search,
]