Spaces:
Paused
Paused
File size: 3,734 Bytes
1cbf254 952909f 1cbf254 2624a11 1cbf254 7bc489f 1cbf254 7bc489f 952909f 7bc489f d056c3f 7bc489f 952909f 2624a11 952909f 7bc489f 2624a11 e5beda5 2624a11 1cbf254 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 |
import functools
import os
from typing import List

from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_community.document_loaders import PyMuPDFLoader, TextLoader, WebBaseLoader
from langchain_community.vectorstores import Qdrant
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
def process_file(file_or_url):
    """Load documents from a URL string or an uploaded file object.

    Args:
        file_or_url: Either a plain ``http(s)://`` URL string, or an
            uploaded-file object exposing ``.path`` and ``.content``
            (e.g. a Chainlit/Gradio upload) — TODO confirm the exact
            upload type against the caller.

    Returns:
        list: LangChain ``Document`` objects produced by the loader.

    Bug fix: the original URL branch extended ``documents`` before it was
    defined (NameError) and then fell through into the file-handling code,
    which calls ``.path`` on a plain string (AttributeError). The URL
    branch now initializes the list and returns early.
    """
    documents = []
    if isinstance(file_or_url, str) and file_or_url.startswith(('http://', 'https://')):
        # Handle URL: fetch and parse the page, nothing to write to disk.
        loader = WebBaseLoader(file_or_url)
        documents.extend(loader.load())
        return documents
    # Handle uploaded file: persist it locally so the loaders can read it.
    temp_file = "./" + file_or_url.path
    with open(temp_file, "wb") as out:
        out.write(file_or_url.content)
    # Pick the loader by extension; anything that is not a PDF is read as text.
    if file_or_url.path.endswith(".pdf"):
        loader = PyMuPDFLoader(temp_file)
    else:
        loader = TextLoader(temp_file)
    documents.extend(loader.load())
    return documents
def add_to_qdrant(documents, embeddings, qdrant_client, collection_name):
    """Embed *documents* and upsert them into a Qdrant collection.

    Connects to the server at ``qdrant_client.url`` (preferring gRPC) and
    writes into *collection_name*. Returns nothing.
    """
    connection_kwargs = {
        "url": qdrant_client.url,
        "prefer_grpc": True,
        "collection_name": collection_name,
    }
    Qdrant.from_documents(documents, embeddings, **connection_kwargs)
def agent_node(state, agent, name):
    """Run *agent* on the graph *state* and repackage its output.

    The agent's ``"output"`` field is wrapped in a ``HumanMessage`` tagged
    with *name* so downstream nodes can attribute it to this worker.
    """
    output_text = agent.invoke(state)["output"]
    message = HumanMessage(content=output_text, name=name)
    return {"messages": [message]}
def create_team_agent(llm, tools, system_prompt, agent_name, team_members):
    """Create a team-member agent whose prompt lists the current files.

    Args:
        llm: Language model passed through to ``create_agent``.
        tools: Tools available to the agent.
        system_prompt: Base system prompt; a ``{current_files}`` template
            slot is appended for the working-directory listing.
        agent_name: Unused here; kept for interface compatibility.
        team_members: Unused here; kept for interface compatibility
            (``create_agent`` itself injects a ``{team_members}`` slot).

    Returns:
        The executor produced by ``create_agent``.

    Bug fix: the original passed ``team_members`` as a fourth positional
    argument, but ``create_agent`` accepts only (llm, tools,
    system_prompt) — every call raised TypeError.
    """
    return create_agent(
        llm,
        tools,
        f"{system_prompt}\nBelow are files currently in your directory:\n{{current_files}}",
    )
def create_agent_node(agent, name):
    """Return a one-argument graph node that runs *agent* via ``agent_node``.

    The returned callable takes only the graph state; *agent* and *name*
    are captured in the closure.
    """
    def _node(state):
        return agent_node(state, agent=agent, name=name)
    return _node
def add_agent_to_graph(graph, agent_name, agent_node):
    """Register *agent_node* on *graph* and wire it back to the supervisor.

    Adds the node under *agent_name*, then adds an edge from that node to
    the ``"supervisor"`` node so control always returns to the router.
    """
    graph.add_node(agent_name, agent_node)
    graph.add_edge(agent_name, "supervisor")
def create_team_supervisor(llm, team_description, team_members):
    """Build an LLM router that picks the next worker (or FINISH) for a team.

    Args:
        llm: Chat model supporting OpenAI function calling
            (``bind_functions``).
        team_description: Extra instructions appended to the supervisor's
            system prompt.
        team_members: Worker names the supervisor may route to.

    Returns:
        A runnable chain whose output is ``{"next": <member or "FINISH">}``.

    Bug fix: the original body called itself unconditionally, recursing
    forever on the first call. This implements the routing chain directly
    (standard LangGraph function-calling supervisor pattern), preserving
    the original supervisor instruction text.
    """
    options = ["FINISH"] + list(team_members)
    # OpenAI function schema that forces the model to name exactly one role.
    function_def = {
        "name": "route",
        "description": "Select the next role.",
        "parameters": {
            "title": "routeSchema",
            "type": "object",
            "properties": {
                "next": {"title": "Next", "anyOf": [{"enum": options}]},
            },
            "required": ["next"],
        },
    }
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                f"You are a supervisor tasked with managing a conversation between the"
                f" following workers: {', '.join(team_members)}. {team_description}"
                f" When all workers are finished, you must respond with FINISH.",
            ),
            MessagesPlaceholder(variable_name="messages"),
            (
                "system",
                "Given the conversation above, who should act next?"
                " Or should we FINISH? Select one of: {options}",
            ),
        ]
    ).partial(options=str(options))
    return (
        prompt
        | llm.bind_functions(functions=[function_def], function_call="route")
        | JsonOutputFunctionsParser()
    )
def enter_chain(message: str, members: List[str]):
    """Seed a team graph's initial state from a raw user message.

    Wraps *message* as a ``HumanMessage`` and joins *members* into a
    comma-separated string under ``"team_members"``.
    """
    return {
        "messages": [HumanMessage(content=message)],
        "team_members": ", ".join(members),
    }
def create_team_chain(graph, team_members):
    """Compile *graph* and prepend the state-seeding entry step.

    The resulting chain accepts a plain message string, converts it to
    graph state via ``enter_chain`` (with *team_members* pre-bound), and
    feeds it into the compiled graph.
    """
    entry = functools.partial(enter_chain, members=team_members)
    compiled = graph.compile()
    return entry | compiled
def create_agent(
    llm: BaseLanguageModel,
    tools: list,
    system_prompt: str,
) -> AgentExecutor:
    """Create an OpenAI function-calling agent wrapped in an AgentExecutor.

    Args:
        llm: Language model driving the agent.
        tools: Tools the agent may call.
        system_prompt: Base system prompt; autonomy boilerplate with a
            ``{team_members}`` template slot is appended.

    Returns:
        AgentExecutor: The runnable executor (not ``str`` as the original
        annotation claimed, and nothing is added to any graph here).
    """
    # Appended verbatim from the original; {{team_members}} renders as the
    # literal template slot {team_members}, filled in later by the caller.
    system_prompt += ("\nWork autonomously according to your specialty, using the tools available to you."
                      " Do not ask for clarification."
                      " Your other team members (and other teams) will collaborate with you with their own specialties."
                      " You are chosen for a reason! You are one of the following team members: {{team_members}}.")
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            MessagesPlaceholder(variable_name="messages"),
            # Scratchpad slot required by create_openai_functions_agent.
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    agent = create_openai_functions_agent(llm, tools, prompt)
    # Original line ended in a stray "|" (syntax error, scrape artifact) — removed.
    return AgentExecutor(agent=agent, tools=tools)