'''
This script takes the True/False style questions from a CSV file, answers them with an LLM,
and saves the results to another CSV file. It uses a Llama model.
Before running this script, make sure to configure the file paths in the config.yaml file
(a hedged sketch of the expected keys follows this docstring).
'''
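# config.yaml itself is not included here; a hedged sketch of the keys this script reads
# (key names taken from the code below, values purely illustrative):
#
#   TRUE_FALSE_PATH: data/true_false_questions.csv
#   SAVE_RESULTS_PATH: data/results/
#   VECTOR_DB_PATH: data/vectorDB/
#   NODE_CONTEXT_PATH: data/node_context.csv
#   SENTENCE_EMBEDDING_MODEL_FOR_NODE_RETRIEVAL: sentence-transformers/all-MiniLM-L6-v2
#   SENTENCE_EMBEDDING_MODEL_FOR_CONTEXT_RETRIEVAL: sentence-transformers/all-MiniLM-L6-v2
#   QUESTION_VS_CONTEXT_SIMILARITY_PERCENTILE_THRESHOLD: 95
#   QUESTION_VS_CONTEXT_MINIMUM_SIMILARITY: 0.5
#   LLAMA_MODEL_NAME: meta-llama/Llama-2-13b-chat-hf
#   LLAMA_MODEL_BRANCH: main
#   LLM_CACHE_DIR: data/llm_cache/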
from langchain import PromptTemplate, LLMChain
from kg_rag.utility import *
import sys
QUESTION_PATH = config_data["TRUE_FALSE_PATH"]
SYSTEM_PROMPT = system_prompts["TRUE_FALSE_QUESTION"]
QUESTION_VS_CONTEXT_SIMILARITY_PERCENTILE_THRESHOLD = float(config_data["QUESTION_VS_CONTEXT_SIMILARITY_PERCENTILE_THRESHOLD"])
QUESTION_VS_CONTEXT_MINIMUM_SIMILARITY = float(config_data["QUESTION_VS_CONTEXT_MINIMUM_SIMILARITY"])
VECTOR_DB_PATH = config_data["VECTOR_DB_PATH"]
NODE_CONTEXT_PATH = config_data["NODE_CONTEXT_PATH"]
SENTENCE_EMBEDDING_MODEL_FOR_NODE_RETRIEVAL = config_data["SENTENCE_EMBEDDING_MODEL_FOR_NODE_RETRIEVAL"]
SENTENCE_EMBEDDING_MODEL_FOR_CONTEXT_RETRIEVAL = config_data["SENTENCE_EMBEDDING_MODEL_FOR_CONTEXT_RETRIEVAL"]
SAVE_PATH = config_data["SAVE_RESULTS_PATH"]
MODEL_NAME = config_data["LLAMA_MODEL_NAME"]
BRANCH_NAME = config_data["LLAMA_MODEL_BRANCH"]
CACHE_DIR = config_data["LLM_CACHE_DIR"]
CONTEXT_VOLUME = 100
save_name = "_".join(MODEL_NAME.split("/")[-1].split("-"))+"_one_hop_true_false_binary_response.csv"
INSTRUCTION = "Context:\n\n{context} \n\nQuestion: {question}"
vectorstore = load_chroma(VECTOR_DB_PATH, SENTENCE_EMBEDDING_MODEL_FOR_NODE_RETRIEVAL)
embedding_function_for_context_retrieval = load_sentence_transformer(SENTENCE_EMBEDDING_MODEL_FOR_CONTEXT_RETRIEVAL)
node_context_df = pd.read_csv(NODE_CONTEXT_PATH)
def main():
start_time = time.time()
llm = llama_model(MODEL_NAME, BRANCH_NAME, CACHE_DIR)
template = get_prompt(INSTRUCTION, SYSTEM_PROMPT)
prompt = PromptTemplate(template=template, input_variables=["context", "question"])
llm_chain = LLMChain(prompt=prompt, llm=llm)
question_df = pd.read_csv(QUESTION_PATH)
answer_list = []
for index, row in question_df.iterrows():
question = row["text"]
context = retrieve_context(question, vectorstore, embedding_function_for_context_retrieval, node_context_df, CONTEXT_VOLUME, QUESTION_VS_CONTEXT_SIMILARITY_PERCENTILE_THRESHOLD, QUESTION_VS_CONTEXT_MINIMUM_SIMILARITY)
output = llm_chain.run(context=context, question=question)
answer_list.append((row["text"], row["label"], output))
answer_df = pd.DataFrame(answer_list, columns=["question", "label", "llm_answer"])
answer_df.to_csv(os.path.join(SAVE_PATH, save_name), index=False, header=True)
print("Completed in {} min".format((time.time()-start_time)/60))
if __name__ == "__main__":
main()
# LangChain APIs used above: langchain.LLMChain, langchain.PromptTemplate
import os
from typing import Any, Callable
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
import registry
from .base import BaseChat, ChatHistory, Response
TEMPLATE = '''
You are a web3 assistant. You help users use web3 apps, such as Uniswap, AAVE, MakerDao, etc. You assist users in achieving their goals with these protocols, by providing users with relevant information, and creating transactions for users. Your responses should sound natural, helpful, cheerful, and engaging, and you should use easy to understand language with explanations for jargon.
Information to help complete your task is below. Only use information below to answer the question, and create a final answer with references ("SOURCES").
If you don't know the answer, just say that you don't know. Don't try to make up an answer.
ALWAYS return a "SOURCES" part in your answer.
-----
{task_info}
-----
User: {question}
Assistant:'''
# TODO: make this few-shot on real examples instead of dummy ones
REPHRASE_TEMPLATE = '''
Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. You should assume that the question is related to web3.
## Example:
Chat History:
User: Who created Ethereum?
Assistant: Vitalik Buterin
Follow Up Input: What about AAVE?
Standalone question: Who created AAVE?
## Example:
Chat History:
User: Who created Ethereum?
Assistant: Vitalik Buterin
User: What about AAVE?
Assistant: Stani Kulechov
Follow Up Input: When was that?
Standalone question: When were Ethereum and AAVE created?
## Example:
Chat History:
User: Who created Ethereum?
Assistant: Vitalik Buterin
Follow Up Input: What is AAVE?
Standalone question: What is AAVE?
## Example:
Chat History:
User: Who created Ethereum?
Assistant: Vitalik Buterin
User: What is AAVE?
Assistant: AAVE is a decentralized finance protocol that allows users to borrow and lend digital assets. It is a protocol built on Ethereum and is powered by a native token, Aave.
Follow Up Input: Bitoin?
Standalone question: What is Bitcoin?
## Example:
Chat History:
{history}
Follow Up Input: {question}
Standalone question:'''
@registry.register_class
class RephraseCitedChat(BaseChat):
def __init__(self, doc_index: Any, top_k: int = 3, show_thinking: bool = True) -> None:
super().__init__()
self.prompt = PromptTemplate(
input_variables=["task_info", "question"],
template=TEMPLATE,
)
self.llm = OpenAI(temperature=0.0, max_tokens=-1)
self.chain = LLMChain(llm=self.llm, prompt=self.prompt)
self.chain.verbose = True
self.doc_index = doc_index
self.top_k = top_k
self.show_thinking = show_thinking
self.rephrase_prompt = PromptTemplate(
input_variables=["history", "question"],
template=REPHRASE_TEMPLATE,
)
self.rephrase_chain = LLMChain(llm=self.llm, prompt=self.rephrase_prompt)
self.rephrase_chain.verbose = True
def receive_input(self, history: ChatHistory, userinput: str, send: Callable) -> None:
userinput = userinput.strip()
if history:
# First rephrase the question
history_string = history.to_string()
question = self.rephrase_chain.run({
"history": history_string.strip(),
"question": userinput,
"stop": "##",
}).strip()
rephrased = True
else:
question = userinput
rephrased = False
if self.show_thinking and rephrased and userinput != question:
send(Response(response="I think you're asking: " + question, still_thinking=True))
docs = self.doc_index.similarity_search(question, k=self.top_k)
task_info = '\n'.join([f'Content: {doc.page_content}\nSource: {doc.metadata["url"]}' for doc in docs])
result = self.chain.run({
"task_info": task_info,
"question": question,
"stop": "User",
})
result = result.strip()
history.add_interaction(userinput, result)
send(Response(result))
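A minimal usage sketch (not part of the original module). It assumes `doc_index` is any
vector store exposing `similarity_search(query, k=...)`, that `ChatHistory` can be
constructed empty, and that `Response` exposes the `.response` attribute used above:

# hypothetical wiring -- building doc_index is out of scope for this file
chat = RephraseCitedChat(doc_index=doc_index, top_k=3, show_thinking=True)
history = ChatHistory()
chat.receive_input(history, "How do I supply liquidity on Uniswap?",
                   send=lambda r: print(r.response))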
# LangChain APIs used above: langchain.chains.LLMChain, langchain.llms.OpenAI, langchain.prompts.PromptTemplate
from typing import List
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
import langchain.docstore.document as docstore
from loguru import logger
from settings import COLLECTION_NAME, PERSIST_DIRECTORY
from .vortex_pdf_parser import VortexPdfParser
from .vortext_content_iterator import VortexContentIterator
class VortexIngester:
def __init__(self, content_folder: str):
self.content_folder = content_folder
def ingest(self) -> None:
vortex_content_iterator = VortexContentIterator(self.content_folder)
vortex_pdf_parser = VortexPdfParser()
chunks: List[docstore.Document] = []
for document in vortex_content_iterator:
vortex_pdf_parser.set_pdf_file_path(document)
document_chunks = vortex_pdf_parser.clean_text_to_docs()
chunks.extend(document_chunks)
logger.info(f"Extracted {len(chunks)} chunks from {document}")
embeddings = OpenAIEmbeddings(client=None)
logger.info("Loaded embeddings")
vector_store = Chroma.from_documents(
chunks,
embeddings,
collection_name=COLLECTION_NAME,
persist_directory=PERSIST_DIRECTORY,
)
logger.info("Created Chroma vector store")
vector_store.persist()
logger.info("Persisted Chroma vector store")
# LangChain APIs used above: langchain.embeddings.OpenAIEmbeddings, langchain.vectorstores.Chroma.from_documents
# -*- coding: utf-8 -*-
import os
import re
import sys
sys.path.append('.')
sys.path.append('..')
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.prompts import StringPromptTemplate
from langchain import OpenAI, GoogleSearchAPIWrapper, LLMChain
from typing import List, Union, Callable
from langchain.schema import AgentAction, AgentFinish
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
from utils.configs import configs
os.environ["GOOGLE_CSE_ID"] = configs['tools']['google_cse_id']
os.environ["GOOGLE_API_KEY"] = configs['tools']['google_api_key']
os.environ["OPENAI_API_KEY"] = configs['openai_api_key']
# Set up the base template
template = """Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s
Question: {input}
{agent_scratchpad}"""
def fake_func(inp: str) -> str:
return "foo"
def get_tools(query):
docs = retriever.get_relevant_documents(query)
return [ALL_TOOLS[d.metadata["index"]] for d in docs]
# Set up a prompt template
class CustomPromptTemplate(StringPromptTemplate):
# The template to use
template: str
############## NEW ######################
# The list of tools available
tools_getter: Callable
def format(self, **kwargs) -> str:
# Get the intermediate steps (AgentAction, Observation tuples)
# Format them in a particular way
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
############## NEW ######################
tools = self.tools_getter(kwargs["input"])
# Create a tools variable from the list of tools provided
kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join([tool.name for tool in tools])
print(self.template.format(**kwargs))
return self.template.format(**kwargs)
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action: (.*?)[\n]*Action Input:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
if __name__ == '__main__':
# Define which tools the agent can use to answer user queries
search = GoogleSearchAPIWrapper()
search_tool = Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events"
)
fake_tools = [
Tool(
name=f"foo-{i}",
func=fake_func,
description=f"a silly function that you can use to get more information about the number {i}"
)
for i in range(99)
]
ALL_TOOLS = [search_tool] + fake_tools
# tools retrieval
tool_lib = configs['demo_agents']['tool_faiss_index']
if os.path.exists(tool_lib):
vector_store = FAISS.load_local(tool_lib, OpenAIEmbeddings())
else:
docs = [Document(page_content=t.description, metadata={"index": i}) for i, t in enumerate(ALL_TOOLS)]
vector_store = FAISS.from_documents(docs, OpenAIEmbeddings())
vector_store.save_local(tool_lib)
retriever = vector_store.as_retriever()
prompt = CustomPromptTemplate(
template=template,
tools_getter=get_tools,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=["input", "intermediate_steps"]
)
output_parser = CustomOutputParser()
model_name = configs['model_name']
llm = OpenAI(model_name=model_name, temperature=0)
# LLM chain consisting of the LLM and a prompt
llm_chain = LLMChain(llm=llm, prompt=prompt)
query = "What's the weather in SF?"
tools = get_tools(query)
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
agent_executor.run(query)
# LangChain APIs used above: langchain.agents.AgentExecutor.from_agent_and_tools, langchain.agents.LLMSingleActionAgent, langchain.LLMChain, langchain.GoogleSearchAPIWrapper, langchain.schema.Document, langchain.agents.Tool, langchain.embeddings.OpenAIEmbeddings, langchain.OpenAI
import base64
from email.message import EmailMessage
from typing import List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.gmail.base import GmailBaseTool
class CreateDraftSchema(BaseModel):
"""Input for CreateDraftTool."""
message: str = Field(
...,
description="The message to include in the draft.",
)
to: List[str] = Field(
...,
description="The list of recipients.",
)
subject: str = Field(
...,
description="The subject of the message.",
)
cc: Optional[List[str]] = Field(
None,
description="The list of CC recipients.",
)
bcc: Optional[List[str]] = Field(
None,
description="The list of BCC recipients.",
)
class GmailCreateDraft(GmailBaseTool):
"""Tool that creates a draft email for Gmail."""
name: str = "create_gmail_draft"
description: str = (
"Use this tool to create a draft email with the provided message fields."
)
args_schema: Type[CreateDraftSchema] = CreateDraftSchema
def _prepare_draft_message(
self,
message: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
) -> dict:
draft_message = EmailMessage()
draft_message.set_content(message)
draft_message["To"] = ", ".join(to)
draft_message["Subject"] = subject
if cc is not None:
draft_message["Cc"] = ", ".join(cc)
if bcc is not None:
draft_message["Bcc"] = ", ".join(bcc)
encoded_message = base64.urlsafe_b64encode(draft_message.as_bytes()).decode()
return {"message": {"raw": encoded_message}}
def _run(
self,
message: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
create_message = self._prepare_draft_message(message, to, subject, cc, bcc)
draft = (
self.api_resource.users()
.drafts()
.create(userId="me", body=create_message)
.execute()
)
output = f'Draft created. Draft Id: {draft["id"]}'
return output
except Exception as e:
raise Exception(f"An error occurred: {e}")
# LangChain APIs used above: langchain.pydantic_v1.Field
from langchain import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.docstore.document import Document
base_prompt = """A profound and powerful writer, you have been given a context text and a search query, {0}. You must write an in-depth analysis, highlighting the significance of {0} in larger context's meaning as well as INCLUDE AS MANY SPECIFIC QUOTATIONS AS POSSIBLE (marked with quotes) from the context and note what page you found them from. Try to prioritize quotations in responses that should be about 1000 characters total.
"""
def summarize_context(search_term: str, contexts: list[str], openai_api_key: str):
try:
if openai_api_key:
llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
else:
llm = OpenAI(temperature=0)
docs = [Document(page_content=context) for context in contexts]
    # We have to do a little acrobatics here: the summarize chain cannot take more than one
    # input variable, so we interpolate the search term into the prompt string before building the template
final_prompt = base_prompt.format(search_term) + "\n{text}\n\nSUMMARY:"
final_prompt_template = PromptTemplate(template = final_prompt, input_variables=["text"])
llm_summarize = load_summarize_chain(llm, chain_type="map_reduce", return_intermediate_steps=True, map_prompt=final_prompt_template, combine_prompt=final_prompt_template)
global_summary = llm_summarize({"input_documents": docs}, return_only_outputs=True)
if (len(global_summary["output_text"]) > 400):
return global_summary["output_text"]
else:
# To augment the summary with more details that don't get lost, we extract some info from the summaries
doc_summaries = [Document(page_content=summary) for summary in global_summary["intermediate_steps"]]
qa_chain = load_qa_chain(llm, chain_type="stuff")
query = "What is the significance of {0} in the context and quotes (include quotations) to back up your reasoning".format(search_term)
additional_context = qa_chain({"input_documents": doc_summaries, "question": query}, return_only_outputs=True)
return global_summary["output_text"] + additional_context["output_text"]
except Exception as e:
print("Error generating summary: ", e)
raise e
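A minimal usage sketch with placeholder context strings; the API key is assumed to come
from the caller's environment or secrets store.

contexts = [
    "Page 12: ... passage mentioning the search term ...",
    "Page 47: ... another passage mentioning the search term ...",
]
summary = summarize_context("the green light", contexts, openai_api_key="sk-...")
print(summary)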
# LangChain APIs used above: langchain.chains.question_answering.load_qa_chain, langchain.chains.summarize.load_summarize_chain, langchain.docstore.document.Document, langchain.llms.OpenAI, langchain.PromptTemplate
import streamlit as st
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
def generate_response(uploaded_file, openai_api_key, query_text):
# Load document if file is uploaded
if uploaded_file is not None:
documents = [uploaded_file.read().decode()]
# Split documents into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.create_documents(documents)
# Select embeddings
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
# Create a vectorstore from documents
db = Chroma.from_documents(texts, embeddings)
# Create retriever interface
retriever = db.as_retriever()
# Create QA chain
qa = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key=openai_api_key), chain_type='stuff', retriever=retriever)
return qa.run(query_text)
# Page title
st.set_page_config(page_title='🦜🔗 Ask the Doc App')
st.title('🦜🔗 Ask the Doc App')
# File upload
uploaded_file = st.file_uploader('Upload an article', type='txt')
# Query text
query_text = st.text_input('Enter your question:', placeholder = 'Please provide a short summary.', disabled=not uploaded_file)
# Form input and query
result = []
with st.form('myform', clear_on_submit=True):
openai_api_key = st.text_input('OpenAI API Key', type='password', disabled=not (uploaded_file and query_text))
submitted = st.form_submit_button('Submit', disabled=not(uploaded_file and query_text))
if submitted and openai_api_key.startswith('sk-'):
with st.spinner('Calculating...'):
response = generate_response(uploaded_file, openai_api_key, query_text)
result.append(response)
del openai_api_key
if len(result):
st.info(response)
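# To run the app locally: streamlit run <this_file>.py (filename assumed) -- Streamlit
# serves the page and re-executes this script on every widget interaction.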
# LangChain APIs used above: langchain.text_splitter.CharacterTextSplitter, langchain.embeddings.OpenAIEmbeddings, langchain.llms.OpenAI, langchain.vectorstores.Chroma.from_documents
import re
from typing import List, Union
# Built-in Python module used to format and wrap text
import textwrap
import time
from langchain.agents import (
    Tool,  # a tool the agent can use
    AgentExecutor,  # runs the agent
    LLMSingleActionAgent,  # defines the agent
    AgentOutputParser,  # parses the agent's output
)
from langchain.prompts import StringPromptTemplate
# LLMChain: combines a PromptTemplate with an LLM
from langchain import OpenAI, LLMChain
# AgentAction (an agent step) and AgentFinish (the agent is done)
from langchain.schema import AgentAction, AgentFinish
# PromptTemplate: manages prompts for LLMs
from langchain.prompts import PromptTemplate
from langchain.llms.base import BaseLLM
# Prompt template for the LLM
CONTEXT_QA_TMPL = """
根据以下提供的信息,回答用户的问题
信息:{context}
问题:{query}
"""
CONTEXT_QA_PROMPT = PromptTemplate(
input_variables=["query", "context"],
template=CONTEXT_QA_TMPL,
)
# Print the response at most 60 characters per line, pausing 0.1 s per character (typewriter effect)
def output_response(response: str) -> None:
if not response:
exit(0)
    # Wrap to at most 60 characters per line
for line in textwrap.wrap(response, width=60):
for word in line.split():
for char in word:
print(char, end="", flush=True)
time.sleep(0.1) # Add a delay of 0.1 seconds between each character
print(" ", end="", flush=True) # Add a space between each word
print() # Move to the next line after each line is printed
    # Reaching this point means the answer to this question is finished
print("----------------------------------------------------------------")
# Simulated data source for the company's products and company information
class TeslaDataSource:
def __init__(self, llm: BaseLLM):
self.llm = llm
    # Tool 1: product description
def find_product_description(self, product_name: str) -> str:
"""模拟公司产品的数据库"""
product_info = {
"Model 3": "具有简洁、动感的外观设计,流线型车身和现代化前脸。定价23.19-33.19万",
"Model Y": "在外观上与Model 3相似,但采用了更高的车身和更大的后备箱空间。定价26.39-36.39万",
"Model X": "拥有独特的翅子门设计和更加大胆的外观风格。定价89.89-105.89万",
}
        # Map product name => product description
return product_info.get(product_name, "没有找到这个产品")
    # Tool 2: company introduction
def find_company_info(self, query: str) -> str:
"""模拟公司介绍文档数据库,让llm根据信息回答问题"""
context = """
特斯拉最知名的产品是电动汽车,其中包括Model S、Model 3、Model X和Model Y等多款车型。
特斯拉以其技术创新、高性能和领先的自动驾驶技术而闻名。公司不断推动自动驾驶技术的研发,并在车辆中引入了各种驾驶辅助功能,如自动紧急制动、自适应巡航控制和车道保持辅助等。
"""
        # Prompt = the context + the user's query
prompt = CONTEXT_QA_PROMPT.format(query=query, context=context)
        # Run inference with the LLM
return self.llm(prompt)
AGENT_TMPL = """按照给定的格式回答以下问题。你可以使用下面这些工具:
{tools}
回答时需要遵循以下用---括起来的格式:
---
Question: 我需要回答的问题
Thought: 回答这个上述我需要做些什么
Action: "{tool_names}" 中的一个工具名
Action Input: 选择这个工具所需要的输入
Observation: 选择这个工具返回的结果
...(这个 思考/行动/行动输入/观察 可以重复N次)
Thought: 我现在知道最终答案
Final Answer: 原始输入问题的最终答案
---
现在开始回答,记得在给出最终答案前,需要按照指定格式进行一步一步的推理。
Question: {input}
{agent_scratchpad}
"""
class CustomPromptTemplate(StringPromptTemplate):
    template: str  # the base prompt template
    tools: List[Tool]  # the tools available to the agent
def format(self, **kwargs) -> str:
"""
按照定义的 template,将需要的值都填写进去。
Returns:
str: 填充好后的 template。
"""
        # Pop the intermediate steps (AgentAction, Observation) and replay them
intermediate_steps = kwargs.pop("intermediate_steps")
print('intermediate_steps=', intermediate_steps)
print('='*30)
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
        # Record the thoughts so far => assign them to agent_scratchpad
kwargs["agent_scratchpad"] = thoughts
        # List every available tool name + its description
kwargs["tools"] = "\n".join(
[f"{tool.name}: {tool.description}" for tool in self.tools]
)
        # List all tool names
kwargs["tool_names"] = ", ".join(
[tool.name for tool in self.tools]
)
cur_prompt = self.template.format(**kwargs)
#print(cur_prompt)
return cur_prompt
"""
对Agent返回结果进行解析,有两种可能:
1)还在思考中 AgentAction
2)找到了答案 AgentFinal
"""
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
"""
解析 llm 的输出,根据输出文本找到需要执行的决策。
Args:
llm_output (str): _description_
Raises:
ValueError: _description_
Returns:
Union[AgentAction, AgentFinish]: _description_
"""
        # If the output contains "Final Answer", the agent has finished
if "Final Answer:" in llm_output:
return AgentFinish(
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
        # Otherwise an AgentAction is needed
        regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"  # parse out the action and action_input
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
        # Return the next agent action
return AgentAction(
tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output
)
if __name__ == "__main__":
    # Define the LLM; requires the OPENAI_API_KEY environment variable to be set
llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo")
    # Our own data source
tesla_data_source = TeslaDataSource(llm)
    # Define the tools
tools = [
Tool(
name="查询产品名称",
func=tesla_data_source.find_product_description,
description="通过产品名称找到产品描述时用的工具,输入的是产品名称",
),
Tool(
name="公司相关信息",
func=tesla_data_source.find_company_info,
description="当用户询问公司相关的问题,可以通过这个工具了解公司信息",
),
]
    # The user-defined prompt template
agent_prompt = CustomPromptTemplate(
template=AGENT_TMPL,
tools=tools,
input_variables=["input", "intermediate_steps"],
)
    # Parser for the agent's output
output_parser = CustomOutputParser()
    # The most common chain: an LLM + a PromptTemplate
llm_chain = LLMChain(llm=llm, prompt=agent_prompt)
    # Names of the defined tools
tool_names = [tool.name for tool in tools]
    # Define the agent = llm_chain + output_parser + tool_names
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names,
)
    # Define the agent executor = agent + tools
agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True
)
    # Main loop: keep taking questions until Ctrl+C
while True:
try:
user_input = input("请输入您的问题:")
response = agent_executor.run(user_input)
output_response(response)
except KeyboardInterrupt:
            break

# LangChain APIs used above: langchain.agents.AgentExecutor.from_agent_and_tools, langchain.agents.LLMSingleActionAgent, langchain.LLMChain, langchain.agents.Tool, langchain.prompts.PromptTemplate, langchain.OpenAI
import os
import os.path as osp
from typing import List
from tqdm import tqdm
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import NLTKTextSplitter
from langchain.vectorstores.faiss import FAISS
import pandas as pd
import nltk
nltk.download('punkt')
PROCESSED_CSV_DIRECTORY = "processed" # Directory to save processed CSV file
def create_docs() -> List[Document]:
docs = []
df = pd.read_csv(osp.join(PROCESSED_CSV_DIRECTORY, 'scraped.csv'))
for index, row in df.iterrows():
doc = Document(page_content=row['text'], metadata={"source": row['url']})
docs.append(doc)
return docs
docs = create_docs()
doc_chunks = []
seen_chunks = set()
total_websites = set()
total_words = 0
splitter = NLTKTextSplitter(chunk_size=1024)
for source in tqdm(docs):
for chunk in splitter.split_text(source.page_content):
if chunk not in seen_chunks:
doc_chunks.append(
Document(page_content=chunk, metadata=source.metadata))
total_words += len(chunk.split())
total_websites.add(source.metadata['source'])
seen_chunks.add(chunk)
print(f'Total websites: {len(total_websites)}')
print(f'Total chunks: {len(doc_chunks)}')
print(f'Total words: {total_words}')
print(f'Avg words per chunk: {int(total_words / len(doc_chunks))}')
print(f'Estimated embedding cost: ${total_words / 0.75 / 1000 * 0.0004:.2f}')
search_index = FAISS.from_documents(doc_chunks, OpenAIEmbeddings(model='text-embedding-ada-002'))
# persistent search index
search_index.save_local("search_index")
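Once persisted, the index can be reloaded and queried elsewhere; a minimal sketch reusing
the imports above (the embedding model must match the one used when the index was built):

reloaded_index = FAISS.load_local("search_index", OpenAIEmbeddings(model='text-embedding-ada-002'))
for doc in reloaded_index.similarity_search("example query", k=4):
    print(doc.metadata["source"], doc.page_content[:100])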
# LangChain APIs used above: langchain.text_splitter.NLTKTextSplitter, langchain.docstore.document.Document, langchain.embeddings.openai.OpenAIEmbeddings
"""
References:
    llama-cpp-python docs: https://llama-cpp-python.readthedocs.io/en/latest/
Prerequisites:
    1. Install a C++ build environment
       https://developer.microsoft.com/en-us/windows/downloads/windows-sdk/
       and check "Desktop development with C++"
    2. Install the Python packages
       pip install llama-cpp-python
       pip install llama-cpp-python[server]
    3. Run the server
       python3 -m llama_cpp.server --model "<model path>"
# http://localhost:8000/v1
"""
import time
import os
import gradio as gr
from langchain.document_loaders import DirectoryLoader
from langchain.llms import ChatGLM
from langchain.llms.llamacpp import LlamaCpp
from langchain.prompts import PromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
# Embedding model name options
embedding_model_dict = {
"ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
"ernie-base": "nghuyong/ernie-3.0-base-zh",
"text2vec": "GanymedeNil/text2vec-large-chinese",
"text2vec2": "uer/sbert-base-chinese-nli",
"text2vec3": "shibing624/text2vec-base-chinese",
}
def load_documents(directory="documents"):
"""
加载books下的文件,进行拆分
:param directory:
:return:
"""
loader = DirectoryLoader(directory)
documents = loader.load()
text_spliter = CharacterTextSplitter(chunk_size=256, chunk_overlap=0)
split_docs = text_spliter.split_documents(documents)
return split_docs
def load_embedding_model(model_name="ernie-tiny"):
"""
加载embedding模型
:param model_name:
:return:
"""
encode_kwargs = {"normalize_embeddings": False}
model_kwargs = {"device": "cuda:0"}
return HuggingFaceEmbeddings(
model_name=embedding_model_dict[model_name],
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
def store_chroma(docs, embeddings, persist_directory="VectorStore"):
"""
讲文档向量化,存入向量数据库
:param docs:
:param embeddings:
:param persist_directory:
:return:
"""
db = Chroma.from_documents(docs, embeddings, persist_directory=persist_directory)
db.persist()
return db
# Load the embedding model
embeddings = load_embedding_model('text2vec3')
# Load (or build) the vector database
if not os.path.exists('VectorStore'):
documents = load_documents()
db = store_chroma(documents, embeddings)
else:
db = Chroma(persist_directory='VectorStore', embedding_function=embeddings)
# Create the LLM
# llm = ChatGLM(
# endpoint_url='http://127.0.0.1:8000',
# max_token=80000,
# top_p=0.9
# )
llm = LlamaCpp(
model_path=r"G:\models\llama2\llama-2-7b-chat-q4\llama-2-7b-chat.Q4_0.gguf",
n_ctx=2048,
stop=['Human:']
)
# Create the QA chain
QA_CHAIN_PROMPT = PromptTemplate.from_template("""Human:
根据下面的上下文(context)内容回答问题。
如果你不知道答案,就回答不知道,不要试图编造答案。
答案最多3句话,保持答案简介。
总是在答案结束时说”谢谢你的提问!“
{context}
问题:{question}
Assistant:
""")
retriever = db.as_retriever()
qa = RetrievalQA.from_chain_type(
llm=llm,
retriever=retriever,
verbose=True,
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
)
def add_text(history, text):
history = history + [(text, None)]
return history, gr.update(value="", interactive=False)
def add_file(history, file):
"""
上传文件后的回调函数,将上传的文件向量化存入数据库
:param history:
:param file:
:return:
"""
global qa
directory = os.path.dirname(file.name)
documents = load_documents(directory)
db = store_chroma(documents, embeddings)
retriever = db.as_retriever()
qa.retriever = retriever
history = history + [((file.name,), None)]
return history
def bot(history):
"""
聊天调用的函数
:param history:
:return:
"""
message = history[-1][0]
if isinstance(message, tuple):
response = "文件上传成功!!"
else:
response = qa({"query": message})['result']
history[-1][1] = ""
for character in response:
history[-1][1] += character
time.sleep(0.05)
yield history
with gr.Blocks() as demo:
chatbot = gr.Chatbot(
[],
elem_id="chatbot",
bubble_full_width=False,
avatar_images=(None, (os.path.join(os.path.dirname(__file__), "avatar.png"))),
)
with gr.Row():
txt = gr.Textbox(
scale=4,
show_label=False,
placeholder="Enter text and press enter, or upload an image",
container=False,
)
btn = gr.UploadButton("📁", file_types=['txt'])
txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
bot, chatbot, chatbot
)
txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)
file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(
bot, chatbot, chatbot
)
demo.queue()
if __name__ == "__main__":
demo.launch()
# LangChain APIs used above: langchain.text_splitter.CharacterTextSplitter, langchain.document_loaders.DirectoryLoader, langchain.embeddings.huggingface.HuggingFaceEmbeddings, langchain.chains.RetrievalQA.from_chain_type, langchain.llms.llamacpp.LlamaCpp, langchain.vectorstores.Chroma.from_documents, langchain.prompts.PromptTemplate.from_template, langchain.vectorstores.Chroma
from enum import Enum
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Union
from langchain.utilities.redis import TokenEscaper
# disable mypy error for dunder method overrides
# mypy: disable-error-code="override"
class RedisFilterOperator(Enum):
EQ = 1
NE = 2
LT = 3
GT = 4
LE = 5
GE = 6
OR = 7
AND = 8
LIKE = 9
IN = 10
class RedisFilter:
@staticmethod
def text(field: str) -> "RedisText":
return RedisText(field)
@staticmethod
def num(field: str) -> "RedisNum":
return RedisNum(field)
@staticmethod
def tag(field: str) -> "RedisTag":
return RedisTag(field)
class RedisFilterField:
escaper: "TokenEscaper" = TokenEscaper()
OPERATORS: Dict[RedisFilterOperator, str] = {}
def __init__(self, field: str):
self._field = field
self._value: Any = None
self._operator: RedisFilterOperator = RedisFilterOperator.EQ
def equals(self, other: "RedisFilterField") -> bool:
if not isinstance(other, type(self)):
return False
return self._field == other._field and self._value == other._value
def _set_value(
self, val: Any, val_type: type, operator: RedisFilterOperator
) -> None:
# check that the operator is supported by this class
if operator not in self.OPERATORS:
raise ValueError(
f"Operator {operator} not supported by {self.__class__.__name__}. "
+ f"Supported operators are {self.OPERATORS.values()}"
)
if not isinstance(val, val_type):
raise TypeError(
f"Right side argument passed to operator {self.OPERATORS[operator]} "
f"with left side "
f"argument {self.__class__.__name__} must be of type {val_type}"
)
self._value = val
self._operator = operator
def check_operator_misuse(func: Callable) -> Callable:
@wraps(func)
def wrapper(instance: Any, *args: List[Any], **kwargs: Dict[str, Any]) -> Any:
# Extracting 'other' from positional arguments or keyword arguments
other = kwargs.get("other") if "other" in kwargs else None
if not other:
for arg in args:
if isinstance(arg, type(instance)):
other = arg
break
if isinstance(other, type(instance)):
raise ValueError(
"Equality operators are overridden for FilterExpression creation. Use "
".equals() for equality checks"
)
return func(instance, *args, **kwargs)
return wrapper
class RedisTag(RedisFilterField):
"""A RedisTag is a RedisFilterField representing a tag in a Redis index."""
OPERATORS: Dict[RedisFilterOperator, str] = {
RedisFilterOperator.EQ: "==",
RedisFilterOperator.NE: "!=",
RedisFilterOperator.IN: "==",
}
OPERATOR_MAP: Dict[RedisFilterOperator, str] = {
RedisFilterOperator.EQ: "@%s:{%s}",
RedisFilterOperator.NE: "(-@%s:{%s})",
RedisFilterOperator.IN: "@%s:{%s}",
}
def __init__(self, field: str):
"""Create a RedisTag FilterField
Args:
field (str): The name of the RedisTag field in the index to be queried
against.
"""
super().__init__(field)
def _set_tag_value(
self, other: Union[List[str], str], operator: RedisFilterOperator
) -> None:
if isinstance(other, list):
if not all(isinstance(tag, str) for tag in other):
raise ValueError("All tags must be strings")
else:
other = [other]
self._set_value(other, list, operator)
@check_operator_misuse
def __eq__(self, other: Union[List[str], str]) -> "RedisFilterExpression":
"""Create a RedisTag equality filter expression
Args:
other (Union[List[str], str]): The tag(s) to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisTag
>>> filter = RedisTag("brand") == "nike"
"""
self._set_tag_value(other, RedisFilterOperator.EQ)
return RedisFilterExpression(str(self))
@check_operator_misuse
def __ne__(self, other: Union[List[str], str]) -> "RedisFilterExpression":
"""Create a RedisTag inequality filter expression
Args:
other (Union[List[str], str]): The tag(s) to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisTag
>>> filter = RedisTag("brand") != "nike"
"""
self._set_tag_value(other, RedisFilterOperator.NE)
return RedisFilterExpression(str(self))
@property
def _formatted_tag_value(self) -> str:
return "|".join([self.escaper.escape(tag) for tag in self._value])
def __str__(self) -> str:
if not self._value:
raise ValueError(
f"Operator must be used before calling __str__. Operators are "
f"{self.OPERATORS.values()}"
)
"""Return the Redis Query syntax for a RedisTag filter expression"""
return self.OPERATOR_MAP[self._operator] % (
self._field,
self._formatted_tag_value,
)
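# Illustrative behaviour of the overridden operators above (examples only, not part of
# the original module):
#   RedisTag("brand") == "nike"               ->  RedisFilterExpression('@brand:{nike}')
#   RedisTag("brand") != ["nike", "adidas"]   ->  RedisFilterExpression('(-@brand:{nike|adidas})')
#   RedisTag("brand") == RedisTag("brand")    ->  raises ValueError (guarded by
#                                                 check_operator_misuse); use .equals() instead.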
class RedisNum(RedisFilterField):
"""A RedisFilterField representing a numeric field in a Redis index."""
OPERATORS: Dict[RedisFilterOperator, str] = {
RedisFilterOperator.EQ: "==",
RedisFilterOperator.NE: "!=",
RedisFilterOperator.LT: "<",
RedisFilterOperator.GT: ">",
RedisFilterOperator.LE: "<=",
RedisFilterOperator.GE: ">=",
}
OPERATOR_MAP: Dict[RedisFilterOperator, str] = {
RedisFilterOperator.EQ: "@%s:[%i %i]",
RedisFilterOperator.NE: "(-@%s:[%i %i])",
RedisFilterOperator.GT: "@%s:[(%i +inf]",
RedisFilterOperator.LT: "@%s:[-inf (%i]",
RedisFilterOperator.GE: "@%s:[%i +inf]",
RedisFilterOperator.LE: "@%s:[-inf %i]",
}
def __str__(self) -> str:
"""Return the Redis Query syntax for a Numeric filter expression"""
if not self._value:
raise ValueError(
f"Operator must be used before calling __str__. Operators are "
f"{self.OPERATORS.values()}"
)
if (
self._operator == RedisFilterOperator.EQ
or self._operator == RedisFilterOperator.NE
):
return self.OPERATOR_MAP[self._operator] % (
self._field,
self._value,
self._value,
)
else:
return self.OPERATOR_MAP[self._operator] % (self._field, self._value)
@check_operator_misuse
def __eq__(self, other: int) -> "RedisFilterExpression":
"""Create a Numeric equality filter expression
Args:
other (int): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("zipcode") == 90210
"""
self._set_value(other, int, RedisFilterOperator.EQ)
return RedisFilterExpression(str(self))
@check_operator_misuse
def __ne__(self, other: int) -> "RedisFilterExpression":
"""Create a Numeric inequality filter expression
Args:
other (int): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("zipcode") != 90210
"""
self._set_value(other, int, RedisFilterOperator.NE)
return RedisFilterExpression(str(self))
def __gt__(self, other: int) -> "RedisFilterExpression":
"""Create a RedisNumeric greater than filter expression
Args:
other (int): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") > 18
"""
self._set_value(other, int, RedisFilterOperator.GT)
return RedisFilterExpression(str(self))
def __lt__(self, other: int) -> "RedisFilterExpression":
"""Create a Numeric less than filter expression
Args:
other (int): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") < 18
"""
self._set_value(other, int, RedisFilterOperator.LT)
return RedisFilterExpression(str(self))
def __ge__(self, other: int) -> "RedisFilterExpression":
"""Create a Numeric greater than or equal to filter expression
Args:
other (int): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") >= 18
"""
self._set_value(other, int, RedisFilterOperator.GE)
return RedisFilterExpression(str(self))
def __le__(self, other: int) -> "RedisFilterExpression":
"""Create a Numeric less than or equal to filter expression
Args:
other (int): The value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") <= 18
"""
self._set_value(other, int, RedisFilterOperator.LE)
return RedisFilterExpression(str(self))
class RedisText(RedisFilterField):
"""A RedisText is a RedisFilterField representing a text field in a Redis index."""
OPERATORS = {
RedisFilterOperator.EQ: "==",
RedisFilterOperator.NE: "!=",
RedisFilterOperator.LIKE: "%",
}
OPERATOR_MAP = {
RedisFilterOperator.EQ: '@%s:"%s"',
RedisFilterOperator.NE: '(-@%s:"%s")',
RedisFilterOperator.LIKE: "@%s:%s",
}
@check_operator_misuse
def __eq__(self, other: str) -> "RedisFilterExpression":
"""Create a RedisText equality filter expression
Args:
other (str): The text value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisText
>>> filter = RedisText("job") == "engineer"
"""
self._set_value(other, str, RedisFilterOperator.EQ)
return RedisFilterExpression(str(self))
@check_operator_misuse
def __ne__(self, other: str) -> "RedisFilterExpression":
"""Create a RedisText inequality filter expression
Args:
other (str): The text value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisText
>>> filter = RedisText("job") != "engineer"
"""
self._set_value(other, str, RedisFilterOperator.NE)
return RedisFilterExpression(str(self))
def __mod__(self, other: str) -> "RedisFilterExpression":
"""Create a RedisText like filter expression
Args:
other (str): The text value to filter on.
Example:
>>> from langchain.vectorstores.redis import RedisText
>>> filter = RedisText("job") % "engineer"
"""
self._set_value(other, str, RedisFilterOperator.LIKE)
return RedisFilterExpression(str(self))
def __str__(self) -> str:
if not self._value:
raise ValueError(
f"Operator must be used before calling __str__. Operators are "
f"{self.OPERATORS.values()}"
)
try:
return self.OPERATOR_MAP[self._operator] % (self._field, self._value)
except KeyError:
raise Exception("Invalid operator")
class RedisFilterExpression:
"""A RedisFilterExpression is a logical expression of RedisFilterFields.
RedisFilterExpressions can be combined using the & and | operators to create
complex logical expressions that evaluate to the Redis Query language.
This presents an interface by which users can create complex queries
without having to know the Redis Query language.
Filter expressions are not initialized directly. Instead they are built
by combining RedisFilterFields using the & and | operators.
Examples:
>>> from langchain.vectorstores.redis import RedisTag, RedisNum
>>> brand_is_nike = RedisTag("brand") == "nike"
>>> price_is_under_100 = RedisNum("price") < 100
>>> filter = brand_is_nike & price_is_under_100
>>> print(str(filter))
(@brand:{nike} @price:[-inf (100)])
"""
def __init__(
self,
_filter: Optional[str] = None,
operator: Optional[RedisFilterOperator] = None,
left: Optional["RedisFilterExpression"] = None,
right: Optional["RedisFilterExpression"] = None,
):
self._filter = _filter
self._operator = operator
self._left = left
self._right = right
def __and__(self, other: "RedisFilterExpression") -> "RedisFilterExpression":
return RedisFilterExpression(
operator=RedisFilterOperator.AND, left=self, right=other
)
def __or__(self, other: "RedisFilterExpression") -> "RedisFilterExpression":
return RedisFilterExpression(
operator=RedisFilterOperator.OR, left=self, right=other
)
def __str__(self) -> str:
# top level check that allows recursive calls to __str__
if not self._filter and not self._operator:
raise ValueError("Improperly initialized RedisFilterExpression")
# allow for single filter expression without operators as last
# expression in the chain might not have an operator
if self._operator:
operator_str = " | " if self._operator == RedisFilterOperator.OR else " "
return f"({str(self._left)}{operator_str}{str(self._right)})"
# check that base case, the filter is set
if not self._filter:
raise ValueError("Improperly initialized RedisFilterExpression")
return self._filter
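
# Minimal usage sketch of how the field classes above compose into a
# RedisFilterExpression; the field names "brand", "price", and "job" are
# illustrative assumptions, not part of the original module.
def _example_filter_expression() -> str:
    brand_is_nike = RedisTag("brand") == "nike"
    price_is_under_100 = RedisNum("price") < 100
    job_is_engineer = RedisText("job") % "engineer"
    combined = (brand_is_nike & price_is_under_100) | job_is_engineer
    # str() renders the Redis Query syntax for the whole expression.
    return str(combined)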
| [
"langchain.utilities.redis.TokenEscaper"
] | [((747, 761), 'langchain.utilities.redis.TokenEscaper', 'TokenEscaper', ([], {}), '()\n', (759, 761), False, 'from langchain.utilities.redis import TokenEscaper\n'), ((2002, 2013), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2007, 2013), False, 'from functools import wraps\n')] |
from langchain.tools import tool
from graph_chain import get_results
@tool("graph-tool")
def graph_tool(query: str) -> str:
    """Tool for returning aggregations of Manager, Company, or Industry data, or for answering questions that depend on relationships between a Company and other objects. Use this tool second, to verify the results of vector-graph-tool.
"""
return get_results(query) | [
"langchain.tools.tool"
] | [((71, 89), 'langchain.tools.tool', 'tool', (['"""graph-tool"""'], {}), "('graph-tool')\n", (75, 89), False, 'from langchain.tools import tool\n'), ((366, 384), 'graph_chain.get_results', 'get_results', (['query'], {}), '(query)\n', (377, 384), False, 'from graph_chain import get_results\n')] |
import matplotlib.pyplot as plt
import numpy as np
import openai
import os
import pyaudio
import pyttsx3
import threading
import tkinter as tk
import queue
import wave
import whisper
from langchain import OpenAI, SQLDatabase
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.agents import create_sql_agent
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from tkinter import scrolledtext
AUDIO_FORMAT = pyaudio.paInt16
CHANNELS = 1
FRAME_RATE = 16000
CHUNK = 1024
# Get OpenAI API key
openai.api_key = os.environ["OPENAI_API_KEY"]
s2_password = "<password>"
s2_host = "<host>"
s2_db = "timeseries_db"
db = SQLDatabase.from_uri(f"mysql+pymysql://admin:{s2_password}@{s2_host}:3306/{s2_db}")
llm = OpenAI(
model_name = "gpt-3.5-turbo-instruct",
temperature = 0,
verbose = False
)
toolkit = SQLDatabaseToolkit(db = db, llm = llm)
agent_executor = create_sql_agent(
llm = OpenAI(
model_name = "gpt-3.5-turbo-instruct",
temperature = 0
),
toolkit = toolkit,
verbose = False
)
model = whisper.load_model("base.en")
# GUI class
class AudioRecorderGUI:
def __init__(self, root):
self.root = root
self.root.title("Audio Recorder")
self.start_button = tk.Button(root, text = "Start Recording", command = self.start_recording)
self.start_button.pack(pady = 10)
self.stop_button = tk.Button(root, text = "Stop Recording", command = self.stop_recording, state = tk.DISABLED)
self.stop_button.pack(pady = 5)
self.exit_button = tk.Button(root, text = "Exit", command = self.exit_program)
self.exit_button.pack(pady = 5)
self.transcription_box = scrolledtext.ScrolledText(root, height = 5, width = 60)
self.transcription_box.pack(padx = 10, pady = 10)
self.recording_timer = None
self.audio_filename = "audio_recording.wav"
self.fig, self.ax = plt.subplots()
self.canvas = FigureCanvasTkAgg(self.fig, master = self.root)
self.canvas_widget = self.canvas.get_tk_widget()
self.canvas_widget.pack(side = tk.TOP, fill = tk.BOTH, expand = 1)
self.audio_array = np.array([])
self.update_waveform = False
def update_waveform_plot(self):
while self.update_waveform and not self.stop_event.is_set():
data = self.audio_queue.queue[-1] if not self.audio_queue.empty() else np.zeros(1024)
self.audio_array = np.frombuffer(data, dtype = np.int16)
self.ax.clear()
self.ax.plot(self.audio_array, color = "r")
self.ax.set_title("Real-Time Audio Waveform")
self.ax.set_xlabel("Time (samples)")
self.ax.set_ylabel("Amplitude")
self.ax.set_ylim(-128, 128)
self.ax.set_xlim(0, len(self.audio_array))
self.canvas.draw()
self.root.update()
def speak_audio(self, text):
engine = pyttsx3.init()
engine.setProperty("voice", "english_us")
engine.setProperty("rate", 150)
engine.say(text)
engine.runAndWait()
engine.stop()
def start_recording(self):
self.transcription_box.delete(1.0, tk.END)
self.stop_event = threading.Event()
self.audio_queue = queue.Queue()
self.record_thread = threading.Thread(target = self.record_audio)
self.record_thread.start()
self.recording_timer = self.root.after(20000, self.stop_recording)
self.update_waveform = True
self.update_waveform_plot_thread = threading.Thread(target = self.update_waveform_plot)
self.update_waveform_plot_thread.start()
self.start_button.config(state = tk.DISABLED)
self.stop_button.config(state = tk.NORMAL)
def stop_recording(self):
if self.recording_timer:
self.root.after_cancel(self.recording_timer)
self.recording_timer = None
self.stop_event.set()
self.record_thread.join()
transcription = self.transcribe_audio(self.audio_filename)
self.transcription_box.insert(
tk.END,
"Transcription:\n" + transcription + "\n"
)
speak_thread = threading.Thread(target = self.speak_audio, args = (agent_executor.run(transcription),))
speak_thread.start()
self.start_button.config(state = tk.NORMAL)
self.stop_button.config(state = tk.DISABLED)
def record_audio(self):
audio = pyaudio.PyAudio()
stream = audio.open(
format = AUDIO_FORMAT,
channels = CHANNELS,
rate = FRAME_RATE,
input = True,
frames_per_buffer = CHUNK
)
while not self.stop_event.is_set():
data = stream.read(CHUNK)
self.audio_queue.put(data)
stream.stop_stream()
stream.close()
audio.terminate()
with wave.open(self.audio_filename, "wb") as wf:
wf.setnchannels(CHANNELS)
wf.setsampwidth(audio.get_sample_size(AUDIO_FORMAT))
wf.setframerate(FRAME_RATE)
wf.writeframes(b''.join(list(self.audio_queue.queue)))
def transcribe_audio(self, filename):
with open(filename, "rb") as audio_file:
# transcript = openai.Audio.transcribe(
# model = "whisper-1",
# file = audio_file,
# language = "en"
# )
transcript = model.transcribe(filename)
return transcript["text"].strip()
def exit_program(self):
self.root.destroy()
def main():
root = tk.Tk()
app = AudioRecorderGUI(root)
root.mainloop()
if __name__ == "__main__":
main()
| [
"langchain.agents.agent_toolkits.SQLDatabaseToolkit",
"langchain.SQLDatabase.from_uri",
"langchain.OpenAI"
] | [((652, 740), 'langchain.SQLDatabase.from_uri', 'SQLDatabase.from_uri', (['f"""mysql+pymysql://admin:{s2_password}@{s2_host}:3306/{s2_db}"""'], {}), "(\n f'mysql+pymysql://admin:{s2_password}@{s2_host}:3306/{s2_db}')\n", (672, 740), False, 'from langchain import OpenAI, SQLDatabase\n'), ((743, 816), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""', 'temperature': '(0)', 'verbose': '(False)'}), "(model_name='gpt-3.5-turbo-instruct', temperature=0, verbose=False)\n", (749, 816), False, 'from langchain import OpenAI, SQLDatabase\n'), ((848, 882), 'langchain.agents.agent_toolkits.SQLDatabaseToolkit', 'SQLDatabaseToolkit', ([], {'db': 'db', 'llm': 'llm'}), '(db=db, llm=llm)\n', (866, 882), False, 'from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n'), ((1073, 1102), 'whisper.load_model', 'whisper.load_model', (['"""base.en"""'], {}), "('base.en')\n", (1091, 1102), False, 'import whisper\n'), ((5634, 5641), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (5639, 5641), True, 'import tkinter as tk\n'), ((933, 991), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo-instruct', temperature=0)\n", (939, 991), False, 'from langchain import OpenAI, SQLDatabase\n'), ((1266, 1335), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Start Recording"""', 'command': 'self.start_recording'}), "(root, text='Start Recording', command=self.start_recording)\n", (1275, 1335), True, 'import tkinter as tk\n'), ((1410, 1501), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Stop Recording"""', 'command': 'self.stop_recording', 'state': 'tk.DISABLED'}), "(root, text='Stop Recording', command=self.stop_recording, state=\n tk.DISABLED)\n", (1419, 1501), True, 'import tkinter as tk\n'), ((1571, 1626), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Exit"""', 'command': 'self.exit_program'}), "(root, text='Exit', command=self.exit_program)\n", (1580, 1626), True, 'import tkinter as tk\n'), ((1705, 1756), 'tkinter.scrolledtext.ScrolledText', 'scrolledtext.ScrolledText', (['root'], {'height': '(5)', 'width': '(60)'}), '(root, height=5, width=60)\n', (1730, 1756), False, 'from tkinter import scrolledtext\n'), ((1937, 1951), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1949, 1951), True, 'import matplotlib.pyplot as plt\n'), ((1974, 2019), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['self.fig'], {'master': 'self.root'}), '(self.fig, master=self.root)\n', (1991, 2019), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((2181, 2193), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2189, 2193), True, 'import numpy as np\n'), ((2948, 2962), 'pyttsx3.init', 'pyttsx3.init', ([], {}), '()\n', (2960, 2962), False, 'import pyttsx3\n'), ((3237, 3254), 'threading.Event', 'threading.Event', ([], {}), '()\n', (3252, 3254), False, 'import threading\n'), ((3282, 3295), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (3293, 3295), False, 'import queue\n'), ((3326, 3368), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.record_audio'}), '(target=self.record_audio)\n', (3342, 3368), False, 'import threading\n'), ((3562, 3612), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.update_waveform_plot'}), '(target=self.update_waveform_plot)\n', (3578, 3612), False, 'import threading\n'), ((4496, 4513), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (4511, 4513), False, 'import pyaudio\n'), 
((2467, 2502), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.int16'}), '(data, dtype=np.int16)\n', (2480, 2502), True, 'import numpy as np\n'), ((4931, 4967), 'wave.open', 'wave.open', (['self.audio_filename', '"""wb"""'], {}), "(self.audio_filename, 'wb')\n", (4940, 4967), False, 'import wave\n'), ((2421, 2435), 'numpy.zeros', 'np.zeros', (['(1024)'], {}), '(1024)\n', (2429, 2435), True, 'import numpy as np\n')] |
from typing import Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.base import BaseTool
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class WriteFileInput(BaseModel):
"""Input for WriteFileTool."""
file_path: str = Field(..., description="name of file")
text: str = Field(..., description="text to write to file")
append: bool = Field(
default=False, description="Whether to append to an existing file."
)
class WriteFileTool(BaseFileToolMixin, BaseTool):
"""Tool that writes a file to disk."""
name: str = "write_file"
args_schema: Type[BaseModel] = WriteFileInput
description: str = "Write file to disk"
def _run(
self,
file_path: str,
text: str,
append: bool = False,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
write_path = self.get_relative_path(file_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
try:
write_path.parent.mkdir(exist_ok=True, parents=False)
mode = "a" if append else "w"
with write_path.open(mode, encoding="utf-8") as f:
f.write(text)
return f"File written successfully to {file_path}."
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
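
# Minimal usage sketch (assumptions: the current working directory is an
# acceptable root_dir and "example.txt" is a hypothetical file name); not part
# of the original module.
def _example_write_file() -> str:
    tool = WriteFileTool(root_dir=".")
    return tool.run({"file_path": "example.txt", "text": "hello world", "append": False})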
| [
"langchain.pydantic_v1.Field",
"langchain.tools.file_management.utils.INVALID_PATH_TEMPLATE.format"
] | [((415, 453), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""name of file"""'}), "(..., description='name of file')\n", (420, 453), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((470, 517), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""text to write to file"""'}), "(..., description='text to write to file')\n", (475, 517), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((537, 611), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to append to an existing file."""'}), "(default=False, description='Whether to append to an existing file.')\n", (542, 611), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1153, 1220), 'langchain.tools.file_management.utils.INVALID_PATH_TEMPLATE.format', 'INVALID_PATH_TEMPLATE.format', ([], {'arg_name': '"""file_path"""', 'value': 'file_path'}), "(arg_name='file_path', value=file_path)\n", (1181, 1220), False, 'from langchain.tools.file_management.utils import INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError\n')] |
import argparse
from typing import Optional
from langchain.llms.ollama import Ollama
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from termcolor import colored
class RubberDuck:
"""
This class is a wrapper around the Ollama model.
"""
def __init__(self, model: str = "codellama") -> None:
"""
This function initializes the RubberDuck class.
Args:
model (str, optional): The model to be used. Defaults to "codellama".
"""
        self.system_prompt = """You are a pair programming tool to help developers debug, think through design, and write code.
Help the user think through their approach and provide feedback on the code."""
self.llm = Ollama(model=model, callbacks=[StreamingStdOutCallbackHandler()], system=self.system_prompt)
def call_llama(self, code: str = "", prompt: Optional[str] = None, chain: bool = False) -> None:
"""
This function calls the Ollama model to provide feedback on the given code.
Args:
code (str): The code to be reviewed.
prompt (Optional[str]): Custom prompt to be used. Defaults to None.
"""
if prompt is None:
prompt = "review the code, find any issues if any, suggest cleanups if any:" + code
else:
prompt = prompt + code
self.llm(prompt)
if chain:
while(True):
prompt = input(colored("\n What's on your mind? \n ", 'green'))
self.llm(prompt)
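
# Minimal usage sketch (assumption: an Ollama server with the "codellama"
# model pulled is running locally); not part of the original module.
def _example_rubber_duck_review() -> None:
    duck = RubberDuck(model="codellama")
    duck.call_llama(code="def add(a, b):\n    return a - b\n")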
def read_files_from_dir(directory: str) -> str:
"""
This function reads all the files from a directory and returns the concatenated string.
Args:
directory (str): The directory to be processed.
Returns:
str: The concatenated string of all the files.
"""
import os
files = os.listdir(directory)
code = ""
for file in files:
code += open(directory + "/" + file).read()
return code
def ducky() -> None:
"""
This function parses the command line arguments and calls the Ollama model.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--prompt", "-p", help="Custom prompt to be used", default=None)
parser.add_argument("--file", "-f", help="The file to be processed", default=None)
parser.add_argument("--directory", "-d", help="The directory to be processed", default=None)
parser.add_argument("--chain", "-c", help="Chain the output of the previous command to the next command", action="store_true", default=False)
parser.add_argument("--model", "-m", help="The model to be used", default="codellama")
args, _ = parser.parse_known_args()
    # My testing has shown that the codellama:7b-python model is good at returning Python code.
    # My intention with this tool was to give more general feedback and have a back-and-forth with the user.
rubber_ducky = RubberDuck(model=args.model)
if args.file is None and args.directory is None:
if args.chain:
while(True):
prompt = input(colored("\n What's on your mind? \n ", 'green'))
rubber_ducky.call_llama(prompt=prompt, chain=args.chain)
else:
prompt = input(colored("\n What's on your mind? \n ", 'green'))
rubber_ducky.call_llama(prompt=prompt, chain=args.chain)
if args.file is not None:
code = open(args.file).read()
rubber_ducky.call_llama(code=code, prompt=args.prompt, chain=args.chain)
elif args.directory is not None:
code = read_files_from_dir(args.directory)
rubber_ducky.call_llama(code=code, prompt=args.prompt, chain=args.chain)
if __name__ == "__main__":
ducky()
| [
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler"
] | [((1887, 1908), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1897, 1908), False, 'import os\n'), ((2146, 2171), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2169, 2171), False, 'import argparse\n'), ((3288, 3337), 'termcolor.colored', 'colored', (['"""\n What\'s on your mind? \n """', '"""green"""'], {}), '("""\n What\'s on your mind? \n """, \'green\')\n', (3295, 3337), False, 'from termcolor import colored\n'), ((784, 816), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (814, 816), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((1481, 1530), 'termcolor.colored', 'colored', (['"""\n What\'s on your mind? \n """', '"""green"""'], {}), '("""\n What\'s on your mind? \n """, \'green\')\n', (1488, 1530), False, 'from termcolor import colored\n'), ((3125, 3174), 'termcolor.colored', 'colored', (['"""\n What\'s on your mind? \n """', '"""green"""'], {}), '("""\n What\'s on your mind? \n """, \'green\')\n', (3132, 3174), False, 'from termcolor import colored\n')] |
import json
from pathlib import Path
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_text_splitters import RecursiveCharacterTextSplitter
# Load output from gpt crawler
path_to_gptcrawler = Path(__file__).parent.parent / "output.json"
data = json.loads(Path(path_to_gptcrawler).read_text())
docs = [
Document(
page_content=dict_["html"],
metadata={"title": dict_["title"], "url": dict_["url"]},
)
for dict_ in data
]
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
all_splits = text_splitter.split_documents(docs)
# Add to vectorDB
vectorstore = Chroma.from_documents(
documents=all_splits,
collection_name="rag-gpt-builder",
embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()
# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
# LLM
model = ChatOpenAI()
# RAG chain
chain = (
RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
| prompt
| model
| StrOutputParser()
)
# Add typing for input
class Question(BaseModel):
__root__: str
chain = chain.with_types(input_type=Question)
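
# Minimal local smoke test (assumptions: OPENAI_API_KEY is set and output.json
# from gpt-crawler exists at the expected path); not part of the original template.
if __name__ == "__main__":
    print(chain.invoke("What topics does the crawled site cover?"))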
| [
"langchain_community.chat_models.ChatOpenAI",
"langchain_core.prompts.ChatPromptTemplate.from_template",
"langchain_core.documents.Document",
"langchain_text_splitters.RecursiveCharacterTextSplitter",
"langchain_core.runnables.RunnablePassthrough",
"langchain_core.output_parsers.StrOutputParser",
"langchain_community.embeddings.OpenAIEmbeddings"
] | [((888, 954), 'langchain_text_splitters.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(100)'}), '(chunk_size=1000, chunk_overlap=100)\n', (918, 954), False, 'from langchain_text_splitters import RecursiveCharacterTextSplitter\n'), ((1330, 1372), 'langchain_core.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['template'], {}), '(template)\n', (1362, 1372), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((1388, 1400), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1398, 1400), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((722, 819), 'langchain_core.documents.Document', 'Document', ([], {'page_content': "dict_['html']", 'metadata': "{'title': dict_['title'], 'url': dict_['url']}"}), "(page_content=dict_['html'], metadata={'title': dict_['title'],\n 'url': dict_['url']})\n", (730, 819), False, 'from langchain_core.documents import Document\n'), ((1535, 1552), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (1550, 1552), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((1139, 1157), 'langchain_community.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1155, 1157), False, 'from langchain_community.embeddings import OpenAIEmbeddings\n'), ((608, 622), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (612, 622), False, 'from pathlib import Path\n'), ((671, 695), 'pathlib.Path', 'Path', (['path_to_gptcrawler'], {}), '(path_to_gptcrawler)\n', (675, 695), False, 'from pathlib import Path\n'), ((1480, 1501), 'langchain_core.runnables.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (1499, 1501), False, 'from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n')] |
import io
from io import IOBase
import os
import logging
import json
import sys
from typing import Any, List, Optional, Union
import pandas as pd
from langchain.agents import AgentType
from langchain.agents.agent import AgentExecutor
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain.schema.language_model import BaseLanguageModel
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../")))
from src.ai.conversations.conversation_manager import ConversationManager
from src.ai.tools.tool_registry import register_tool, tool_class
from src.ai.utilities.llm_helper import get_llm
from src.configuration.model_configuration import ModelConfiguration
from src.db.models.domain.file_model import FileModel
from src.db.models.documents import Documents
from src.db.models.user_settings import UserSettings
@tool_class
class SpreadsheetsTool:
excel_types = [".xls", ".xlsx", ".ods"]
cached_pandas_dataframes = {}
def __init__(
self,
configuration,
conversation_manager: ConversationManager,
):
self.conversation_manager = conversation_manager
self.configuration = configuration
self.callbacks = []
self.document_helper = Documents()
@register_tool(
display_name="Query Spreadsheet",
description="Query a spreadsheet using natural language.",
additional_instructions="This tool transforms your natural language query into Python code to query a spreadsheet using the pandas library.",
requires_documents=True,
document_classes=["Spreadsheet"],
enabled_by_default=False,
include_in_conversation=False,
requires_llm=True,
category="Documents",
)
def query_spreadsheet_with_pandas(self, query: str, target_file_id: int):
"""Useful for querying a specific spreadsheet. If the target document is a 'Spreadsheet', always use this tool.
Args:
query (str): The query to use.
target_file_id (int): The file ID to query."""
override_file = None
if self.conversation_manager:
override_file = self.conversation_manager.tool_kwargs.get(
"override_file", None
)
if override_file is not None:
target_file_id = int(override_file)
file = self.document_helper.get_file(target_file_id)
# Get the setting for the tool model
tool_model_configuration = ModelConfiguration(
**json.loads(
UserSettings()
.get_user_setting(
user_id=self.conversation_manager.user_id,
setting_name=f"{self.query_spreadsheet_with_pandas.__name__}_model_configuration",
default_value=ModelConfiguration.default().model_dump_json(),
)
.setting_value
)
)
llm = get_llm(
model_configuration=tool_model_configuration,
streaming=True,
callbacks=self.conversation_manager.agent_callbacks,
)
agent_executor = self.create_pandas_agent(llm=llm, files=[file])
# self.callbacks is set outside of this class
results = agent_executor.run(
input=query,
handle_parsing_errors=True,
callbacks=self.conversation_manager.agent_callbacks
)
return results
def create_pandas_agent(
self,
llm: BaseLanguageModel,
files: List[FileModel],
) -> AgentExecutor:
"""Create csv agent by loading to a dataframe and using pandas agent."""
pandas_dataframes = self.get_dataframes(files=files)
# Get a list of the dataframes in the pandas_dataframes dictionary
dfs = []
for key in pandas_dataframes:
for df in pandas_dataframes[key]:
dfs.append(df)
pandas_agent = create_pandas_dataframe_agent(
llm=llm,
df=dfs,
include_df_in_prompt=True,
number_of_head_rows=5,
verbose=True,
# callbacks=self.callbacks,
)
return pandas_agent
def get_dataframes(self, files: List[FileModel], **kwargs: Any) -> dict:
for file in files:
if not file.id in self.cached_pandas_dataframes:
self.cached_pandas_dataframes[file.id] = []
# Read in the file from the database
reader = io.BytesIO(self.document_helper.get_file_data(file.id))
# Check to see if the file extension is in the excel types
file_extension = os.path.splitext(file.file_name)[1]
if file_extension in self.excel_types:
# If it is an excel type, read it in as an excel file
for sheet in pd.ExcelFile(reader, **kwargs).sheet_names:
df = pd.read_excel(reader, sheet_name=sheet, **kwargs)
self.cached_pandas_dataframes[file.id].append(df)
else:
# Otherwise, read it in as a csv
self.cached_pandas_dataframes[file.id].append(
pd.read_csv(
filepath_or_buffer=reader,
on_bad_lines="skip",
encoding="ISO-8859-1",
**kwargs,
)
)
else:
logging.info(
f"File {file.file_name} already in pandas cache, skipping."
)
return self.cached_pandas_dataframes
if __name__ == "__main__":
conversation_manager = ConversationManager(
conversation_id=None, user_email="[email protected]", prompt_manager=None
)
spreadsheets_tool = SpreadsheetsTool(
configuration=None, conversation_manager=conversation_manager
)
result = spreadsheets_tool.query_spreadsheet_with_pandas(
"What headers are in this file?", 6
)
print(result)
| [
"langchain_experimental.agents.agent_toolkits.create_pandas_dataframe_agent"
] | [((1293, 1722), 'src.ai.tools.tool_registry.register_tool', 'register_tool', ([], {'display_name': '"""Query Spreadsheet"""', 'description': '"""Query a spreadsheet using natural language."""', 'additional_instructions': '"""This tool transforms your natural language query into Python code to query a spreadsheet using the pandas library."""', 'requires_documents': '(True)', 'document_classes': "['Spreadsheet']", 'enabled_by_default': '(False)', 'include_in_conversation': '(False)', 'requires_llm': '(True)', 'category': '"""Documents"""'}), "(display_name='Query Spreadsheet', description=\n 'Query a spreadsheet using natural language.', additional_instructions=\n 'This tool transforms your natural language query into Python code to query a spreadsheet using the pandas library.'\n , requires_documents=True, document_classes=['Spreadsheet'],\n enabled_by_default=False, include_in_conversation=False, requires_llm=\n True, category='Documents')\n", (1306, 1722), False, 'from src.ai.tools.tool_registry import register_tool, tool_class\n'), ((5754, 5855), 'src.ai.conversations.conversation_manager.ConversationManager', 'ConversationManager', ([], {'conversation_id': 'None', 'user_email': '"""[email protected]"""', 'prompt_manager': 'None'}), "(conversation_id=None, user_email='[email protected]',\n prompt_manager=None)\n", (5773, 5855), False, 'from src.ai.conversations.conversation_manager import ConversationManager\n'), ((1275, 1286), 'src.db.models.documents.Documents', 'Documents', ([], {}), '()\n', (1284, 1286), False, 'from src.db.models.documents import Documents\n'), ((2963, 3089), 'src.ai.utilities.llm_helper.get_llm', 'get_llm', ([], {'model_configuration': 'tool_model_configuration', 'streaming': '(True)', 'callbacks': 'self.conversation_manager.agent_callbacks'}), '(model_configuration=tool_model_configuration, streaming=True,\n callbacks=self.conversation_manager.agent_callbacks)\n', (2970, 3089), False, 'from src.ai.utilities.llm_helper import get_llm\n'), ((3970, 4084), 'langchain_experimental.agents.agent_toolkits.create_pandas_dataframe_agent', 'create_pandas_dataframe_agent', ([], {'llm': 'llm', 'df': 'dfs', 'include_df_in_prompt': '(True)', 'number_of_head_rows': '(5)', 'verbose': '(True)'}), '(llm=llm, df=dfs, include_df_in_prompt=True,\n number_of_head_rows=5, verbose=True)\n', (3999, 4084), False, 'from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent\n'), ((432, 457), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (447, 457), False, 'import os\n'), ((5540, 5613), 'logging.info', 'logging.info', (['f"""File {file.file_name} already in pandas cache, skipping."""'], {}), "(f'File {file.file_name} already in pandas cache, skipping.')\n", (5552, 5613), False, 'import logging\n'), ((4690, 4722), 'os.path.splitext', 'os.path.splitext', (['file.file_name'], {}), '(file.file_name)\n', (4706, 4722), False, 'import os\n'), ((4889, 4919), 'pandas.ExcelFile', 'pd.ExcelFile', (['reader'], {}), '(reader, **kwargs)\n', (4901, 4919), True, 'import pandas as pd\n'), ((4962, 5011), 'pandas.read_excel', 'pd.read_excel', (['reader'], {'sheet_name': 'sheet'}), '(reader, sheet_name=sheet, **kwargs)\n', (4975, 5011), True, 'import pandas as pd\n'), ((5252, 5349), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'reader', 'on_bad_lines': '"""skip"""', 'encoding': '"""ISO-8859-1"""'}), "(filepath_or_buffer=reader, on_bad_lines='skip', encoding=\n 'ISO-8859-1', **kwargs)\n", (5263, 5349), True, 'import pandas as pd\n'), 
((2577, 2591), 'src.db.models.user_settings.UserSettings', 'UserSettings', ([], {}), '()\n', (2589, 2591), False, 'from src.db.models.user_settings import UserSettings\n'), ((2827, 2855), 'src.configuration.model_configuration.ModelConfiguration.default', 'ModelConfiguration.default', ([], {}), '()\n', (2853, 2855), False, 'from src.configuration.model_configuration import ModelConfiguration\n')] |
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from log10.langchain import Log10Callback
from log10.llm import Log10Config
log10_callback = Log10Callback(log10_config=Log10Config())
messages = [
HumanMessage(content="You are a ping pong machine"),
HumanMessage(content="Ping?"),
]
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
callbacks=[log10_callback],
temperature=0.5,
tags=["test"],
)
completion = llm.predict_messages(messages, tags=["foobar"])
print(completion)
print(log10_callback.last_completion_url())
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
callbacks=[log10_callback],
temperature=0.5,
tags=["test"],
)
messages.append(HumanMessage(content="Pong!"))
completion = llm.predict_messages(messages, tags=["foobar"])
print(completion)
print(log10_callback.last_completion_url())
| [
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI"
] | [((341, 443), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'callbacks': '[log10_callback]', 'temperature': '(0.5)', 'tags': "['test']"}), "(model_name='gpt-3.5-turbo', callbacks=[log10_callback],\n temperature=0.5, tags=['test'])\n", (351, 443), False, 'from langchain.chat_models import ChatOpenAI\n'), ((590, 692), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'callbacks': '[log10_callback]', 'temperature': '(0.5)', 'tags': "['test']"}), "(model_name='gpt-3.5-turbo', callbacks=[log10_callback],\n temperature=0.5, tags=['test'])\n", (600, 692), False, 'from langchain.chat_models import ChatOpenAI\n'), ((244, 295), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""You are a ping pong machine"""'}), "(content='You are a ping pong machine')\n", (256, 295), False, 'from langchain.schema import HumanMessage\n'), ((301, 330), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Ping?"""'}), "(content='Ping?')\n", (313, 330), False, 'from langchain.schema import HumanMessage\n'), ((724, 753), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Pong!"""'}), "(content='Pong!')\n", (736, 753), False, 'from langchain.schema import HumanMessage\n'), ((210, 223), 'log10.llm.Log10Config', 'Log10Config', ([], {}), '()\n', (221, 223), False, 'from log10.llm import Log10Config\n')] |
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import CharacterTextSplitter
import os
import pinecone
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
import streamlit as st
from dotenv import load_dotenv
load_dotenv()
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
PINECONE_ENV = os.getenv('PINECONE_ENV')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
def doc_preprocessing():
loader = DirectoryLoader(
'data/',
glob='**/*.pdf', # only the PDFs
show_progress=True
)
docs = loader.load()
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=0
)
docs_split = text_splitter.split_documents(docs)
return docs_split
@st.cache_resource
def embedding_db():
# we use the openAI embedding model
embeddings = OpenAIEmbeddings()
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENV
)
docs_split = doc_preprocessing()
doc_db = Pinecone.from_documents(
docs_split,
embeddings,
index_name='langchain-demo-indexes'
)
return doc_db
llm = ChatOpenAI()
doc_db = embedding_db()
def retrieval_answer(query):
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type='stuff',
retriever=doc_db.as_retriever(),
)
result = qa.run(query)
return result
def main():
st.title("Question and Answering App powered by LLM and Pinecone")
text_input = st.text_input("Ask your query...")
if st.button("Ask Query"):
if len(text_input)>0:
st.info("Your Query: " + text_input)
answer = retrieval_answer(text_input)
st.success(answer)
if __name__ == "__main__":
main()
| [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.document_loaders.DirectoryLoader",
"langchain.vectorstores.Pinecone.from_documents",
"langchain.chat_models.ChatOpenAI",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((393, 406), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (404, 406), False, 'from dotenv import load_dotenv\n'), ((431, 460), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (440, 460), False, 'import os\n'), ((477, 502), 'os.getenv', 'os.getenv', (['"""PINECONE_ENV"""'], {}), "('PINECONE_ENV')\n", (486, 502), False, 'import os\n'), ((521, 548), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (530, 548), False, 'import os\n'), ((1382, 1394), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1392, 1394), False, 'from langchain.chat_models import ChatOpenAI\n'), ((642, 703), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['"""data/"""'], {'glob': '"""**/*.pdf"""', 'show_progress': '(True)'}), "('data/', glob='**/*.pdf', show_progress=True)\n", (657, 703), False, 'from langchain.document_loaders import DirectoryLoader\n'), ((805, 860), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (826, 860), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1066, 1084), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1082, 1084), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1090, 1155), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'PINECONE_API_KEY', 'environment': 'PINECONE_ENV'}), '(api_key=PINECONE_API_KEY, environment=PINECONE_ENV)\n', (1103, 1155), False, 'import pinecone\n'), ((1233, 1322), 'langchain.vectorstores.Pinecone.from_documents', 'Pinecone.from_documents', (['docs_split', 'embeddings'], {'index_name': '"""langchain-demo-indexes"""'}), "(docs_split, embeddings, index_name=\n 'langchain-demo-indexes')\n", (1256, 1322), False, 'from langchain.vectorstores import Pinecone\n'), ((1662, 1728), 'streamlit.title', 'st.title', (['"""Question and Answering App powered by LLM and Pinecone"""'], {}), "('Question and Answering App powered by LLM and Pinecone')\n", (1670, 1728), True, 'import streamlit as st\n'), ((1749, 1783), 'streamlit.text_input', 'st.text_input', (['"""Ask your query..."""'], {}), "('Ask your query...')\n", (1762, 1783), True, 'import streamlit as st\n'), ((1793, 1815), 'streamlit.button', 'st.button', (['"""Ask Query"""'], {}), "('Ask Query')\n", (1802, 1815), True, 'import streamlit as st\n'), ((1861, 1897), 'streamlit.info', 'st.info', (["('Your Query: ' + text_input)"], {}), "('Your Query: ' + text_input)\n", (1868, 1897), True, 'import streamlit as st\n'), ((1962, 1980), 'streamlit.success', 'st.success', (['answer'], {}), '(answer)\n', (1972, 1980), True, 'import streamlit as st\n')] |
import os
import re
import subprocess # nosec
import tempfile
from langchain.agents import AgentType, initialize_agent
from langchain.agents.tools import Tool
from langchain.pydantic_v1 import BaseModel, Field, ValidationError, validator
from langchain_community.chat_models import ChatOpenAI
from langchain_core.language_models import BaseLLM
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import ConfigurableField, Runnable
def strip_python_markdown_tags(text: str) -> str:
pat = re.compile(r"```python\n(.*)```", re.DOTALL)
code = pat.match(text)
if code:
return code.group(1)
else:
return text
def format_black(filepath: str):
"""Format a file with black."""
subprocess.run( # nosec
f"black {filepath}",
stderr=subprocess.STDOUT,
text=True,
shell=True,
timeout=3,
check=False,
)
def format_ruff(filepath: str):
"""Run ruff format on a file."""
subprocess.run( # nosec
f"ruff check --fix {filepath}",
shell=True,
text=True,
timeout=3,
universal_newlines=True,
check=False,
)
subprocess.run( # nosec
f"ruff format {filepath}",
stderr=subprocess.STDOUT,
shell=True,
timeout=3,
text=True,
check=False,
)
def check_ruff(filepath: str):
"""Run ruff check on a file."""
subprocess.check_output( # nosec
f"ruff check {filepath}",
stderr=subprocess.STDOUT,
shell=True,
timeout=3,
text=True,
)
def check_mypy(filepath: str, strict: bool = True, follow_imports: str = "skip"):
"""Run mypy on a file."""
cmd = (
f"mypy {'--strict' if strict else ''} "
f"--follow-imports={follow_imports} {filepath}"
)
subprocess.check_output( # nosec
cmd,
stderr=subprocess.STDOUT,
shell=True,
text=True,
timeout=3,
)
class PythonCode(BaseModel):
code: str = Field(
description="Python code conforming to "
"ruff, black, and *strict* mypy standards.",
)
@validator("code")
@classmethod
def check_code(cls, v: str) -> str:
v = strip_python_markdown_tags(v).strip()
try:
with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp_file:
temp_file.write(v)
temp_file_path = temp_file.name
try:
# format with black and ruff
format_black(temp_file_path)
format_ruff(temp_file_path)
except subprocess.CalledProcessError:
pass
# update `v` with formatted code
with open(temp_file_path, "r") as temp_file:
v = temp_file.read()
# check
complaints = dict(ruff=None, mypy=None)
try:
check_ruff(temp_file_path)
except subprocess.CalledProcessError as e:
complaints["ruff"] = e.output
try:
check_mypy(temp_file_path)
except subprocess.CalledProcessError as e:
complaints["mypy"] = e.output
# raise ValueError if ruff or mypy had complaints
if any(complaints.values()):
code_str = f"```{temp_file_path}\n{v}```"
error_messages = [
f"```{key}\n{value}```"
for key, value in complaints.items()
if value
]
raise ValueError("\n\n".join([code_str] + error_messages))
finally:
os.remove(temp_file_path)
return v
def check_code(code: str) -> str:
try:
code_obj = PythonCode(code=code)
return (
f"# LGTM\n"
f"# use the `submit` tool to submit this code:\n\n"
f"```python\n{code_obj.code}\n```"
)
except ValidationError as e:
return e.errors()[0]["msg"]
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are a world class Python coder who uses "
"black, ruff, and *strict* mypy for all of your code. "
"Provide complete, end-to-end Python code "
"to meet the user's description/requirements. "
"Always `check` your code. When you're done, "
"you must ALWAYS use the `submit` tool.",
),
(
"human",
": {input}",
),
],
)
check_code_tool = Tool.from_function(
check_code,
name="check-code",
description="Always check your code before submitting it!",
)
submit_code_tool = Tool.from_function(
strip_python_markdown_tags,
name="submit-code",
description="THIS TOOL is the most important. "
"use it to submit your code to the user who requested it... "
"but be sure to `check` it first!",
return_direct=True,
)
tools = [check_code_tool, submit_code_tool]
def get_agent_executor(
llm: BaseLLM,
agent_type: AgentType = AgentType.OPENAI_FUNCTIONS,
) -> Runnable:
_agent_executor = initialize_agent(
tools,
llm,
agent=agent_type,
verbose=True,
handle_parsing_errors=True,
prompt=prompt,
)
return _agent_executor | (lambda output: output["output"])
class Instruction(BaseModel):
__root__: str
agent_executor = (
get_agent_executor(ChatOpenAI(model_name="gpt-4-1106-preview", temperature=0.0))
.configurable_alternatives(
ConfigurableField("model_name"),
default_key="gpt4turbo",
gpt4=get_agent_executor(ChatOpenAI(model_name="gpt-4", temperature=0.0)),
gpt35t=get_agent_executor(
ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0),
),
)
.with_types(input_type=Instruction, output_type=str)
)
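
# Minimal usage sketch (assumption: OPENAI_API_KEY is set); "model_name" is the
# configurable field defined above, and "gpt35t" selects the gpt-3.5-turbo
# alternative. Not part of the original module.
def _example_generate_code() -> str:
    fast_executor = agent_executor.with_config(configurable={"model_name": "gpt35t"})
    return fast_executor.invoke("Write a function that reverses a string.")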
| [
"langchain.pydantic_v1.Field",
"langchain.agents.initialize_agent",
"langchain_community.chat_models.ChatOpenAI",
"langchain.agents.tools.Tool.from_function",
"langchain.pydantic_v1.validator",
"langchain_core.prompts.ChatPromptTemplate.from_messages",
"langchain_core.runnables.ConfigurableField"
] | [((4039, 4387), 'langchain_core.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[(\'system\',\n "You are a world class Python coder who uses black, ruff, and *strict* mypy for all of your code. Provide complete, end-to-end Python code to meet the user\'s description/requirements. Always `check` your code. When you\'re done, you must ALWAYS use the `submit` tool."\n ), (\'human\', \': {input}\')]'], {}), '([(\'system\',\n "You are a world class Python coder who uses black, ruff, and *strict* mypy for all of your code. Provide complete, end-to-end Python code to meet the user\'s description/requirements. Always `check` your code. When you\'re done, you must ALWAYS use the `submit` tool."\n ), (\'human\', \': {input}\')])\n', (4071, 4387), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((4573, 4687), 'langchain.agents.tools.Tool.from_function', 'Tool.from_function', (['check_code'], {'name': '"""check-code"""', 'description': '"""Always check your code before submitting it!"""'}), "(check_code, name='check-code', description=\n 'Always check your code before submitting it!')\n", (4591, 4687), False, 'from langchain.agents.tools import Tool\n'), ((4718, 4958), 'langchain.agents.tools.Tool.from_function', 'Tool.from_function', (['strip_python_markdown_tags'], {'name': '"""submit-code"""', 'description': '"""THIS TOOL is the most important. use it to submit your code to the user who requested it... but be sure to `check` it first!"""', 'return_direct': '(True)'}), "(strip_python_markdown_tags, name='submit-code',\n description=\n 'THIS TOOL is the most important. use it to submit your code to the user who requested it... but be sure to `check` it first!'\n , return_direct=True)\n", (4736, 4958), False, 'from langchain.agents.tools import Tool\n'), ((527, 571), 're.compile', 're.compile', (['"""```python\\\\n(.*)```"""', 're.DOTALL'], {}), "('```python\\\\n(.*)```', re.DOTALL)\n", (537, 571), False, 'import re\n'), ((746, 858), 'subprocess.run', 'subprocess.run', (['f"""black {filepath}"""'], {'stderr': 'subprocess.STDOUT', 'text': '(True)', 'shell': '(True)', 'timeout': '(3)', 'check': '(False)'}), "(f'black {filepath}', stderr=subprocess.STDOUT, text=True,\n shell=True, timeout=3, check=False)\n", (760, 858), False, 'import subprocess\n'), ((994, 1116), 'subprocess.run', 'subprocess.run', (['f"""ruff check --fix {filepath}"""'], {'shell': '(True)', 'text': '(True)', 'timeout': '(3)', 'universal_newlines': '(True)', 'check': '(False)'}), "(f'ruff check --fix {filepath}', shell=True, text=True,\n timeout=3, universal_newlines=True, check=False)\n", (1008, 1116), False, 'import subprocess\n'), ((1182, 1301), 'subprocess.run', 'subprocess.run', (['f"""ruff format {filepath}"""'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)', 'timeout': '(3)', 'text': '(True)', 'check': '(False)'}), "(f'ruff format {filepath}', stderr=subprocess.STDOUT, shell=\n True, timeout=3, text=True, check=False)\n", (1196, 1301), False, 'import subprocess\n'), ((1434, 1547), 'subprocess.check_output', 'subprocess.check_output', (['f"""ruff check {filepath}"""'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)', 'timeout': '(3)', 'text': '(True)'}), "(f'ruff check {filepath}', stderr=subprocess.STDOUT,\n shell=True, timeout=3, text=True)\n", (1457, 1547), False, 'import subprocess\n'), ((1841, 1934), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)', 'text': '(True)', 'timeout': '(3)'}), '(cmd, 
stderr=subprocess.STDOUT, shell=True, text=\n True, timeout=3)\n', (1864, 1934), False, 'import subprocess\n'), ((2033, 2126), 'langchain.pydantic_v1.Field', 'Field', ([], {'description': '"""Python code conforming to ruff, black, and *strict* mypy standards."""'}), "(description=\n 'Python code conforming to ruff, black, and *strict* mypy standards.')\n", (2038, 2126), False, 'from langchain.pydantic_v1 import BaseModel, Field, ValidationError, validator\n'), ((2154, 2171), 'langchain.pydantic_v1.validator', 'validator', (['"""code"""'], {}), "('code')\n", (2163, 2171), False, 'from langchain.pydantic_v1 import BaseModel, Field, ValidationError, validator\n'), ((5160, 5267), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'agent_type', 'verbose': '(True)', 'handle_parsing_errors': '(True)', 'prompt': 'prompt'}), '(tools, llm, agent=agent_type, verbose=True,\n handle_parsing_errors=True, prompt=prompt)\n', (5176, 5267), False, 'from langchain.agents import AgentType, initialize_agent\n'), ((3668, 3693), 'os.remove', 'os.remove', (['temp_file_path'], {}), '(temp_file_path)\n', (3677, 3693), False, 'import os\n'), ((5578, 5609), 'langchain_core.runnables.ConfigurableField', 'ConfigurableField', (['"""model_name"""'], {}), "('model_name')\n", (5595, 5609), False, 'from langchain_core.runnables import ConfigurableField, Runnable\n'), ((2309, 2360), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (2336, 2360), False, 'import tempfile\n'), ((5476, 5536), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4-1106-preview"""', 'temperature': '(0.0)'}), "(model_name='gpt-4-1106-preview', temperature=0.0)\n", (5486, 5536), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((5676, 5723), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0.0)'}), "(model_name='gpt-4', temperature=0.0)\n", (5686, 5723), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((5773, 5828), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.0)'}), "(model_name='gpt-3.5-turbo', temperature=0.0)\n", (5783, 5828), False, 'from langchain_community.chat_models import ChatOpenAI\n')] |
import os
import langchain
from langchain import (
agents,
prompts,
chains,
llms
)
class BOAgent:
def __init__(
self,
tools,
memory,
model="text-davinci-003",
temp=0.1,
max_steps=30,
):
self.openai_key = os.getenv("OPENAI_API_KEY")
self.memory = memory
# Initialize LLM
if model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"):
self.llm = langchain.chat_models.ChatOpenAI(
temperature=temp,
openai_api_key=self.openai_key,
model_name=model,
)
else:
self.llm = langchain.OpenAI(
temperature=temp,
openai_api_key=self.openai_key,
model_name=model
)
# Initialize agent
self.agent = agents.initialize_agent(
tools,
self.llm,
agent="conversational-react-description",
verbose=True,
max_iterations=max_steps,
memory=self.memory
)
def run(self, prompt):
return self.agent.run(prompt)
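
# Minimal usage sketch (assumptions: OPENAI_API_KEY is set and the "echo" tool
# below is a hypothetical stand-in for a real tool); not part of the original module.
def _example_bo_agent() -> str:
    from langchain.agents import Tool
    from langchain.memory import ConversationBufferMemory

    tools = [
        Tool(
            name="echo",
            func=lambda q: q,  # placeholder tool that returns its input unchanged
            description="Echoes the input back unchanged.",
        )
    ]
    memory = ConversationBufferMemory(memory_key="chat_history")
    agent = BOAgent(tools=tools, memory=memory, model="gpt-3.5-turbo")
    return agent.run("Suggest the next experiment to run.")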
| [
"langchain.agents.initialize_agent",
"langchain.OpenAI",
"langchain.chat_models.ChatOpenAI"
] | [((310, 337), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (319, 337), False, 'import os\n'), ((888, 1040), 'langchain.agents.initialize_agent', 'agents.initialize_agent', (['tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'max_iterations': 'max_steps', 'memory': 'self.memory'}), "(tools, self.llm, agent=\n 'conversational-react-description', verbose=True, max_iterations=\n max_steps, memory=self.memory)\n", (911, 1040), False, 'from langchain import agents, prompts, chains, llms\n'), ((491, 596), 'langchain.chat_models.ChatOpenAI', 'langchain.chat_models.ChatOpenAI', ([], {'temperature': 'temp', 'openai_api_key': 'self.openai_key', 'model_name': 'model'}), '(temperature=temp, openai_api_key=self.\n openai_key, model_name=model)\n', (523, 596), False, 'import langchain\n'), ((692, 780), 'langchain.OpenAI', 'langchain.OpenAI', ([], {'temperature': 'temp', 'openai_api_key': 'self.openai_key', 'model_name': 'model'}), '(temperature=temp, openai_api_key=self.openai_key,\n model_name=model)\n', (708, 780), False, 'import langchain\n')] |
import importlib.util
import logging
from typing import Any, Callable, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.self_hosted import SelfHostedPipeline
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
logger = logging.getLogger(__name__)
def _generate_text(
pipeline: Any,
prompt: str,
*args: Any,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> str:
"""Inference function to send to the remote hardware.
Accepts a Hugging Face pipeline (or more likely,
a key pointing to such a pipeline on the cluster's object store)
and returns generated text.
"""
response = pipeline(prompt, *args, **kwargs)
if pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _load_transformer(
model_id: str = DEFAULT_MODEL_ID,
task: str = DEFAULT_TASK,
device: int = 0,
model_kwargs: Optional[dict] = None,
) -> Any:
"""Inference function to send to the remote hardware.
Accepts a huggingface model_id and returns a pipeline for the task.
"""
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers import pipeline as hf_pipeline
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
elif task in ("text2text-generation", "summarization"):
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ValueError(
f"Could not load the {task} model due to missing dependencies."
) from e
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
model_kwargs=_model_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return pipeline
class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
"""HuggingFace Pipeline API to run on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example using from_model_id:
.. code-block:: python
from langchain.llms import SelfHostedHuggingFaceLLM
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-large", task="text2text-generation",
hardware=gpu
)
    Example passing fn that generates a pipeline (because the pipeline is not serializable):
.. code-block:: python
from langchain.llms import SelfHostedHuggingFaceLLM
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def get_pipeline():
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer
)
return pipe
hf = SelfHostedHuggingFaceLLM(
model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
"""
model_id: str = DEFAULT_MODEL_ID
"""Hugging Face model_id to load the model."""
task: str = DEFAULT_TASK
"""Hugging Face task ("text-generation", "text2text-generation" or
"summarization")."""
device: int = 0
"""Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
hardware: Any
"""Remote hardware to send the inference function to."""
model_reqs: List[str] = ["./", "transformers", "torch"]
"""Requirements to install on hardware to inference the model."""
model_load_fn: Callable = _load_transformer
"""Function to load the model remotely on the server."""
inference_fn: Callable = _generate_text #: :meta private:
"""Inference function to send to the remote hardware."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def __init__(self, **kwargs: Any):
"""Construct the pipeline remotely using an auxiliary function.
The load function needs to be importable to be imported
and run on the server, i.e. in a module and not a REPL or closure.
Then, initialize the remote inference function.
"""
load_fn_kwargs = {
"model_id": kwargs.get("model_id", DEFAULT_MODEL_ID),
"task": kwargs.get("task", DEFAULT_TASK),
"device": kwargs.get("device", 0),
"model_kwargs": kwargs.get("model_kwargs", None),
}
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
return "selfhosted_huggingface_pipeline"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return self.client(
pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs
)
| [
"langchain.llms.utils.enforce_stop_tokens"
] | [((457, 484), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (474, 484), False, 'import logging\n'), ((1983, 2039), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (2012, 2039), False, 'from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer\n'), ((3399, 3502), 'transformers.pipeline', 'hf_pipeline', ([], {'task': 'task', 'model': 'model', 'tokenizer': 'tokenizer', 'device': 'device', 'model_kwargs': '_model_kwargs'}), '(task=task, model=model, tokenizer=tokenizer, device=device,\n model_kwargs=_model_kwargs)\n', (3410, 3502), True, 'from transformers import pipeline as hf_pipeline\n'), ((1434, 1465), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (1453, 1465), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2739, 2764), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2762, 2764), False, 'import torch\n'), ((2108, 2171), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (2144, 2171), False, 'from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer\n'), ((2256, 2320), 'transformers.AutoModelForSeq2SeqLM.from_pretrained', 'AutoModelForSeq2SeqLM.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (2293, 2320), False, 'from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer\n')] |
from langchain.prompts.prompt import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
_template = """Given the following chat history and a follow up question, rephrase the follow up question to be a standalone question.
You can assume that the question is about Flyte.
Chat History:
{chat_history}
Follow Up Question:
{question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
template = """You are a maintainer of the open source library Flyte and you understand the codebase very well.
You are given the following extracted parts of the context and a question. Provide a conversational answer in a concise and clear manner. Attach a link if necessary.
Please answer based on the question.
Question: {question}
=========
Context:
{context}
=========
Answer in Markdown:"""
QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
def get_chain(vectorstore):
llm = ChatOpenAI(model_name="gpt-3.5-turbo")
qa_chain = ConversationalRetrievalChain.from_llm(
llm,
vectorstore.as_retriever(search_kwargs={"k": 20}),
condense_question_prompt=CONDENSE_QUESTION_PROMPT,
combine_docs_chain_kwargs={'prompt': QA_PROMPT},
# verbose=True,
)
return qa_chain
def start_conversation(vectorstore):
qa_chain = get_chain(vectorstore)
chat_history = []
print("Chat with your docs!")
while True:
print("Human:")
question = input()
print(question)
result = qa_chain({"question": question, "chat_history": chat_history})
chat_history.append((question, result["answer"]))
print("AI:")
print(result["answer"])
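# Example usage (a rough sketch, assuming a FAISS index of the Flyte docs was built and saved
# elsewhere; the index path and embedding model below are illustrative placeholders):
#   from langchain.embeddings import OpenAIEmbeddings
#   from langchain.vectorstores.faiss import FAISS
#   vectorstore = FAISS.load_local("flyte_docs_index", OpenAIEmbeddings())
#   start_conversation(vectorstore)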
| [
"langchain.prompts.prompt.PromptTemplate.from_template",
"langchain.prompts.prompt.PromptTemplate",
"langchain.chat_models.ChatOpenAI"
] | [((681, 720), 'langchain.prompts.prompt.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_template'], {}), '(_template)\n', (709, 720), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((1141, 1215), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['question', 'context']"}), "(template=template, input_variables=['question', 'context'])\n", (1155, 1215), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((1256, 1294), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""'}), "(model_name='gpt-3.5-turbo')\n", (1266, 1294), False, 'from langchain.chat_models import ChatOpenAI\n')] |
from collections import deque
from langchain.chains import LLMChain
from langchain.llms import BaseLLM
from langchain.prompts import PromptTemplate
from modules.memory import MemoryModule
from typing import Dict, List
class ReasoningModule:
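    """Maintains the task queue: seeds it from generated milestones, then creates and reprioritizes tasks via LLM chains."""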
def __init__(self, llm, memory_module: MemoryModule, verbose: bool = True):
self.task_list = deque()
self.completed_task_list = deque()
self.memory_module = memory_module
self.task_creation_chain = TaskCreationChain.from_llm(llm, verbose)
self.task_prioritization_chain = TaskPrioritizationChain.from_llm(llm, verbose)
self.milestone_chain = MilestoneChain.from_llm(llm, verbose)
def initialize_tasks(self):
milestones = self.milestone_chain.run(objective=self.memory_module.objective)
self.memory_module.store(str(milestones))
for milestone in milestones:
self.task_list.append({"task_name": milestone})
self.task_list = deque(self.prioritize_tasks(0))
def update_tasks(self, task: dict, result: dict):
incomplete_tasks = [t["task_name"] for t in self.task_list]
task_description = task["task_name"]
incomplete_tasks = "\n".join(incomplete_tasks)
if len(self.task_list) == 0:
incomplete_tasks = "all"
objective = self.memory_module.objective
response = self.task_creation_chain.run(
result=result,
task_description=task_description,
incomplete_tasks=incomplete_tasks,
objective=objective,
)
new_tasks = response.split("\n")
new_tasks = [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]
this_task_id = int("".join(filter(str.isdigit, task["task_id"]))) if isinstance(task["task_id"], str) else task["task_id"]
task_id_counter = this_task_id
for new_task in new_tasks:
task_id_counter += 1
new_task.update({"task_id": task_id_counter})
self.task_list.append(new_task)
self.task_list = deque(self.prioritize_tasks(this_task_id))
def prioritize_tasks(self, this_task_id: int) -> List[Dict]:
"""Prioritize tasks."""
task_names = [t["task_name"] for t in self.task_list]
task_names = "\n".join(task_names)
objective = self.memory_module.objective
next_task_id = this_task_id + 1
response = self.task_prioritization_chain.run(task_names=task_names, next_task_id=next_task_id, objective=objective)
new_tasks = response.split("\n")
prioritized_task_list = []
task_id_counter = this_task_id
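        # Each line returned by the LLM is expected to look like "2. Do something";
        # split on the first "." to separate the number from the task name.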
for task_string in new_tasks:
if not task_string.strip():
continue
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id_counter += 1
# task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
prioritized_task_list.append({"task_id": task_id_counter, "task_name": task_name})
return prioritized_task_list
class TaskCreationChain(LLMChain):
"""Chain to generate tasks."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
task_creation_template = (
"As a task creation AI, create new tasks with the objective: {objective}.\n"
"Last completed task's result: {result}.\n"
"Task description: {task_description}.\n"
"Incomplete tasks: {incomplete_tasks}\n\n"
"Ensure tasks are actionable and achievable by an agent with limited resources.\n"
"Create short, finite tasks. Avoid continuous tasks like monitoring or testing.\n"
"Consider if a new task is essential for reaching the objective.\n"
"Return tasks as an array.\n"
)
prompt = PromptTemplate(
template=task_creation_template,
input_variables=["result", "task_description", "incomplete_tasks", "objective"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class TaskPrioritizationChain(LLMChain):
"""Chain to prioritize tasks."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
task_prioritization_template = (
"As a task prioritization AI, format and prioritize tasks: {task_names}\n"
"Objective: {objective}\n\n"
"Return prioritized tasks as a numbered list starting with {next_task_id}.\n"
)
prompt = PromptTemplate(
template=task_prioritization_template,
input_variables=["task_names", "next_task_id", "objective"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class MilestoneChain(LLMChain):
"""Chain to generate milestones."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
milestone_template = "As a milestone AI, generate milestones for the objective: {objective}.\n" "Return milestones as an array.\n"
return cls(llm=llm, prompt=PromptTemplate(input_variables=["objective"], template=milestone_template), verbose=verbose)
def run(self, objective: str) -> List[str]:
"""Run the chain."""
return self.generate_milestones(objective=objective)
def generate_milestones(self, objective: str) -> List[str]:
"""Generate milestones."""
response = self.predict(objective=objective)
return response.strip().split("\n") if response else []
| [
"langchain.prompts.PromptTemplate"
] | [((395, 402), 'collections.deque', 'deque', ([], {}), '()\n', (400, 402), False, 'from collections import deque\n'), ((438, 445), 'collections.deque', 'deque', ([], {}), '()\n', (443, 445), False, 'from collections import deque\n'), ((3982, 4114), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'task_creation_template', 'input_variables': "['result', 'task_description', 'incomplete_tasks', 'objective']"}), "(template=task_creation_template, input_variables=['result',\n 'task_description', 'incomplete_tasks', 'objective'])\n", (3996, 4114), False, 'from langchain.prompts import PromptTemplate\n'), ((4700, 4819), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'task_prioritization_template', 'input_variables': "['task_names', 'next_task_id', 'objective']"}), "(template=task_prioritization_template, input_variables=[\n 'task_names', 'next_task_id', 'objective'])\n", (4714, 4819), False, 'from langchain.prompts import PromptTemplate\n'), ((5287, 5361), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['objective']", 'template': 'milestone_template'}), "(input_variables=['objective'], template=milestone_template)\n", (5301, 5361), False, 'from langchain.prompts import PromptTemplate\n')] |
## This is a fork of / based on https://gist.github.com/wiseman/4a706428eaabf4af1002a07a114f61d6
from io import StringIO
import sys
import os
from langchain.agents import initialize_agent
from langchain.agents.tools import Tool
from langchain.llms import OpenAI
base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
model_name = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo')
class PythonREPL:
"""Simulates a standalone Python REPL."""
def __init__(self):
pass
def run(self, command: str) -> str:
"""Run command and returns anything printed."""
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
try:
exec(command, globals())
sys.stdout = old_stdout
output = mystdout.getvalue()
except Exception as e:
sys.stdout = old_stdout
output = str(e)
return output
llm = OpenAI(temperature=0.0, openai_api_base=base_path, model_name=model_name)
python_repl = Tool(
"Python REPL",
PythonREPL().run,
"""A Python shell. Use this to execute python commands. Input should be a valid python command.
If you expect output it should be printed out.""",
)
tools = [python_repl]
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
agent.run("What is the 10th fibonacci number?") | [
"langchain.agents.initialize_agent",
"langchain.llms.OpenAI"
] | [((348, 409), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', '"""http://localhost:8080/v1"""'], {}), "('OPENAI_API_BASE', 'http://localhost:8080/v1')\n", (362, 409), False, 'import os\n'), ((423, 468), 'os.environ.get', 'os.environ.get', (['"""MODEL_NAME"""', '"""gpt-3.5-turbo"""'], {}), "('MODEL_NAME', 'gpt-3.5-turbo')\n", (437, 468), False, 'import os\n'), ((1003, 1076), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.0)', 'openai_api_base': 'base_path', 'model_name': 'model_name'}), '(temperature=0.0, openai_api_base=base_path, model_name=model_name)\n', (1009, 1076), False, 'from langchain.llms import OpenAI\n'), ((1345, 1424), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (1361, 1424), False, 'from langchain.agents import initialize_agent\n'), ((741, 751), 'io.StringIO', 'StringIO', ([], {}), '()\n', (749, 751), False, 'from io import StringIO\n')] |
"""Utility functions for mlflow.langchain."""
import contextlib
import json
import logging
import os
import shutil
import types
import warnings
from functools import lru_cache
from importlib.util import find_spec
from typing import NamedTuple
import cloudpickle
import yaml
from packaging import version
from packaging.version import Version
import mlflow
from mlflow.utils.class_utils import _get_class_from_string
_AGENT_PRIMITIVES_FILE_NAME = "agent_primitive_args.json"
_AGENT_PRIMITIVES_DATA_KEY = "agent_primitive_data"
_AGENT_DATA_FILE_NAME = "agent.yaml"
_AGENT_DATA_KEY = "agent_data"
_TOOLS_DATA_FILE_NAME = "tools.pkl"
_TOOLS_DATA_KEY = "tools_data"
_LOADER_FN_FILE_NAME = "loader_fn.pkl"
_LOADER_FN_KEY = "loader_fn"
_LOADER_ARG_KEY = "loader_arg"
_PERSIST_DIR_NAME = "persist_dir_data"
_PERSIST_DIR_KEY = "persist_dir"
_MODEL_DATA_YAML_FILE_NAME = "model.yaml"
_MODEL_DATA_PKL_FILE_NAME = "model.pkl"
_MODEL_DATA_FOLDER_NAME = "model"
_MODEL_DATA_KEY = "model_data"
_MODEL_TYPE_KEY = "model_type"
_RUNNABLE_LOAD_KEY = "runnable_load"
_BASE_LOAD_KEY = "base_load"
_CONFIG_LOAD_KEY = "config_load"
_MODEL_LOAD_KEY = "model_load"
_UNSUPPORTED_MODEL_ERROR_MESSAGE = (
"MLflow langchain flavor only supports subclasses of "
"langchain.chains.base.Chain, langchain.agents.agent.AgentExecutor, "
"langchain.schema.BaseRetriever, langchain.schema.runnable.RunnableSequence, "
"langchain.schema.runnable.RunnableLambda, "
"langchain.schema.runnable.RunnableParallel, "
"langchain.schema.runnable.RunnablePassthrough, "
"langchain.schema.runnable.passthrough.RunnableAssign instances, "
"found {instance_type}"
)
_UNSUPPORTED_MODEL_WARNING_MESSAGE = (
"MLflow does not guarantee support for Chains outside of the subclasses of LLMChain, found %s"
)
_UNSUPPORTED_LLM_WARNING_MESSAGE = (
"MLflow does not guarantee support for LLMs outside of HuggingFaceHub and OpenAI, found %s"
)
_UNSUPPORTED_LANGCHAIN_VERSION_ERROR_MESSAGE = (
"Saving {instance_type} models is only supported in langchain 0.0.194 and above."
)
logger = logging.getLogger(__name__)
@lru_cache
def base_lc_types():
import langchain.agents.agent
import langchain.chains.base
import langchain.schema
return (
langchain.chains.base.Chain,
langchain.agents.agent.AgentExecutor,
langchain.schema.BaseRetriever,
)
@lru_cache
def picklable_runnable_types():
"""
Runnable types that can be pickled and unpickled by cloudpickle.
"""
from langchain.chat_models.base import SimpleChatModel
from langchain.prompts import ChatPromptTemplate
types = (
SimpleChatModel,
ChatPromptTemplate,
)
try:
from langchain.schema.runnable import (
RunnableLambda,
RunnablePassthrough,
)
types += (RunnableLambda, RunnablePassthrough)
except ImportError:
pass
return types
@lru_cache
def lc_runnable_with_steps_types():
# import them separately because they are added
# in different versions of langchain
try:
from langchain.schema.runnable import RunnableSequence
types = (RunnableSequence,)
except ImportError:
types = ()
try:
from langchain.schema.runnable import RunnableParallel
types += (RunnableParallel,)
except ImportError:
pass
return types
def lc_runnable_assign_types():
try:
from langchain.schema.runnable.passthrough import RunnableAssign
return (RunnableAssign,)
except ImportError:
return ()
def lc_runnable_branch_types():
try:
from langchain.schema.runnable import RunnableBranch
return (RunnableBranch,)
except ImportError:
return ()
def lc_runnables_types():
return (
picklable_runnable_types()
+ lc_runnable_with_steps_types()
+ lc_runnable_branch_types()
+ lc_runnable_assign_types()
)
def supported_lc_types():
return base_lc_types() + lc_runnables_types()
@lru_cache
def runnables_supports_batch_types():
try:
from langchain.schema.runnable import (
RunnableLambda,
RunnableSequence,
)
types = (RunnableSequence, RunnableLambda)
except ImportError:
types = ()
try:
from langchain.schema.runnable import RunnableParallel
types += (RunnableParallel,)
except ImportError:
pass
return types
@lru_cache
def custom_type_to_loader_dict():
# helper function to load output_parsers from config
def _load_output_parser(config: dict) -> dict:
"""Load output parser."""
from langchain.schema.output_parser import StrOutputParser
output_parser_type = config.pop("_type", None)
if output_parser_type == "default":
return StrOutputParser(**config)
else:
raise ValueError(f"Unsupported output parser {output_parser_type}")
return {"default": _load_output_parser}
class _SpecialChainInfo(NamedTuple):
loader_arg: str
def _get_special_chain_info_or_none(chain):
for (
special_chain_class,
loader_arg,
) in _get_map_of_special_chain_class_to_loader_arg().items():
if isinstance(chain, special_chain_class):
return _SpecialChainInfo(loader_arg=loader_arg)
@lru_cache
def _get_map_of_special_chain_class_to_loader_arg():
import langchain
from mlflow.langchain.retriever_chain import _RetrieverChain
class_name_to_loader_arg = {
"langchain.chains.RetrievalQA": "retriever",
"langchain.chains.APIChain": "requests_wrapper",
"langchain.chains.HypotheticalDocumentEmbedder": "embeddings",
}
# NB: SQLDatabaseChain was migrated to langchain_experimental beginning with version 0.0.247
if version.parse(langchain.__version__) <= version.parse("0.0.246"):
class_name_to_loader_arg["langchain.chains.SQLDatabaseChain"] = "database"
else:
if find_spec("langchain_experimental"):
# Add this entry only if langchain_experimental is installed
class_name_to_loader_arg["langchain_experimental.sql.SQLDatabaseChain"] = "database"
class_to_loader_arg = {
_RetrieverChain: "retriever",
}
for class_name, loader_arg in class_name_to_loader_arg.items():
try:
cls = _get_class_from_string(class_name)
class_to_loader_arg[cls] = loader_arg
except Exception:
logger.warning(
"Unexpected import failure for class '%s'. Please file an issue at"
" https://github.com/mlflow/mlflow/issues/.",
class_name,
exc_info=True,
)
return class_to_loader_arg
@lru_cache
def _get_supported_llms():
import langchain.chat_models
import langchain.llms
llms = {langchain.llms.openai.OpenAI, langchain.llms.huggingface_hub.HuggingFaceHub}
if hasattr(langchain.llms, "Databricks"):
llms.add(langchain.llms.Databricks)
if hasattr(langchain.llms, "Mlflow"):
llms.add(langchain.llms.Mlflow)
if hasattr(langchain.chat_models, "ChatDatabricks"):
llms.add(langchain.chat_models.ChatDatabricks)
if hasattr(langchain.chat_models, "ChatMlflow"):
llms.add(langchain.chat_models.ChatMlflow)
return llms
def _validate_and_wrap_lc_model(lc_model, loader_fn):
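    """Validate that lc_model is a supported LangChain type (or a path to chain.py) and wrap retrievers in a _RetrieverChain."""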
import langchain.agents.agent
import langchain.chains.base
import langchain.chains.llm
import langchain.llms.huggingface_hub
import langchain.llms.openai
import langchain.schema
if isinstance(lc_model, str):
if os.path.basename(os.path.abspath(lc_model)) != "chain.py":
raise mlflow.MlflowException.invalid_parameter_value(
f"If {lc_model} is a string, it must be the path to a file "
"named `chain.py` on the local filesystem."
)
return lc_model
if not isinstance(lc_model, supported_lc_types()):
raise mlflow.MlflowException.invalid_parameter_value(
_UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=type(lc_model).__name__)
)
_SUPPORTED_LLMS = _get_supported_llms()
if isinstance(lc_model, langchain.chains.llm.LLMChain) and not any(
isinstance(lc_model.llm, supported_llm) for supported_llm in _SUPPORTED_LLMS
):
logger.warning(
_UNSUPPORTED_LLM_WARNING_MESSAGE,
type(lc_model.llm).__name__,
)
if isinstance(lc_model, langchain.agents.agent.AgentExecutor) and not any(
isinstance(lc_model.agent.llm_chain.llm, supported_llm) for supported_llm in _SUPPORTED_LLMS
):
logger.warning(
_UNSUPPORTED_LLM_WARNING_MESSAGE,
type(lc_model.agent.llm_chain.llm).__name__,
)
if special_chain_info := _get_special_chain_info_or_none(lc_model):
if isinstance(lc_model, langchain.chains.RetrievalQA) and version.parse(
langchain.__version__
) < version.parse("0.0.194"):
raise mlflow.MlflowException.invalid_parameter_value(
_UNSUPPORTED_LANGCHAIN_VERSION_ERROR_MESSAGE.format(
instance_type=type(lc_model).__name__
)
)
if loader_fn is None:
raise mlflow.MlflowException.invalid_parameter_value(
f"For {type(lc_model).__name__} models, a `loader_fn` must be provided."
)
if not isinstance(loader_fn, types.FunctionType):
raise mlflow.MlflowException.invalid_parameter_value(
"The `loader_fn` must be a function that returns a {loader_arg}.".format(
loader_arg=special_chain_info.loader_arg
)
)
# If lc_model is a retriever, wrap it in a _RetrieverChain
if isinstance(lc_model, langchain.schema.BaseRetriever):
from mlflow.langchain.retriever_chain import _RetrieverChain
if loader_fn is None:
raise mlflow.MlflowException.invalid_parameter_value(
f"For {type(lc_model).__name__} models, a `loader_fn` must be provided."
)
if not isinstance(loader_fn, types.FunctionType):
raise mlflow.MlflowException.invalid_parameter_value(
"The `loader_fn` must be a function that returns a retriever."
)
lc_model = _RetrieverChain(retriever=lc_model)
return lc_model
def _save_base_lcs(model, path, loader_fn=None, persist_dir=None):
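    """Save an LLMChain, AgentExecutor, or special chain to ``path`` and return the kwargs needed to reload it."""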
import langchain.agents.agent
import langchain.chains.base
import langchain.chains.llm
model_data_path = os.path.join(path, _MODEL_DATA_YAML_FILE_NAME)
model_data_kwargs = {
_MODEL_DATA_KEY: _MODEL_DATA_YAML_FILE_NAME,
_MODEL_LOAD_KEY: _BASE_LOAD_KEY,
}
if isinstance(model, langchain.chains.llm.LLMChain):
model.save(model_data_path)
elif isinstance(model, langchain.agents.agent.AgentExecutor):
if model.agent and model.agent.llm_chain:
model.agent.llm_chain.save(model_data_path)
if model.agent:
agent_data_path = os.path.join(path, _AGENT_DATA_FILE_NAME)
model.save_agent(agent_data_path)
model_data_kwargs[_AGENT_DATA_KEY] = _AGENT_DATA_FILE_NAME
if model.tools:
tools_data_path = os.path.join(path, _TOOLS_DATA_FILE_NAME)
try:
with open(tools_data_path, "wb") as f:
cloudpickle.dump(model.tools, f)
except Exception as e:
raise mlflow.MlflowException(
"Error when attempting to pickle the AgentExecutor tools. "
"This model likely does not support serialization."
) from e
model_data_kwargs[_TOOLS_DATA_KEY] = _TOOLS_DATA_FILE_NAME
else:
raise mlflow.MlflowException.invalid_parameter_value(
"For initializing the AgentExecutor, tools must be provided."
)
key_to_ignore = ["llm_chain", "agent", "tools", "callback_manager"]
temp_dict = {k: v for k, v in model.__dict__.items() if k not in key_to_ignore}
agent_primitive_path = os.path.join(path, _AGENT_PRIMITIVES_FILE_NAME)
with open(agent_primitive_path, "w") as config_file:
json.dump(temp_dict, config_file, indent=4)
model_data_kwargs[_AGENT_PRIMITIVES_DATA_KEY] = _AGENT_PRIMITIVES_FILE_NAME
elif special_chain_info := _get_special_chain_info_or_none(model):
# Save loader_fn by pickling
loader_fn_path = os.path.join(path, _LOADER_FN_FILE_NAME)
with open(loader_fn_path, "wb") as f:
cloudpickle.dump(loader_fn, f)
model_data_kwargs[_LOADER_FN_KEY] = _LOADER_FN_FILE_NAME
model_data_kwargs[_LOADER_ARG_KEY] = special_chain_info.loader_arg
if persist_dir is not None:
if os.path.exists(persist_dir):
# Save persist_dir by copying into subdir _PERSIST_DIR_NAME
persist_dir_data_path = os.path.join(path, _PERSIST_DIR_NAME)
shutil.copytree(persist_dir, persist_dir_data_path)
model_data_kwargs[_PERSIST_DIR_KEY] = _PERSIST_DIR_NAME
else:
raise mlflow.MlflowException.invalid_parameter_value(
"The directory provided for persist_dir does not exist."
)
# Save model
model.save(model_data_path)
elif isinstance(model, langchain.chains.base.Chain):
logger.warning(
_UNSUPPORTED_MODEL_WARNING_MESSAGE,
type(model).__name__,
)
model.save(model_data_path)
else:
raise mlflow.MlflowException.invalid_parameter_value(
_UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=type(model).__name__)
)
return model_data_kwargs
def _load_from_pickle(path):
with open(path, "rb") as f:
return cloudpickle.load(f)
def _load_from_json(path):
with open(path) as f:
return json.load(f)
def _load_from_yaml(path):
with open(path) as f:
return yaml.safe_load(f)
def _get_path_by_key(root_path, key, conf):
key_path = conf.get(key)
return os.path.join(root_path, key_path) if key_path else None
def _load_base_lcs(
local_model_path,
conf,
):
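    """Load a base LangChain model (chain, agent, or special chain) back from a saved model directory."""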
lc_model_path = os.path.join(
local_model_path, conf.get(_MODEL_DATA_KEY, _MODEL_DATA_YAML_FILE_NAME)
)
agent_path = _get_path_by_key(local_model_path, _AGENT_DATA_KEY, conf)
tools_path = _get_path_by_key(local_model_path, _TOOLS_DATA_KEY, conf)
agent_primitive_path = _get_path_by_key(local_model_path, _AGENT_PRIMITIVES_DATA_KEY, conf)
loader_fn_path = _get_path_by_key(local_model_path, _LOADER_FN_KEY, conf)
persist_dir = _get_path_by_key(local_model_path, _PERSIST_DIR_KEY, conf)
model_type = conf.get(_MODEL_TYPE_KEY)
loader_arg = conf.get(_LOADER_ARG_KEY)
from langchain.chains.loading import load_chain
from mlflow.langchain.retriever_chain import _RetrieverChain
if loader_arg is not None:
if loader_fn_path is None:
raise mlflow.MlflowException.invalid_parameter_value(
"Missing file for loader_fn which is required to build the model."
)
loader_fn = _load_from_pickle(loader_fn_path)
kwargs = {loader_arg: loader_fn(persist_dir)}
if model_type == _RetrieverChain.__name__:
model = _RetrieverChain.load(lc_model_path, **kwargs).retriever
else:
model = load_chain(lc_model_path, **kwargs)
elif agent_path is None and tools_path is None:
model = load_chain(lc_model_path)
else:
from langchain.agents import initialize_agent
llm = load_chain(lc_model_path)
tools = []
kwargs = {}
if os.path.exists(tools_path):
tools = _load_from_pickle(tools_path)
else:
raise mlflow.MlflowException(
"Missing file for tools which is required to build the AgentExecutor object."
)
if os.path.exists(agent_primitive_path):
kwargs = _load_from_json(agent_primitive_path)
model = initialize_agent(tools=tools, llm=llm, agent_path=agent_path, **kwargs)
return model
def register_pydantic_serializer():
"""
Helper function to pickle pydantic fields for pydantic v1.
Pydantic's Cython validators are not serializable.
https://github.com/cloudpipe/cloudpickle/issues/408
"""
import pydantic
if Version(pydantic.__version__) >= Version("2.0.0"):
return
import pydantic.fields
def custom_serializer(obj):
return {
"name": obj.name,
# outer_type_ is the original type for ModelFields,
# while type_ can be updated later with the nested type
# like int for List[int].
"type_": obj.outer_type_,
"class_validators": obj.class_validators,
"model_config": obj.model_config,
"default": obj.default,
"default_factory": obj.default_factory,
"required": obj.required,
"final": obj.final,
"alias": obj.alias,
"field_info": obj.field_info,
}
def custom_deserializer(kwargs):
return pydantic.fields.ModelField(**kwargs)
def _CloudPicklerReducer(obj):
return custom_deserializer, (custom_serializer(obj),)
warnings.warn(
"Using custom serializer to pickle pydantic.fields.ModelField classes, "
"this might miss some fields and validators. To avoid this, "
"please upgrade pydantic to v2 using `pip install pydantic -U` with "
"langchain 0.0.267 and above."
)
cloudpickle.CloudPickler.dispatch[pydantic.fields.ModelField] = _CloudPicklerReducer
def unregister_pydantic_serializer():
import pydantic
if Version(pydantic.__version__) >= Version("2.0.0"):
return
cloudpickle.CloudPickler.dispatch.pop(pydantic.fields.ModelField, None)
@contextlib.contextmanager
def register_pydantic_v1_serializer_cm():
try:
register_pydantic_serializer()
yield
finally:
unregister_pydantic_serializer()
| [
"langchain.schema.output_parser.StrOutputParser",
"langchain.agents.initialize_agent",
"langchain.chains.loading.load_chain"
] | [((2074, 2101), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2091, 2101), False, 'import logging\n'), ((10682, 10728), 'os.path.join', 'os.path.join', (['path', '_MODEL_DATA_YAML_FILE_NAME'], {}), '(path, _MODEL_DATA_YAML_FILE_NAME)\n', (10694, 10728), False, 'import os\n'), ((17545, 17796), 'warnings.warn', 'warnings.warn', (['"""Using custom serializer to pickle pydantic.fields.ModelField classes, this might miss some fields and validators. To avoid this, please upgrade pydantic to v2 using `pip install pydantic -U` with langchain 0.0.267 and above."""'], {}), "(\n 'Using custom serializer to pickle pydantic.fields.ModelField classes, this might miss some fields and validators. To avoid this, please upgrade pydantic to v2 using `pip install pydantic -U` with langchain 0.0.267 and above.'\n )\n", (17558, 17796), False, 'import warnings\n'), ((18062, 18133), 'cloudpickle.CloudPickler.dispatch.pop', 'cloudpickle.CloudPickler.dispatch.pop', (['pydantic.fields.ModelField', 'None'], {}), '(pydantic.fields.ModelField, None)\n', (18099, 18133), False, 'import cloudpickle\n'), ((5832, 5868), 'packaging.version.parse', 'version.parse', (['langchain.__version__'], {}), '(langchain.__version__)\n', (5845, 5868), False, 'from packaging import version\n'), ((5872, 5896), 'packaging.version.parse', 'version.parse', (['"""0.0.246"""'], {}), "('0.0.246')\n", (5885, 5896), False, 'from packaging import version\n'), ((6002, 6037), 'importlib.util.find_spec', 'find_spec', (['"""langchain_experimental"""'], {}), "('langchain_experimental')\n", (6011, 6037), False, 'from importlib.util import find_spec\n'), ((10434, 10469), 'mlflow.langchain.retriever_chain._RetrieverChain', '_RetrieverChain', ([], {'retriever': 'lc_model'}), '(retriever=lc_model)\n', (10449, 10469), False, 'from mlflow.langchain.retriever_chain import _RetrieverChain\n'), ((14012, 14031), 'cloudpickle.load', 'cloudpickle.load', (['f'], {}), '(f)\n', (14028, 14031), False, 'import cloudpickle\n'), ((14102, 14114), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14111, 14114), False, 'import json\n'), ((14185, 14202), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (14199, 14202), False, 'import yaml\n'), ((14289, 14322), 'os.path.join', 'os.path.join', (['root_path', 'key_path'], {}), '(root_path, key_path)\n', (14301, 14322), False, 'import os\n'), ((16628, 16657), 'packaging.version.Version', 'Version', (['pydantic.__version__'], {}), '(pydantic.__version__)\n', (16635, 16657), False, 'from packaging.version import Version\n'), ((16661, 16677), 'packaging.version.Version', 'Version', (['"""2.0.0"""'], {}), "('2.0.0')\n", (16668, 16677), False, 'from packaging.version import Version\n'), ((17405, 17441), 'pydantic.fields.ModelField', 'pydantic.fields.ModelField', ([], {}), '(**kwargs)\n', (17431, 17441), False, 'import pydantic\n'), ((17991, 18020), 'packaging.version.Version', 'Version', (['pydantic.__version__'], {}), '(pydantic.__version__)\n', (17998, 18020), False, 'from packaging.version import Version\n'), ((18024, 18040), 'packaging.version.Version', 'Version', (['"""2.0.0"""'], {}), "('2.0.0')\n", (18031, 18040), False, 'from packaging.version import Version\n'), ((4848, 4873), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '(**config)\n', (4863, 4873), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((6381, 6415), 'mlflow.utils.class_utils._get_class_from_string', '_get_class_from_string', (['class_name'], {}), '(class_name)\n', 
(6403, 6415), False, 'from mlflow.utils.class_utils import _get_class_from_string\n'), ((7750, 7909), 'mlflow.MlflowException.invalid_parameter_value', 'mlflow.MlflowException.invalid_parameter_value', (['f"""If {lc_model} is a string, it must be the path to a file named `chain.py` on the local filesystem."""'], {}), "(\n f'If {lc_model} is a string, it must be the path to a file named `chain.py` on the local filesystem.'\n )\n", (7796, 7909), False, 'import mlflow\n'), ((10274, 10389), 'mlflow.MlflowException.invalid_parameter_value', 'mlflow.MlflowException.invalid_parameter_value', (['"""The `loader_fn` must be a function that returns a retriever."""'], {}), "(\n 'The `loader_fn` must be a function that returns a retriever.')\n", (10320, 10389), False, 'import mlflow\n'), ((12255, 12302), 'os.path.join', 'os.path.join', (['path', '_AGENT_PRIMITIVES_FILE_NAME'], {}), '(path, _AGENT_PRIMITIVES_FILE_NAME)\n', (12267, 12302), False, 'import os\n'), ((15215, 15334), 'mlflow.MlflowException.invalid_parameter_value', 'mlflow.MlflowException.invalid_parameter_value', (['"""Missing file for loader_fn which is required to build the model."""'], {}), "(\n 'Missing file for loader_fn which is required to build the model.')\n", (15261, 15334), False, 'import mlflow\n'), ((15629, 15664), 'langchain.chains.loading.load_chain', 'load_chain', (['lc_model_path'], {}), '(lc_model_path, **kwargs)\n', (15639, 15664), False, 'from langchain.chains.loading import load_chain\n'), ((15733, 15758), 'langchain.chains.loading.load_chain', 'load_chain', (['lc_model_path'], {}), '(lc_model_path)\n', (15743, 15758), False, 'from langchain.chains.loading import load_chain\n'), ((15838, 15863), 'langchain.chains.loading.load_chain', 'load_chain', (['lc_model_path'], {}), '(lc_model_path)\n', (15848, 15863), False, 'from langchain.chains.loading import load_chain\n'), ((15915, 15941), 'os.path.exists', 'os.path.exists', (['tools_path'], {}), '(tools_path)\n', (15929, 15941), False, 'import os\n'), ((16169, 16205), 'os.path.exists', 'os.path.exists', (['agent_primitive_path'], {}), '(agent_primitive_path)\n', (16183, 16205), False, 'import os\n'), ((16283, 16354), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'llm', 'agent_path': 'agent_path'}), '(tools=tools, llm=llm, agent_path=agent_path, **kwargs)\n', (16299, 16354), False, 'from langchain.agents import initialize_agent\n'), ((7690, 7715), 'os.path.abspath', 'os.path.abspath', (['lc_model'], {}), '(lc_model)\n', (7705, 7715), False, 'import os\n'), ((8986, 9022), 'packaging.version.parse', 'version.parse', (['langchain.__version__'], {}), '(langchain.__version__)\n', (8999, 9022), False, 'from packaging import version\n'), ((9047, 9071), 'packaging.version.parse', 'version.parse', (['"""0.0.194"""'], {}), "('0.0.194')\n", (9060, 9071), False, 'from packaging import version\n'), ((11176, 11217), 'os.path.join', 'os.path.join', (['path', '_AGENT_DATA_FILE_NAME'], {}), '(path, _AGENT_DATA_FILE_NAME)\n', (11188, 11217), False, 'import os\n'), ((11390, 11431), 'os.path.join', 'os.path.join', (['path', '_TOOLS_DATA_FILE_NAME'], {}), '(path, _TOOLS_DATA_FILE_NAME)\n', (11402, 11431), False, 'import os\n'), ((11918, 12032), 'mlflow.MlflowException.invalid_parameter_value', 'mlflow.MlflowException.invalid_parameter_value', (['"""For initializing the AgentExecutor, tools must be provided."""'], {}), "(\n 'For initializing the AgentExecutor, tools must be provided.')\n", (11964, 12032), False, 'import mlflow\n'), ((12376, 12419), 
'json.dump', 'json.dump', (['temp_dict', 'config_file'], {'indent': '(4)'}), '(temp_dict, config_file, indent=4)\n', (12385, 12419), False, 'import json\n'), ((12639, 12679), 'os.path.join', 'os.path.join', (['path', '_LOADER_FN_FILE_NAME'], {}), '(path, _LOADER_FN_FILE_NAME)\n', (12651, 12679), False, 'import os\n'), ((15539, 15584), 'mlflow.langchain.retriever_chain._RetrieverChain.load', '_RetrieverChain.load', (['lc_model_path'], {}), '(lc_model_path, **kwargs)\n', (15559, 15584), False, 'from mlflow.langchain.retriever_chain import _RetrieverChain\n'), ((16025, 16136), 'mlflow.MlflowException', 'mlflow.MlflowException', (['"""Missing file for tools which is required to build the AgentExecutor object."""'], {}), "(\n 'Missing file for tools which is required to build the AgentExecutor object.'\n )\n", (16047, 16136), False, 'import mlflow\n'), ((12738, 12768), 'cloudpickle.dump', 'cloudpickle.dump', (['loader_fn', 'f'], {}), '(loader_fn, f)\n', (12754, 12768), False, 'import cloudpickle\n'), ((12961, 12988), 'os.path.exists', 'os.path.exists', (['persist_dir'], {}), '(persist_dir)\n', (12975, 12988), False, 'import os\n'), ((11524, 11556), 'cloudpickle.dump', 'cloudpickle.dump', (['model.tools', 'f'], {}), '(model.tools, f)\n', (11540, 11556), False, 'import cloudpickle\n'), ((11614, 11756), 'mlflow.MlflowException', 'mlflow.MlflowException', (['"""Error when attempting to pickle the AgentExecutor tools. This model likely does not support serialization."""'], {}), "(\n 'Error when attempting to pickle the AgentExecutor tools. This model likely does not support serialization.'\n )\n", (11636, 11756), False, 'import mlflow\n'), ((13106, 13143), 'os.path.join', 'os.path.join', (['path', '_PERSIST_DIR_NAME'], {}), '(path, _PERSIST_DIR_NAME)\n', (13118, 13143), False, 'import os\n'), ((13160, 13211), 'shutil.copytree', 'shutil.copytree', (['persist_dir', 'persist_dir_data_path'], {}), '(persist_dir, persist_dir_data_path)\n', (13175, 13211), False, 'import shutil\n'), ((13324, 13433), 'mlflow.MlflowException.invalid_parameter_value', 'mlflow.MlflowException.invalid_parameter_value', (['"""The directory provided for persist_dir does not exist."""'], {}), "(\n 'The directory provided for persist_dir does not exist.')\n", (13370, 13433), False, 'import mlflow\n')] |
import json
import os
import sys
from simple_agent_app import SimpleAgentApp
from azure.identity import DefaultAzureCredential
from parse import *
# add parent directory to path
sys.path.insert(0, str(os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))))
import langchain_utils
import utils
credential = DefaultAzureCredential(additionally_allowed_tenants=["*"])
OPENAI_API_KEY = None
"""
Required AzureML deployment score functions
- init() - called during deployment creation
- run() - called when invoking the endpoint deployment
"""
def init():
utils.load_secrets(credential)
# Load plugins into tools
plugins = langchain_utils.create_plugins_static()
global agent
agent = SimpleAgentApp(openai_config=utils.OpenAIConfig.from_env(), plugins=plugins)
def run(raw_data):
print(f"raw_data: {raw_data}")
question = json.loads(raw_data)["question"]
result = agent.run(question)
return result
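# Example request body for the deployed endpoint; run() expects a JSON object with a "question"
# key (the question text below is only an illustration):
#   {"question": "What can you help me with?"}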
| [
"langchain_utils.create_plugins_static"
] | [((318, 376), 'azure.identity.DefaultAzureCredential', 'DefaultAzureCredential', ([], {'additionally_allowed_tenants': "['*']"}), "(additionally_allowed_tenants=['*'])\n", (340, 376), False, 'from azure.identity import DefaultAzureCredential\n'), ((579, 609), 'utils.load_secrets', 'utils.load_secrets', (['credential'], {}), '(credential)\n', (597, 609), False, 'import utils\n'), ((655, 694), 'langchain_utils.create_plugins_static', 'langchain_utils.create_plugins_static', ([], {}), '()\n', (692, 694), False, 'import langchain_utils\n'), ((873, 893), 'json.loads', 'json.loads', (['raw_data'], {}), '(raw_data)\n', (883, 893), False, 'import json\n'), ((754, 783), 'utils.OpenAIConfig.from_env', 'utils.OpenAIConfig.from_env', ([], {}), '()\n', (781, 783), False, 'import utils\n'), ((231, 256), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (246, 256), False, 'import os\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
class WhatsAppClient:
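    """Minimal client for the WhatsApp Cloud (Graph) API, used to send text messages to a phone number."""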
API_URL = "https://graph.facebook.com/v17.0/"
WHATSAPP_API_TOKEN = "<Temporary access token from your WhatsApp API Setup>"
WHATSAPP_CLOUD_NUMBER_ID = "<Phone number ID from your WhatsApp API Setup>"
def __init__(self):
self.headers = {
"Authorization": f"Bearer {self.WHATSAPP_API_TOKEN}",
"Content-Type": "application/json",
}
self.API_URL = self.API_URL + self.WHATSAPP_CLOUD_NUMBER_ID
def send_text_message(self,message, phone_number):
payload = {
"messaging_product": 'whatsapp',
"to": phone_number,
"type": "text",
"text": {
"preview_url": False,
"body": message
}
}
response = requests.post(f"{self.API_URL}/messages", json=payload,headers=self.headers)
print(response.status_code)
assert response.status_code == 200, "Error sending message"
return response.status_code
os.environ["REPLICATE_API_TOKEN"] = "<your replicate api token>"
llama2_13b_chat = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
llm = Replicate(
model=llama2_13b_chat,
model_kwargs={"temperature": 0.01, "top_p": 1, "max_new_tokens":500}
)
client = WhatsAppClient()
app = Flask(__name__)
@app.route("/")
def hello_llama():
return "<p>Hello Llama 2</p>"
@app.route('/msgrcvd', methods=['POST', 'GET'])
def msgrcvd():
message = request.args.get('message')
#client.send_template_message("hello_world", "en_US", "14086745477")
answer = llm(message)
print(message)
print(answer)
    # Reuse the already-generated answer rather than calling the model a second time.
    client.send_text_message(answer, "14086745477")
return message + "<p/>" + answer
| [
"langchain.llms.Replicate"
] | [((1502, 1609), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1511, 1609), False, 'from langchain.llms import Replicate\n'), ((1647, 1662), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1652, 1662), False, 'from flask import Flask\n'), ((1815, 1842), 'flask.request.args.get', 'request.args.get', (['"""message"""'], {}), "('message')\n", (1831, 1842), False, 'from flask import request\n'), ((1101, 1178), 'requests.post', 'requests.post', (['f"""{self.API_URL}/messages"""'], {'json': 'payload', 'headers': 'self.headers'}), "(f'{self.API_URL}/messages', json=payload, headers=self.headers)\n", (1114, 1178), False, 'import requests\n')] |
import langchain
from langchain.cache import InMemoryCache
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
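# Cache completions in memory so repeated identical prompts are served without another API call.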
langchain.llm_cache = InMemoryCache()
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)
if __name__ == "__main__":
# Run the chain only specifying the input variable.
print(chain.run("colorful socks"))
| [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate",
"langchain.cache.InMemoryCache",
"langchain.llms.OpenAI"
] | [((199, 214), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (212, 214), False, 'from langchain.cache import InMemoryCache\n'), ((223, 246), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (229, 246), False, 'from langchain.llms import OpenAI\n'), ((256, 372), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good name for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good name for a company that makes {product}?')\n", (270, 372), False, 'from langchain.prompts import PromptTemplate\n'), ((389, 421), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (397, 421), False, 'from langchain.chains import LLMChain\n')] |
import time
from dotenv import load_dotenv
import langchain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
from langchain.cache import InMemoryCache
load_dotenv()
# to make caching obvious, we use a slow model
llm = OpenAI(model_name="text-davinci-002")
langchain.llm_cache = InMemoryCache()
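# With the cache enabled, the repeated identical prompts below should be served from memory:
# the callback should report no additional token usage and the calls should return almost instantly.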
with get_openai_callback() as cb:
start = time.time()
result = llm("What doesn't fall far from the tree?")
print(result)
end = time.time()
print("--- cb")
print(str(cb) + f" ({end - start:.2f} seconds)")
with get_openai_callback() as cb2:
start = time.time()
result2 = llm("What doesn't fall far from the tree?")
result3 = llm("What doesn't fall far from the tree?")
end = time.time()
print(result2)
print(result3)
print("--- cb2")
print(str(cb2) + f" ({end - start:.2f} seconds)")
| [
"langchain.cache.InMemoryCache",
"langchain.llms.OpenAI",
"langchain.callbacks.get_openai_callback"
] | [((189, 202), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (200, 202), False, 'from dotenv import load_dotenv\n'), ((257, 294), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""text-davinci-002"""'}), "(model_name='text-davinci-002')\n", (263, 294), False, 'from langchain.llms import OpenAI\n'), ((318, 333), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (331, 333), False, 'from langchain.cache import InMemoryCache\n'), ((340, 361), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (359, 361), False, 'from langchain.callbacks import get_openai_callback\n'), ((381, 392), 'time.time', 'time.time', ([], {}), '()\n', (390, 392), False, 'import time\n'), ((478, 489), 'time.time', 'time.time', ([], {}), '()\n', (487, 489), False, 'import time\n'), ((569, 590), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (588, 590), False, 'from langchain.callbacks import get_openai_callback\n'), ((611, 622), 'time.time', 'time.time', ([], {}), '()\n', (620, 622), False, 'import time\n'), ((749, 760), 'time.time', 'time.time', ([], {}), '()\n', (758, 760), False, 'import time\n')] |
import langchain
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from steamship import File, Task
from steamship.invocable import PackageService, post
from steamship_langchain.cache import SteamshipCache
from steamship_langchain.llms import OpenAI
class SummarizeAudioPackage(PackageService):
def __init__(self, **kwargs):
super().__init__(**kwargs)
langchain.llm_cache = SteamshipCache(client=self.client)
self.llm = OpenAI(client=self.client, cache=True)
@post("summarize_file")
def summarize_file(self, file_handle: str) -> str:
file = File.get(self.client, handle=file_handle)
text_splitter = CharacterTextSplitter()
texts = []
for block in file.blocks:
texts.extend(text_splitter.split_text(block.text))
docs = [Document(page_content=t) for t in texts]
chain = load_summarize_chain(self.llm, chain_type="map_reduce")
return chain.run(docs)
@post("summarize_audio_file")
def summarize_audio_file(self, file_handle: str) -> Task[str]:
transcriber = self.client.use_plugin("whisper-s2t-blockifier")
audio_file = File.get(self.client, handle=file_handle)
transcribe_task = audio_file.blockify(plugin_instance=transcriber.handle)
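        # Transcription runs asynchronously; schedule summarize_file to run only once the transcript task completes.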
return self.invoke_later(
"summarize_file",
wait_on_tasks=[transcribe_task],
arguments={"file_handle": audio_file.handle},
)
| [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.chains.summarize.load_summarize_chain",
"langchain.docstore.document.Document"
] | [((613, 635), 'steamship.invocable.post', 'post', (['"""summarize_file"""'], {}), "('summarize_file')\n", (617, 635), False, 'from steamship.invocable import PackageService, post\n'), ((1078, 1106), 'steamship.invocable.post', 'post', (['"""summarize_audio_file"""'], {}), "('summarize_audio_file')\n", (1082, 1106), False, 'from steamship.invocable import PackageService, post\n'), ((514, 548), 'steamship_langchain.cache.SteamshipCache', 'SteamshipCache', ([], {'client': 'self.client'}), '(client=self.client)\n', (528, 548), False, 'from steamship_langchain.cache import SteamshipCache\n'), ((568, 606), 'steamship_langchain.llms.OpenAI', 'OpenAI', ([], {'client': 'self.client', 'cache': '(True)'}), '(client=self.client, cache=True)\n', (574, 606), False, 'from steamship_langchain.llms import OpenAI\n'), ((706, 747), 'steamship.File.get', 'File.get', (['self.client'], {'handle': 'file_handle'}), '(self.client, handle=file_handle)\n', (714, 747), False, 'from steamship import File, Task\n'), ((772, 795), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {}), '()\n', (793, 795), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((985, 1040), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['self.llm'], {'chain_type': '"""map_reduce"""'}), "(self.llm, chain_type='map_reduce')\n", (1005, 1040), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1266, 1307), 'steamship.File.get', 'File.get', (['self.client'], {'handle': 'file_handle'}), '(self.client, handle=file_handle)\n', (1274, 1307), False, 'from steamship import File, Task\n'), ((928, 952), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 't'}), '(page_content=t)\n', (936, 952), False, 'from langchain.docstore.document import Document\n')] |
import langchain
import os
import streamlit as st
import requests
import sounddevice as sd
import wavio
os.environ["OPENAI_API_KEY"]="ADD KEY"
import openai
from openai import OpenAI
client=OpenAI()
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate
from langchain.schema.messages import SystemMessage
chat_template = ChatPromptTemplate.from_messages(
[
SystemMessage(
content=(
"You are a presonal assistant for {your name] and your name is luna "
"if the user call you by any other name than luna you need to correct him by your orginal name."
"And for every output you can also use the username in the answer which will be nice gesture"
"you can act more,like an human speaking more than an ai replying to the message"
"Consider the user as your friend"
"Speak like a friend"
"Be more creative and funny way"
)
),
HumanMessagePromptTemplate.from_template("{text}"),
]
)
llm = ChatOpenAI()
# Record audio
def record_audio(filename, duration, fs):
print("Recording audio...")
recording = sd.rec(int(duration * fs), samplerate=fs, channels=2)
sd.wait()
wavio.write(filename, recording, fs, sampwidth=2)
print("Audio recorded and saved as", filename)
## Streamlit UI
st.set_page_config(page_title="Personal voice assistant ")
website_heading = "I am Your Personal Voice assistant"
# Display as a heading
st.markdown(f"<h1 style='text-align: center; color: #a274a3;'>{website_heading}</h1>", unsafe_allow_html=True)
st.write("Speak here")
if st.button(label="Click here to speak"):
audio_filename = "input.wav"
duration = 5 # Duration of the recording in seconds
fs = 44100 # Sample rate
    record_audio(audio_filename, duration, fs)  ## record and store the user's speech
##converting to text using whisper
audio_file= open("input.wav", "rb")
transcript = client.audio.translations.create(
model="whisper-1",
file=audio_file)
a=transcript.text
# st.write(a)
print(a)
##model
a=llm(chat_template.format_messages(text=a))
a=a.content
##audio output
speech_file_path ="speech.mp3"
response = client.audio.speech.create(
model="tts-1",
voice="nova",
input=a)
response.stream_to_file(speech_file_path)
st.audio("speech.mp3")
| [
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.schema.messages.SystemMessage",
"langchain.chat_models.ChatOpenAI"
] | [((191, 199), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (197, 199), False, 'from openai import OpenAI\n'), ((1151, 1163), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1161, 1163), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1462, 1520), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Personal voice assistant """'}), "(page_title='Personal voice assistant ')\n", (1480, 1520), True, 'import streamlit as st\n'), ((1601, 1720), 'streamlit.markdown', 'st.markdown', (['f"""<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>"""'], {'unsafe_allow_html': '(True)'}), '(\n f"<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>",\n unsafe_allow_html=True)\n', (1612, 1720), True, 'import streamlit as st\n'), ((1714, 1736), 'streamlit.write', 'st.write', (['"""Speak here"""'], {}), "('Speak here')\n", (1722, 1736), True, 'import streamlit as st\n'), ((1740, 1778), 'streamlit.button', 'st.button', ([], {'label': '"""Click here to speak"""'}), "(label='Click here to speak')\n", (1749, 1778), True, 'import streamlit as st\n'), ((1329, 1338), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (1336, 1338), True, 'import sounddevice as sd\n'), ((1343, 1392), 'wavio.write', 'wavio.write', (['filename', 'recording', 'fs'], {'sampwidth': '(2)'}), '(filename, recording, fs, sampwidth=2)\n', (1354, 1392), False, 'import wavio\n'), ((2490, 2512), 'streamlit.audio', 'st.audio', (['"""speech.mp3"""'], {}), "('speech.mp3')\n", (2498, 2512), True, 'import streamlit as st\n'), ((468, 916), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way"""'}), "(content=\n 'You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way'\n )\n", (481, 916), False, 'from langchain.schema.messages import SystemMessage\n'), ((1084, 1134), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (1124, 1134), False, 'from langchain.prompts import HumanMessagePromptTemplate\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import langchain
from langchain.llms import OpenAI
from langchain import PromptTemplate
from configparser import ConfigParser
from langchain.cache import InMemoryCache
from posttext.src.posttext import PostText
langchain.llm_cache = InMemoryCache()
class ViewEngine:
def __init__(self, path):
print(path)
config = ConfigParser(comment_prefixes=None)
config.read(os.path.join(path, 'config.ini'))
self.posttext = PostText(config, path)
self.llm = OpenAI()
def verbalize(self, query: str, answer: str):
template = "Question: {query}\n\nAnswer: {context}\n\n Can you summarize the answer in a single sentence (durations are in seconds)?"
prompt = PromptTemplate(
input_variables=["query", "context"],
template=template)
return self.llm(prompt.format(query=query, context=answer))
def flatten(self, lst):
"""Flatten a nested list
"""
flattened_list = []
for item in lst:
if isinstance(item, tuple):
item = list(item)
if isinstance(item, list):
flattened_list.extend(self.flatten(item))
else:
flattened_list.append(item)
return flattened_list
def query(self,
query: str):
"""Answer a query using a View-based QA method.
"""
formattedprompt, sqlquery_before, sqlquery, view_res, eng_answer, provenance_ids, retrieval_res = self.posttext.query(query)
# print(provenance_ids)
provenance_ids = self.flatten(provenance_ids)
# sources = [tpl[1][1] for tpl in sources]
return {"question": query,
"view_res": view_res,
"eng_answer": eng_answer,
"answer": eng_answer,
"sources": provenance_ids,
"sql": sqlquery,
"sql_before": sqlquery_before}
if __name__ == '__main__':
# engine = ViewEngine("public/digital_data/")
engine = ViewEngine("personal-data/app_data/")
# print(engine.query("How many cities did I visit when I travel to Japan?"))
| [
"langchain.cache.InMemoryCache",
"langchain.llms.OpenAI",
"langchain.PromptTemplate"
] | [((847, 862), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (860, 862), False, 'from langchain.cache import InMemoryCache\n'), ((951, 986), 'configparser.ConfigParser', 'ConfigParser', ([], {'comment_prefixes': 'None'}), '(comment_prefixes=None)\n', (963, 986), False, 'from configparser import ConfigParser\n'), ((1065, 1087), 'posttext.src.posttext.PostText', 'PostText', (['config', 'path'], {}), '(config, path)\n', (1073, 1087), False, 'from posttext.src.posttext import PostText\n'), ((1107, 1115), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (1113, 1115), False, 'from langchain.llms import OpenAI\n'), ((1326, 1397), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query', 'context']", 'template': 'template'}), "(input_variables=['query', 'context'], template=template)\n", (1340, 1397), False, 'from langchain import PromptTemplate\n'), ((1007, 1039), 'os.path.join', 'os.path.join', (['path', '"""config.ini"""'], {}), "(path, 'config.ini')\n", (1019, 1039), False, 'import os\n')] |
"""QA using native LangChain features"""
from dotenv import load_dotenv
from genai import Client, Credentials
from genai.extensions.langchain import LangChainInterface
from genai.schema import DecodingMethod, TextGenerationParameters
try:
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
except ImportError:
raise ImportError("Could not import langchain: Please install ibm-generative-ai[langchain] extension.")
# make sure you have a .env file under genai root with
# GENAI_KEY=<your-genai-key>
# GENAI_API=<genai-api-endpoint> (optional) DEFAULT_API = "https://bam-api.res.ibm.com"
load_dotenv()
def heading(text: str) -> str:
"""Helper function for centering text."""
return "\n" + f" {text} ".center(80, "=") + "\n"
print(heading("QA with Langchain"))
parameters = TextGenerationParameters(
decoding_method=DecodingMethod.SAMPLE,
max_new_tokens=100,
min_new_tokens=1,
temperature=0.5,
top_k=50,
top_p=1,
)
pt1 = PromptTemplate(
input_variables=["topic"],
template="Generate a random question about {topic}: Question: ",
)
pt2 = PromptTemplate(
input_variables=["question"],
template="Answer the following question: {question}",
)
client = Client(credentials=Credentials.from_env())
model_id = "google/flan-ul2"
flan = LangChainInterface(model_id=model_id, client=client, parameters=parameters)
model = LangChainInterface(model_id=model_id, client=client)
prompt_to_flan_chain = pt1 | flan | StrOutputParser()
flan_to_model_chain = pt2 | model | StrOutputParser()
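# Compose the runnables: generate a question about the topic, then answer it with the second model.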
chain = {"question": prompt_to_flan_chain} | flan_to_model_chain
print(chain.invoke({"topic": "life"}))
| [
"langchain_core.prompts.PromptTemplate",
"langchain_core.output_parsers.StrOutputParser"
] | [((659, 672), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (670, 672), False, 'from dotenv import load_dotenv\n'), ((857, 998), 'genai.schema.TextGenerationParameters', 'TextGenerationParameters', ([], {'decoding_method': 'DecodingMethod.SAMPLE', 'max_new_tokens': '(100)', 'min_new_tokens': '(1)', 'temperature': '(0.5)', 'top_k': '(50)', 'top_p': '(1)'}), '(decoding_method=DecodingMethod.SAMPLE,\n max_new_tokens=100, min_new_tokens=1, temperature=0.5, top_k=50, top_p=1)\n', (881, 998), False, 'from genai.schema import DecodingMethod, TextGenerationParameters\n'), ((1029, 1140), 'langchain_core.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['topic']", 'template': '"""Generate a random question about {topic}: Question: """'}), "(input_variables=['topic'], template=\n 'Generate a random question about {topic}: Question: ')\n", (1043, 1140), False, 'from langchain_core.prompts import PromptTemplate\n'), ((1153, 1256), 'langchain_core.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': '"""Answer the following question: {question}"""'}), "(input_variables=['question'], template=\n 'Answer the following question: {question}')\n", (1167, 1256), False, 'from langchain_core.prompts import PromptTemplate\n'), ((1353, 1428), 'genai.extensions.langchain.LangChainInterface', 'LangChainInterface', ([], {'model_id': 'model_id', 'client': 'client', 'parameters': 'parameters'}), '(model_id=model_id, client=client, parameters=parameters)\n', (1371, 1428), False, 'from genai.extensions.langchain import LangChainInterface\n'), ((1437, 1489), 'genai.extensions.langchain.LangChainInterface', 'LangChainInterface', ([], {'model_id': 'model_id', 'client': 'client'}), '(model_id=model_id, client=client)\n', (1455, 1489), False, 'from genai.extensions.langchain import LangChainInterface\n'), ((1527, 1544), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (1542, 1544), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((1581, 1598), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (1596, 1598), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((1293, 1315), 'genai.Credentials.from_env', 'Credentials.from_env', ([], {}), '()\n', (1313, 1315), False, 'from genai import Client, Credentials\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AI. @by PyCharm
# @File : chatbase
# @Time : 2023/7/5 15:29
# @Author : betterme
# @WeChat : meutils
# @Software : PyCharm
# @Description :
from meutils.pipe import *
from langchain.schema import Document
from langchain.chat_models import ChatOpenAI
from langchain.cache import InMemoryCache
from langchain.memory import ConversationBufferWindowMemory
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore, DocArrayInMemorySearch, Zilliz, FAISS
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain # outputs SOURCE, wasting tokens
from langchain.chains import ConversationChain
from langchain.document_loaders import DirectoryLoader, PyMuPDFLoader
# import langchain
#
# langchain.verbose = True
# langchain.debug = True
class ChatBase(object):
"""
ChatBase().create_index().search().run(query='1+1')
"""
def __init__(self, model="gpt-3.5-turbo", embeddings: Embeddings = OpenAIEmbeddings(chunk_size=100), k=1,
temperature=0):
self.memory = ConversationBufferWindowMemory(memory_key="chat_history", return_messages=True, k=k)
self.memory_messages = self.memory.chat_memory.messages
        self.embeddings = embeddings # todo: support a local embedding model
self.llm = ChatOpenAI(model=model, temperature=temperature, streaming=True)
        self.chain = load_qa_chain(self.llm, chain_type="stuff") # map_rerank would re-rank results
#
self._docs = None
self._index = None
self._input = None
    def create_index(self, docs: List[Document], vectorstore: VectorStore = DocArrayInMemorySearch): # main time cost: check whether the cache takes effect
        self._index = vectorstore.from_documents(docs, self.embeddings) # embedding stage: could it be multithreaded via the cache?
return self
def search(self, query, k: int = 5, threshold: float = 0.7, **kwargs):
docs_scores = self._index.similarity_search_with_score(query, k=k, **kwargs)
self._docs = []
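        # Keep only documents whose similarity score exceeds the threshold.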
for doc, score in docs_scores:
if score > threshold:
doc.metadata['score'] = score
doc.metadata['page_content'] = doc.page_content
self._docs.append(doc)
self._input = {"input_documents": self._docs, "question": query} # todo: input_func
return self
def run(self):
        return self.chain.run(self._input) # streaming
| [
"langchain.chains.question_answering.load_qa_chain",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.memory.ConversationBufferWindowMemory",
"langchain.chat_models.ChatOpenAI"
] | [((1213, 1245), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'chunk_size': '(100)'}), '(chunk_size=100)\n', (1229, 1245), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1307, 1396), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'k': 'k'}), "(memory_key='chat_history', return_messages=\n True, k=k)\n", (1337, 1396), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((1526, 1590), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': 'model', 'temperature': 'temperature', 'streaming': '(True)'}), '(model=model, temperature=temperature, streaming=True)\n', (1536, 1590), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1612, 1655), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['self.llm'], {'chain_type': '"""stuff"""'}), "(self.llm, chain_type='stuff')\n", (1625, 1655), False, 'from langchain.chains.question_answering import load_qa_chain\n')] |
import langchain
from langchain.chat_models.base import BaseChatModel, SimpleChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatResult,
HumanMessage,
SystemMessage,
)
from typing import Any, Dict, List, Mapping, Optional, Sequence, TypedDict
import websocket
import uuid
import json
from .general import get_open_port
class MessageDict(TypedDict):
role: str
content: str
class RequestDict(TypedDict):
messages: List[MessageDict]
temperature: float
request_id: str
class ResponseDict(TypedDict):
content: str
request_id: str
class ChatWindowAI(BaseChatModel):
model_name: str = "window"
"""Model name to use."""
temperature: float = 0
"""What sampling temperature to use."""
streaming: bool = False
"""Whether to stream the results."""
request_timeout: int = 3600
"""Timeout in seconds for the request."""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "window-chat"
def _generate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
output_str = self._call(messages, stop=stop)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
result = ChatResult(generations=[generation])
return result
async def _agenerate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
return self._generate(messages, stop=stop)
def _call(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> str:
request_id = str(uuid.uuid4())
request: RequestDict = {
"messages": [],
"temperature": self.temperature,
"request_id": request_id,
}
for message in messages:
role = "user" # default role is user
if isinstance(message, HumanMessage):
role = "user"
elif isinstance(message, AIMessage):
role = "assistant"
elif isinstance(message, SystemMessage):
role = "system"
request["messages"].append(
{
"role": role,
"content": message.content,
}
)
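        # Send the request to the local Window model over a websocket and wait for a single reply.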
ws = websocket.WebSocket()
port = get_open_port()
ws.connect(f"ws://127.0.0.1:{port}/windowmodel")
ws.send(json.dumps(request))
message = ws.recv()
ws.close()
response: ResponseDict = json.loads(message)
response_content = response["content"]
response_request_id = response["request_id"]
# sanity check that response corresponds to request
if request_id != response_request_id:
raise ValueError(
f"Invalid request ID: {response_request_id}, expected: {request_id}"
)
return response_content
| [
"langchain.schema.AIMessage",
"langchain.schema.ChatResult",
"langchain.schema.ChatGeneration"
] | [((1236, 1265), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'output_str'}), '(content=output_str)\n', (1245, 1265), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, HumanMessage, SystemMessage\n'), ((1287, 1318), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (1301, 1318), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, HumanMessage, SystemMessage\n'), ((1336, 1372), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': '[generation]'}), '(generations=[generation])\n', (1346, 1372), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, HumanMessage, SystemMessage\n'), ((2389, 2410), 'websocket.WebSocket', 'websocket.WebSocket', ([], {}), '()\n', (2408, 2410), False, 'import websocket\n'), ((2617, 2636), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (2627, 2636), False, 'import json\n'), ((1701, 1713), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1711, 1713), False, 'import uuid\n'), ((2515, 2534), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (2525, 2534), False, 'import json\n')] |
import logging
import requests
from typing import Optional, List, Dict, Mapping, Any
import langchain
from langchain.llms.base import LLM
from langchain.cache import InMemoryCache
logging.basicConfig(level=logging.INFO)
# enable the LLM cache
langchain.llm_cache = InMemoryCache()
class AgentZhipuAI(LLM):
import zhipuai as zhipuai
    # model service URL
    url = "127.0.0.1"
    zhipuai.api_key = "1f565e40af1198e11ff1fd8a5b42771d.SjNfezc40YFsz2KC"  # API key obtained from the console
    model = "chatglm_pro" # large model version
history = []
def getText(self,role, content):
        # role specifies the speaker role; content is the prompt text
jsoncon = {}
jsoncon["role"] = role
jsoncon["content"] = content
self.history.append(jsoncon)
return self.history
@property
def _llm_type(self) -> str:
return "AgentZhipuAI"
@classmethod
def _post(self, url: str, query: Dict) -> Any:
"""POST请求"""
response = requests.post(url, data=query).json()
return response
    def _call(self, prompt: str, stop: Optional[List[str]] = None, role="user") -> str:
        """Invoke the ZhipuAI chat model with the accumulated message history."""
# construct query
response = self.zhipuai.model_api.invoke(
model=self.model,
prompt=self.getText(role=role, content=prompt)
)
choices = (response['data']['choices'])[0]
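        # Keep the assistant reply in the history so later turns retain conversational context.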
self.history.append(choices)
return choices["content"]
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters.
"""
_param_dict = {
"url": self.url
}
return _param_dict
if __name__ == '__main__':
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
llm = AgentZhipuAI()
    # example prompt with no input variables
no_input_prompt = PromptTemplate(input_variables=[], template="给我讲个笑话。")
no_input_prompt.format()
prompt = PromptTemplate(
input_variables=["location", "street"],
template="作为一名专业的旅游顾问,简单的说一下{location}有什么好玩的景点,特别是在{street}?只要说一个就可以。",
)
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run({"location": "南京", "street": "新街口"}))
from langchain.chains import ConversationChain
conversation = ConversationChain(llm=llm, verbose=True)
output = conversation.predict(input="你好!")
print(output)
output = conversation.predict(input="南京是哪里的省会?")
print(output)
output = conversation.predict(input="那里有什么好玩的地方,简单的说一个就好。")
print(output)
| [
"langchain.chains.LLMChain",
"langchain.cache.InMemoryCache",
"langchain.prompts.PromptTemplate",
"langchain.chains.ConversationChain"
] | [((183, 222), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (202, 222), False, 'import logging\n'), ((256, 271), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (269, 271), False, 'from langchain.cache import InMemoryCache\n'), ((1830, 1884), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': '[]', 'template': '"""给我讲个笑话。"""'}), "(input_variables=[], template='给我讲个笑话。')\n", (1844, 1884), False, 'from langchain.prompts import PromptTemplate\n'), ((1928, 2059), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['location', 'street']", 'template': '"""作为一名专业的旅游顾问,简单的说一下{location}有什么好玩的景点,特别是在{street}?只要说一个就可以。"""'}), "(input_variables=['location', 'street'], template=\n '作为一名专业的旅游顾问,简单的说一下{location}有什么好玩的景点,特别是在{street}?只要说一个就可以。')\n", (1942, 2059), False, 'from langchain.prompts import PromptTemplate\n'), ((2091, 2123), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2099, 2123), False, 'from langchain.chains import LLMChain\n'), ((2254, 2294), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'llm': 'llm', 'verbose': '(True)'}), '(llm=llm, verbose=True)\n', (2271, 2294), False, 'from langchain.chains import ConversationChain\n'), ((942, 972), 'requests.post', 'requests.post', (['url'], {'data': 'query'}), '(url, data=query)\n', (955, 972), False, 'import requests\n')] |
'''
Example script to automatically write a screenplay from a newsgroup post using agents with Crew.ai (https://github.com/joaomdmoura/crewAI)
You can also try it out with a personal email with many replies back and forth and see it turn into a movie script.
Demonstrates:
- multiple API endpoints (offical Mistral, Together.ai, Anyscale)
- running single tasks: spam detection and scoring
- running a crew to create a screenplay from a newsgroup post by first analyzing the text, creating a dialogue and ultimately formatting it
Additional endpoints requirements:
pip install langchain_mistralai
pip install langchain-together
Author: Toon Beerten ([email protected])
License: MIT
'''
import os
import re
from crewai import Agent, Task, Crew, Process
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.chat_models import openai
#endpoint specific imports
import langchain_mistralai
from langchain_mistralai.chat_models import ChatMistralAI
from langchain_community.llms import Together
from langchain_community.chat_models import ChatAnyscale
## Choose here which API endpoint to use, uncomment only one:
# Official Mistral: benefit of having access to mistral-medium
# Together.ai: lots of models to choose from
# Anyscale: cheapest at the time of writing
#endpoint = 'mistral_official'
#endpoint = 'togetherai'
endpoint = 'mistral_official'
#put you API keys here
mistral_key = ''
togetherai_key = ''
anyscale_key = ''
# model choice: I already get good results with mistralai/Mistral-7B-Instruct-v0.2
if endpoint == 'mistral_official':
mixtral=ChatMistralAI(mistral_api_key=mistral_key, model="mistral-tiny",temperature=0.6)
elif endpoint == 'togetherai':
    # I get timeouts using Together(), so I use ChatOpenAI() instead
#mixtral = Together(model="mistralai/Mistral-7B-Instruct-v0.2", together_api_key=togetherai_key ) #or mistralai/Mixtral-8x7B-Instruct-v0.1
mixtral= openai.ChatOpenAI(base_url="https://api.together.xyz/v1", api_key=togetherai_key, temperature=0.5, model="mistralai/Mistral-7B-Instruct-v0.2")
elif endpoint == 'anyscale':
mixtral = ChatAnyscale(model='mistralai/Mistral-7B-Instruct-v0.1', api_key=anyscale_key, streaming=False)
## Define Agents
spamfilter = Agent(
role='spamfilter',
goal='''Decide whether a text is spam or not.''',
backstory='You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
analyst = Agent(
role='analyse',
goal='''You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain.''',
backstory='You are an expert discussion analyst.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
scriptwriter = Agent(
role='scriptwriter',
goal='Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals.',
backstory='''You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes.''',
llm=mixtral,
verbose=True,
allow_delegation=False
)
formatter = Agent(
role='formatter',
goal='''Format the text as asked. Leave out actions from discussion members that happen between brackets, eg (smiling).''',
backstory='You are an expert text formatter.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
scorer = Agent(
role='scorer',
goal='''You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:
Scale:
1-3: Poor - The dialogue has significant issues that prevent effective communication.
4-6: Average - The dialogue has some good points but also has notable weaknesses.
7-9: Good - The dialogue is mostly effective with minor issues.
10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.
Factors to Consider:
Clarity: How clear is the exchange? Are the statements and responses easy to understand?
Relevance: Do the responses stay on topic and contribute to the conversation's purpose?
Conciseness: Is the dialogue free of unnecessary information or redundancy?
Politeness: Are the participants respectful and considerate in their interaction?
Engagement: Do the participants seem interested and actively involved in the dialogue?
Flow: Is there a natural progression of ideas and responses? Are there awkward pauses or interruptions?
Coherence: Does the dialogue make logical sense as a whole?
Responsiveness: Do the participants address each other's points adequately?
Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?
Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?
''',
backstory='You are an expert at scoring conversations on a scale of 1 to 10.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
#this is one example of a public post in the newsgroup alt.atheism
#try it out yourself by replacing this with your own email thread or text or ...
discussion = '''From: [email protected] (Keith Allan Schneider)
Subject: Re: <Political Atheists?
Organization: California Institute of Technology, Pasadena
Lines: 50
NNTP-Posting-Host: punisher.caltech.edu
[email protected] (Robert Beauchaine) writes:
>>I think that about 70% (or so) people approve of the
>>death penalty, even realizing all of its shortcomings. Doesn't this make
>>it reasonable? Or are *you* the sole judge of reasonability?
>Aside from revenge, what merits do you find in capital punishment?
Are we talking about me, or the majority of the people that support it?
Anyway, I think that "revenge" or "fairness" is why most people are in
favor of the punishment. If a murderer is going to be punished, people
that think that he should "get what he deserves." Most people wouldn't
think it would be fair for the murderer to live, while his victim died.
>Revenge? Petty and pathetic.
Perhaps you think that it is petty and pathetic, but your views are in the
minority.
>We have a local televised hot topic talk show that very recently
>did a segment on capital punishment. Each and every advocate of
>the use of this portion of our system of "jurisprudence" cited the
>main reason for supporting it: "That bastard deserved it". True
>human compassion, forgiveness, and sympathy.
Where are we required to have compassion, forgiveness, and sympathy? If
someone wrongs me, I will take great lengths to make sure that his advantage
is removed, or a similar situation is forced upon him. If someone kills
another, then we can apply the golden rule and kill this person in turn.
Is not our entire moral system based on such a concept?
Or, are you stating that human life is sacred, somehow, and that it should
never be violated? This would sound like some sort of religious view.
>>I mean, how reasonable is imprisonment, really, when you think about it?
>>Sure, the person could be released if found innocent, but you still
>>can't undo the imiprisonment that was served. Perhaps we shouldn't
>>imprision people if we could watch them closely instead. The cost would
>>probably be similar, especially if we just implanted some sort of
>>electronic device.
>Would you rather be alive in prison or dead in the chair?
Once a criminal has committed a murder, his desires are irrelevant.
And, you still have not answered my question. If you are concerned about
the death penalty due to the possibility of the execution of an innocent,
then why isn't this same concern shared with imprisonment. Shouldn't we,
by your logic, administer as minimum as punishment as possible, to avoid
violating the liberty or happiness of an innocent person?
keith
'''
# Filter out spam and vulgar posts
task0 = Task(description='Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n' + discussion, agent=spamfilter)
result = task0.execute()
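# The spam filter was instructed to reply with STOP for vulgar or spam posts.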
if "STOP" in result:
#stop here and proceed to next post
print('This spam message will be filtered out')
# process post with a crew of agents, ultimately delivering a well formatted dialogue
task1 = Task(description='Analyse in much detail the following discussion:\n### DISCUSSION:\n' + discussion, agent=analyst)
task2 = Task(description='Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes.', agent=scriptwriter)
task3 = Task(description='''Format the script exactly like this:
## (person 1):
(first text line from person 1)
## (person 2):
(first text line from person 2)
## (person 1):
(second text line from person 1)
## (person 2):
(second text line from person 2)
''', agent=formatter)
crew = Crew(
agents=[analyst, scriptwriter,formatter],
tasks=[task1, task2, task3],
verbose=2, # Crew verbose more will let you know what tasks are being worked on, you can set it to 1 or 2 to different logging levels
process=Process.sequential # Sequential process will have tasks executed one after the other and the outcome of the previous one is passed as extra content into this next.
)
result = crew.kickoff()
#get rid of directions and actions between brackets, eg: (smiling)
result = re.sub(r'\(.*?\)', '', result)
print('===================== end result from crew ===================================')
print(result)
print('===================== score ==================================================')
task4 = Task(description='Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. Do not give an explanation.\n'+result, agent=scorer)
score = task4.execute()
score = score.split('\n')[0] #sometimes an explanation comes after score, ignore
print(f'Scoring the dialogue as: {score}/10') | [
"langchain.chat_models.openai.ChatOpenAI",
"langchain_community.chat_models.ChatAnyscale",
"langchain_mistralai.chat_models.ChatMistralAI"
] | [((2292, 2556), 'crewai.Agent', 'Agent', ([], {'role': '"""spamfilter"""', 'goal': '"""Decide whether a text is spam or not."""', 'backstory': '"""You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='spamfilter', goal='Decide whether a text is spam or not.',\n backstory=\n 'You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language.'\n , llm=mixtral, verbose=True, allow_delegation=False)\n", (2297, 2556), False, 'from crewai import Agent, Task, Crew, Process\n'), ((2581, 2886), 'crewai.Agent', 'Agent', ([], {'role': '"""analyse"""', 'goal': '"""You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain."""', 'backstory': '"""You are an expert discussion analyst."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='analyse', goal=\n 'You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain.'\n , backstory='You are an expert discussion analyst.', llm=mixtral,\n verbose=True, allow_delegation=False)\n", (2586, 2886), False, 'from crewai import Agent, Task, Crew, Process\n'), ((2916, 3352), 'crewai.Agent', 'Agent', ([], {'role': '"""scriptwriter"""', 'goal': '"""Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals."""', 'backstory': '"""You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='scriptwriter', goal=\n 'Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals.'\n , backstory=\n 'You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes.'\n , llm=mixtral, verbose=True, allow_delegation=False)\n", (2921, 3352), False, 'from crewai import Agent, Task, Crew, Process\n'), ((3375, 3631), 'crewai.Agent', 'Agent', ([], {'role': '"""formatter"""', 'goal': '"""Format the text as asked. Leave out actions from discussion members that happen between brackets, eg (smiling)."""', 'backstory': '"""You are an expert text formatter."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='formatter', goal=\n 'Format the text as asked. 
Leave out actions from discussion members that happen between brackets, eg (smiling).'\n , backstory='You are an expert text formatter.', llm=mixtral, verbose=\n True, allow_delegation=False)\n", (3380, 3631), False, 'from crewai import Agent, Task, Crew, Process\n'), ((3654, 5254), 'crewai.Agent', 'Agent', ([], {'role': '"""scorer"""', 'goal': '"""You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:\n Scale:\n 1-3: Poor - The dialogue has significant issues that prevent effective communication.\n 4-6: Average - The dialogue has some good points but also has notable weaknesses.\n 7-9: Good - The dialogue is mostly effective with minor issues.\n 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.\n Factors to Consider:\n Clarity: How clear is the exchange? Are the statements and responses easy to understand?\n Relevance: Do the responses stay on topic and contribute to the conversation\'s purpose?\n Conciseness: Is the dialogue free of unnecessary information or redundancy?\n Politeness: Are the participants respectful and considerate in their interaction?\n Engagement: Do the participants seem interested and actively involved in the dialogue?\n Flow: Is there a natural progression of ideas and responses? Are there awkward pauses or interruptions?\n Coherence: Does the dialogue make logical sense as a whole?\n Responsiveness: Do the participants address each other\'s points adequately?\n Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?\n Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?\n """', 'backstory': '"""You are an expert at scoring conversations on a scale of 1 to 10."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), '(role=\'scorer\', goal=\n """You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:\n Scale:\n 1-3: Poor - The dialogue has significant issues that prevent effective communication.\n 4-6: Average - The dialogue has some good points but also has notable weaknesses.\n 7-9: Good - The dialogue is mostly effective with minor issues.\n 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.\n Factors to Consider:\n Clarity: How clear is the exchange? Are the statements and responses easy to understand?\n Relevance: Do the responses stay on topic and contribute to the conversation\'s purpose?\n Conciseness: Is the dialogue free of unnecessary information or redundancy?\n Politeness: Are the participants respectful and considerate in their interaction?\n Engagement: Do the participants seem interested and actively involved in the dialogue?\n Flow: Is there a natural progression of ideas and responses? 
Are there awkward pauses or interruptions?\n Coherence: Does the dialogue make logical sense as a whole?\n Responsiveness: Do the participants address each other\'s points adequately?\n Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?\n Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?\n """\n , backstory=\n \'You are an expert at scoring conversations on a scale of 1 to 10.\',\n llm=mixtral, verbose=True, allow_delegation=False)\n', (3659, 5254), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8227, 8430), 'crewai.Task', 'Task', ([], {'description': '("""Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n"""\n + discussion)', 'agent': 'spamfilter'}), '(description=\n """Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n"""\n + discussion, agent=spamfilter)\n', (8231, 8430), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8659, 8785), 'crewai.Task', 'Task', ([], {'description': '("""Analyse in much detail the following discussion:\n### DISCUSSION:\n""" +\n discussion)', 'agent': 'analyst'}), '(description=\n """Analyse in much detail the following discussion:\n### DISCUSSION:\n""" +\n discussion, agent=analyst)\n', (8663, 8785), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8784, 8991), 'crewai.Task', 'Task', ([], {'description': '"""Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes."""', 'agent': 'scriptwriter'}), "(description=\n 'Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes.'\n , agent=scriptwriter)\n", (8788, 8991), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8991, 9332), 'crewai.Task', 'Task', ([], {'description': '"""Format the script exactly like this:\n ## (person 1):\n (first text line from person 1)\n \n ## (person 2):\n (first text line from person 2)\n \n ## (person 1):\n (second text line from person 1)\n \n ## (person 2):\n (second text line from person 2)\n \n """', 'agent': 'formatter'}), '(description=\n """Format the script exactly like this:\n ## (person 1):\n (first text line from person 1)\n \n ## (person 2):\n (first text line from person 2)\n \n ## (person 1):\n (second text line from person 1)\n \n ## (person 2):\n (second text line from person 2)\n \n """\n , agent=formatter)\n', (8995, 9332), False, 'from crewai import Agent, Task, Crew, Process\n'), ((9344, 9463), 'crewai.Crew', 'Crew', ([], {'agents': '[analyst, scriptwriter, formatter]', 'tasks': '[task1, task2, task3]', 'verbose': '(2)', 'process': 'Process.sequential'}), '(agents=[analyst, scriptwriter, formatter], tasks=[task1, task2, task3],\n verbose=2, process=Process.sequential)\n', (9348, 9463), False, 'from crewai import Agent, Task, Crew, Process\n'), ((9847, 9878), 're.sub', 're.sub', (['"""\\\\(.*?\\\\)"""', '""""""', 'result'], {}), "('\\\\(.*?\\\\)', '', result)\n", (9853, 9878), False, 'import re\n'), ((10082, 10288), 'crewai.Task', 'Task', ([], {'description': '("""Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. 
Do not give an explanation.\n"""\n + result)', 'agent': 'scorer'}), '(description=\n """Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. Do not give an explanation.\n"""\n + result, agent=scorer)\n', (10086, 10288), False, 'from crewai import Agent, Task, Crew, Process\n'), ((1635, 1720), 'langchain_mistralai.chat_models.ChatMistralAI', 'ChatMistralAI', ([], {'mistral_api_key': 'mistral_key', 'model': '"""mistral-tiny"""', 'temperature': '(0.6)'}), "(mistral_api_key=mistral_key, model='mistral-tiny',\n temperature=0.6)\n", (1648, 1720), False, 'from langchain_mistralai.chat_models import ChatMistralAI\n'), ((1970, 2122), 'langchain.chat_models.openai.ChatOpenAI', 'openai.ChatOpenAI', ([], {'base_url': '"""https://api.together.xyz/v1"""', 'api_key': 'togetherai_key', 'temperature': '(0.5)', 'model': '"""mistralai/Mistral-7B-Instruct-v0.2"""'}), "(base_url='https://api.together.xyz/v1', api_key=\n togetherai_key, temperature=0.5, model='mistralai/Mistral-7B-Instruct-v0.2'\n )\n", (1987, 2122), False, 'from langchain.chat_models import openai\n'), ((2157, 2257), 'langchain_community.chat_models.ChatAnyscale', 'ChatAnyscale', ([], {'model': '"""mistralai/Mistral-7B-Instruct-v0.1"""', 'api_key': 'anyscale_key', 'streaming': '(False)'}), "(model='mistralai/Mistral-7B-Instruct-v0.1', api_key=\n anyscale_key, streaming=False)\n", (2169, 2257), False, 'from langchain_community.chat_models import ChatAnyscale\n')] |
"""Chat agent with question answering
"""
import os
from utils.giphy import GiphyAPIWrapper
from dataclasses import dataclass
from langchain.chains import LLMChain, LLMRequestsChain
from langchain import Wikipedia, OpenAI
from langchain.agents.react.base import DocstoreExplorer
from langchain.agents import (
ZeroShotAgent,
Tool,
AgentExecutor,
get_all_tool_names,
load_tools,
initialize_agent,
)
from langchain.prompts import PromptTemplate
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.agents.conversational.base import ConversationalAgent
from datetime import datetime
import langchain
from langchain.cache import InMemoryCache
langchain.llm_cache = InMemoryCache()
news_api_key = os.environ["NEWS_API_KEY"]
tmdb_bearer_token = os.environ["TMDB_API_KEY"]
@dataclass
class ChatAgent:
agent_executor: AgentExecutor = None
def _get_docstore_agent(self):
docstore = DocstoreExplorer(Wikipedia())
docstore_tools = [
Tool(name="Search", func=docstore.search, description="Search wikipedia"),
Tool(
name="Lookup",
func=docstore.lookup,
description="Lookup a wikipedia page",
),
]
docstore_llm = OpenAI(temperature=0, model_name="gpt-3.5-turbo")
docstore_agent = initialize_agent(
docstore_tools, docstore_llm, agent="react-docstore", verbose=True
)
return docstore_agent
def _get_requests_llm_tool(self):
template = """
Extracted: {requests_result}"""
PROMPT = PromptTemplate(
input_variables=["requests_result"],
template=template,
)
def lambda_func(input):
out = LLMRequestsChain(
llm_chain=LLMChain(
llm=OpenAI(temperature=0), prompt=PROMPT, verbose=True
)
).run(input)
return out.strip()
return lambda_func
def __init__(self, *, conversation_chain: LLMChain = None, history_array):
date = datetime.today().strftime("%B %d, %Y")
# set up a Wikipedia docstore agent
docstore_agent = self._get_docstore_agent()
giphy = GiphyAPIWrapper()
# tool_names = get_all_tool_names()
tool_names = [
"wolfram-alpha",
"llm-math",
"open-meteo-api",
"news-api",
"tmdb-api",
"wikipedia",
]
requests_tool = self._get_requests_llm_tool()
tools = load_tools(
tool_names,
llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", verbose=True),
news_api_key=news_api_key,
tmdb_bearer_token=tmdb_bearer_token,
)
# Tweak some of the tool descriptions
for tool in tools:
if tool.name == "Calculator":
tool.description = (
"Use this only to solve numeric math problems and to do arithmetic."
)
tools = tools + [
Tool(
name="WikipediaSearch",
description="Useful for answering a wide range of factual, scientific, academic, political and historical questions.",
func=docstore_agent.run,
),
Tool(
name="GiphySearch",
func=giphy.run,
return_direct=True,
description="useful for when you need to find a gif or picture, and for adding humor to your replies. Input should be a query, and output will be an html embed code which you MUST include in your Final Answer.",
),
Tool(
name="Requests",
func=requests_tool,
description="A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page.",
),
]
# set up the google search tool if the env var is set
if "GOOGLE_API_KEY" in os.environ:
from langchain.utilities import GoogleSearchAPIWrapper
tools.append(
Tool(
name="Search",
func=GoogleSearchAPIWrapper().run,
description="Use this tool for questions relating to current events, or when you can't find an answer using any of the other tools.",
)
)
# set up the serpapi search tool if the env var is set
if "SERPAPI_API_KEY" in os.environ:
from langchain import SerpAPIWrapper
serpapi = SerpAPIWrapper()
tools.append(
Tool(
name="Search",
func=serpapi.run,
description="Use this tool for questions relating to current events, or when you can't find an answer using any of the other tools.",
)
)
ai_prefix = "AI"
human_prefix = "Human"
prefix = f"""{ai_prefix} is a large language model. {ai_prefix} is represented by a 🤖.
{ai_prefix} uses a light, humorous tone, and very frequently includes emojis its responses. Responses with code examples should be formatted in code blocks using <pre><code></code></pre> tags.
{ai_prefix} is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, {ai_prefix} is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
If {ai_prefix} can't provide a good response, it will truthfully answer that it can't help with the user's request.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
TOOLS:
------
Assistant has access to the following tools:
"""
suffix = f"""
The current date is {date}. Questions that refer to a specific date or time period will be interpreted relative to this date.
    After you answer the question, you MUST determine which language your answer is written in, and append the language code to the end of the Final Answer, within parentheses, like this (en-US).
Begin!
Previous conversation history:
{{chat_history}}
New input: {{input}}
{{agent_scratchpad}}
"""
memory = ConversationBufferMemory(memory_key="chat_history")
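        # Replay the prior exchanges into the conversation memory.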
for item in history_array:
memory.save_context(
{f"{ai_prefix}": item["prompt"]}, {f"{human_prefix}": item["response"]}
)
llm = OpenAI(temperature=0.5, model_name="gpt-3.5-turbo")
llm_chain = LLMChain(
llm=llm,
prompt=ConversationalAgent.create_prompt(
tools,
ai_prefix=ai_prefix,
human_prefix=human_prefix,
prefix=prefix,
suffix=suffix,
),
verbose=True,
)
agent_obj = ConversationalAgent(
llm_chain=llm_chain, ai_prefix=ai_prefix, verbose=True
)
self.agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent_obj, tools=tools, verbose=True, max_iterations=5, memory=memory
)
| [
"langchain.agents.initialize_agent",
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.utilities.GoogleSearchAPIWrapper",
"langchain.Wikipedia",
"langchain.agents.conversational.base.ConversationalAgent",
"langchain.SerpAPIWrapper",
"langchain.agents.conversational.base.ConversationalAgent.create_prompt",
"langchain.agents.Tool",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.cache.InMemoryCache",
"langchain.prompts.PromptTemplate",
"langchain.OpenAI"
] | [((726, 741), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (739, 741), False, 'from langchain.cache import InMemoryCache\n'), ((1293, 1342), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (1299, 1342), False, 'from langchain import Wikipedia, OpenAI\n'), ((1368, 1456), 'langchain.agents.initialize_agent', 'initialize_agent', (['docstore_tools', 'docstore_llm'], {'agent': '"""react-docstore"""', 'verbose': '(True)'}), "(docstore_tools, docstore_llm, agent='react-docstore',\n verbose=True)\n", (1384, 1456), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((1625, 1695), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['requests_result']", 'template': 'template'}), "(input_variables=['requests_result'], template=template)\n", (1639, 1695), False, 'from langchain.prompts import PromptTemplate\n'), ((2261, 2278), 'utils.giphy.GiphyAPIWrapper', 'GiphyAPIWrapper', ([], {}), '()\n', (2276, 2278), False, 'from utils.giphy import GiphyAPIWrapper\n'), ((6817, 6868), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""'}), "(memory_key='chat_history')\n", (6841, 6868), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((7054, 7105), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0.5, model_name='gpt-3.5-turbo')\n", (7060, 7105), False, 'from langchain import Wikipedia, OpenAI\n'), ((7448, 7523), 'langchain.agents.conversational.base.ConversationalAgent', 'ConversationalAgent', ([], {'llm_chain': 'llm_chain', 'ai_prefix': 'ai_prefix', 'verbose': '(True)'}), '(llm_chain=llm_chain, ai_prefix=ai_prefix, verbose=True)\n', (7467, 7523), False, 'from langchain.agents.conversational.base import ConversationalAgent\n'), ((7577, 7693), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent_obj', 'tools': 'tools', 'verbose': '(True)', 'max_iterations': '(5)', 'memory': 'memory'}), '(agent=agent_obj, tools=tools, verbose=\n True, max_iterations=5, memory=memory)\n', (7611, 7693), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((976, 987), 'langchain.Wikipedia', 'Wikipedia', ([], {}), '()\n', (985, 987), False, 'from langchain import Wikipedia, OpenAI\n'), ((1028, 1101), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'docstore.search', 'description': '"""Search wikipedia"""'}), "(name='Search', func=docstore.search, description='Search wikipedia')\n", (1032, 1101), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((1115, 1200), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Lookup"""', 'func': 'docstore.lookup', 'description': '"""Lookup a wikipedia page"""'}), "(name='Lookup', func=docstore.lookup, description='Lookup a wikipedia page'\n )\n", (1119, 1200), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((4690, 4706), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (4704, 4706), False, 'from langchain import SerpAPIWrapper\n'), ((2108, 2124), 
'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2122, 2124), False, 'from datetime import datetime\n'), ((2637, 2700), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'verbose': '(True)'}), "(temperature=0, model_name='gpt-3.5-turbo', verbose=True)\n", (2643, 2700), False, 'from langchain import Wikipedia, OpenAI\n'), ((3099, 3281), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""WikipediaSearch"""', 'description': '"""Useful for answering a wide range of factual, scientific, academic, political and historical questions."""', 'func': 'docstore_agent.run'}), "(name='WikipediaSearch', description=\n 'Useful for answering a wide range of factual, scientific, academic, political and historical questions.'\n , func=docstore_agent.run)\n", (3103, 3281), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((3348, 3630), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""GiphySearch"""', 'func': 'giphy.run', 'return_direct': '(True)', 'description': '"""useful for when you need to find a gif or picture, and for adding humor to your replies. Input should be a query, and output will be an html embed code which you MUST include in your Final Answer."""'}), "(name='GiphySearch', func=giphy.run, return_direct=True, description=\n 'useful for when you need to find a gif or picture, and for adding humor to your replies. Input should be a query, and output will be an html embed code which you MUST include in your Final Answer.'\n )\n", (3352, 3630), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((3713, 3947), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Requests"""', 'func': 'requests_tool', 'description': '"""A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page."""'}), "(name='Requests', func=requests_tool, description=\n 'A portal to the internet. Use this when you need to get specific content from a site. 
Input should be a specific url, and the output will be all the text on that page.'\n )\n", (3717, 3947), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((4750, 4931), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'serpapi.run', 'description': '"""Use this tool for questions relating to current events, or when you can\'t find an answer using any of the other tools."""'}), '(name=\'Search\', func=serpapi.run, description=\n "Use this tool for questions relating to current events, or when you can\'t find an answer using any of the other tools."\n )\n', (4754, 4931), False, 'from langchain.agents import ZeroShotAgent, Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((7176, 7299), 'langchain.agents.conversational.base.ConversationalAgent.create_prompt', 'ConversationalAgent.create_prompt', (['tools'], {'ai_prefix': 'ai_prefix', 'human_prefix': 'human_prefix', 'prefix': 'prefix', 'suffix': 'suffix'}), '(tools, ai_prefix=ai_prefix, human_prefix=\n human_prefix, prefix=prefix, suffix=suffix)\n', (7209, 7299), False, 'from langchain.agents.conversational.base import ConversationalAgent\n'), ((4294, 4318), 'langchain.utilities.GoogleSearchAPIWrapper', 'GoogleSearchAPIWrapper', ([], {}), '()\n', (4316, 4318), False, 'from langchain.utilities import GoogleSearchAPIWrapper\n'), ((1860, 1881), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1866, 1881), False, 'from langchain import Wikipedia, OpenAI\n')] |
import os
import pinecone
from rich.console import Console
from rich.markdown import Markdown
import langchain
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import Pinecone
# langchain.debug = True
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
PINECONE_API_KEY = os.getenv("PINECONE_KEY")
PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENVIRONMENT")
PINECONE_INDEX = os.getenv("PINECONE_INDEX")
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)
index = pinecone.Index(PINECONE_INDEX)
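# The third positional argument ("text") below is the metadata key under which each
# vector's page content is stored in the Pinecone index (the vectorstore's text_key).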
vector_store = Pinecone(index, embeddings, "text")
prompt_template = """
You are a question-answering bot for Airbyte company employees and will be provided
relevant context from Notion pages in the Airbyte company knowledge base.
Whenever you are asked a question, answer it as helpfully as you can and include
links to the relevant pages for further information. If you are not sure, say so,
but still provide links to anything that might be helpful to the questioner.
Only use the provided context. Do not guess and do not use prior knowledge.
Please provide your response in markdown format, starting with a level 2 header
that summarizes the answer.
Notion context for this question:
{context}
Question: {question}
Please provide a helpful answer along with one or more URLs that would be useful for finding additional information:
"""
prompt = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
qa = RetrievalQA.from_chain_type(
llm=OpenAI(temperature=0, openai_api_key=OPENAI_API_KEY),
chain_type="stuff",
retriever=vector_store.as_retriever(),
chain_type_kwargs={"prompt": prompt},
)
console = Console()
console.print(Markdown("\n------\n> What do you want to know?"))
console.print("")
while True:
try:
query = input("")
except KeyboardInterrupt:
console.print("\n")
console.print(Markdown("_Goodbye!_ 👋"))
exit(0)
answer = qa.run(query)
console.print(Markdown(answer))
console.print(Markdown("\n------\n> What else do you want to know?\n"))
console.print("\n")
| [
"langchain.llms.OpenAI",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.prompts.PromptTemplate",
"langchain.vectorstores.Pinecone"
] | [((371, 398), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (380, 398), False, 'import os\n'), ((418, 443), 'os.getenv', 'os.getenv', (['"""PINECONE_KEY"""'], {}), "('PINECONE_KEY')\n", (427, 443), False, 'import os\n'), ((467, 500), 'os.getenv', 'os.getenv', (['"""PINECONE_ENVIRONMENT"""'], {}), "('PINECONE_ENVIRONMENT')\n", (476, 500), False, 'import os\n'), ((518, 545), 'os.getenv', 'os.getenv', (['"""PINECONE_INDEX"""'], {}), "('PINECONE_INDEX')\n", (527, 545), False, 'import os\n'), ((560, 607), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (576, 607), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((608, 681), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'PINECONE_API_KEY', 'environment': 'PINECONE_ENVIRONMENT'}), '(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)\n', (621, 681), False, 'import pinecone\n'), ((690, 720), 'pinecone.Index', 'pinecone.Index', (['PINECONE_INDEX'], {}), '(PINECONE_INDEX)\n', (704, 720), False, 'import pinecone\n'), ((736, 771), 'langchain.vectorstores.Pinecone', 'Pinecone', (['index', 'embeddings', '"""text"""'], {}), "(index, embeddings, 'text')\n", (744, 771), False, 'from langchain.vectorstores import Pinecone\n'), ((1643, 1728), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (1657, 1728), False, 'from langchain.prompts import PromptTemplate\n'), ((1950, 1959), 'rich.console.Console', 'Console', ([], {}), '()\n', (1957, 1959), False, 'from rich.console import Console\n'), ((1975, 2026), 'rich.markdown.Markdown', 'Markdown', (['"""\n------\n> What do you want to know?"""'], {}), '("""\n------\n> What do you want to know?""")\n', (1983, 2026), False, 'from rich.markdown import Markdown\n'), ((1774, 1826), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'OPENAI_API_KEY'}), '(temperature=0, openai_api_key=OPENAI_API_KEY)\n', (1780, 1826), False, 'from langchain.llms import OpenAI\n'), ((2259, 2275), 'rich.markdown.Markdown', 'Markdown', (['answer'], {}), '(answer)\n', (2267, 2275), False, 'from rich.markdown import Markdown\n'), ((2296, 2353), 'rich.markdown.Markdown', 'Markdown', (['"""\n------\n> What else do you want to know?\n"""'], {}), '("""\n------\n> What else do you want to know?\n""")\n', (2304, 2353), False, 'from rich.markdown import Markdown\n'), ((2171, 2195), 'rich.markdown.Markdown', 'Markdown', (['"""_Goodbye!_ 👋"""'], {}), "('_Goodbye!_ 👋')\n", (2179, 2195), False, 'from rich.markdown import Markdown\n')] |
from typing import Union, Callable, List, Dict, Any, TypeVar
from lionagi.libs.sys_util import SysUtil
T = TypeVar("T")
def to_langchain_document(datanode: T, **kwargs: Any) -> Any:
"""
Converts a generic data node into a Langchain Document.
This function transforms a node, typically from another data schema, into a Langchain Document format.
It requires the source node to have a `to_dict` method to convert it into a dictionary, then it renames specific keys
to match the Langchain Document schema before creating a Langchain Document object.
Args:
datanode (T): The data node to convert. Must have a `to_dict` method.
**kwargs: Additional keyword arguments to be passed to the Langchain Document constructor.
Returns:
Any: An instance of `LangchainDocument` populated with data from the input node.
"""
SysUtil.check_import("langchain")
from langchain.schema import Document as LangchainDocument
dnode = datanode.to_dict()
SysUtil.change_dict_key(dnode, old_key="content", new_key="page_content")
SysUtil.change_dict_key(dnode, old_key="lc_id", new_key="id_")
dnode = {**dnode, **kwargs}
return LangchainDocument(**dnode)
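# Illustrative usage (hypothetical `my_node`; any object whose `to_dict()` returns
# "content" and "lc_id" keys will work):
#   doc = to_langchain_document(my_node, metadata={"source": "notes.md"})
#   print(doc.page_content)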
def langchain_loader(
loader: Union[str, Callable],
loader_args: List[Any] = [],
loader_kwargs: Dict[str, Any] = {},
) -> Any:
"""
Initializes and uses a specified loader to load data within the Langchain ecosystem.
This function supports dynamically selecting a loader by name or directly using a loader function.
It passes specified arguments and keyword arguments to the loader for data retrieval or processing.
Args:
loader (Union[str, Callable]): A string representing the loader's name or a callable loader function.
loader_args (List[Any], optional): A list of positional arguments for the loader.
loader_kwargs (Dict[str, Any], optional): A dictionary of keyword arguments for the loader.
Returns:
Any: The result returned by the loader function, typically data loaded into a specified format.
Raises:
ValueError: If the loader cannot be initialized or fails to load data.
Examples:
        >>> data = langchain_loader("TextLoader", loader_args=["data.txt"])
        >>> isinstance(data, list)
        True
"""
SysUtil.check_import("langchain")
import langchain_community.document_loaders as document_loaders
try:
if isinstance(loader, str):
loader = getattr(document_loaders, loader)
except Exception as e:
raise ValueError(f"Invalid loader: {loader}. Error: {e}")
try:
loader_obj = loader(*loader_args, **loader_kwargs)
data = loader_obj.load()
return data
except Exception as e:
raise ValueError(f"Failed to load. Error: {e}")
def langchain_text_splitter(
data: Union[str, List],
splitter: Union[str, Callable],
splitter_args: List[Any] = None,
splitter_kwargs: Dict[str, Any] = None,
) -> List[str]:
"""
Splits text or a list of texts using a specified Langchain text splitter.
This function allows for dynamic selection of a text splitter, either by name or as a function, to split text
or documents into chunks. The splitter can be configured with additional arguments and keyword arguments.
Args:
data (Union[str, List]): The text or list of texts to be split.
splitter (Union[str, Callable]): The name of the splitter function or the splitter function itself.
splitter_args (List[Any], optional): Positional arguments to pass to the splitter function.
splitter_kwargs (Dict[str, Any], optional): Keyword arguments to pass to the splitter function.
Returns:
List[str]: A list of text chunks produced by the text splitter.
Raises:
ValueError: If the splitter is invalid or fails during the split operation.
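    Examples:
        Illustrative sketch; assumes `RecursiveCharacterTextSplitter` is exposed by
        `langchain_text_splitters` and accepts `chunk_size`/`chunk_overlap` kwargs.

        >>> chunks = langchain_text_splitter(
        ...     "some long text", "RecursiveCharacterTextSplitter",
        ...     splitter_kwargs={"chunk_size": 100, "chunk_overlap": 0},
        ... )
        >>> isinstance(chunks, list)
        True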
"""
splitter_args = splitter_args or []
splitter_kwargs = splitter_kwargs or {}
SysUtil.check_import("langchain")
import langchain_text_splitters as text_splitter
try:
if isinstance(splitter, str):
splitter = getattr(text_splitter, splitter)
except Exception as e:
raise ValueError(f"Invalid text splitter: {splitter}. Error: {e}")
try:
splitter_obj = splitter(*splitter_args, **splitter_kwargs)
if isinstance(data, str):
chunk = splitter_obj.split_text(data)
else:
chunk = splitter_obj.split_documents(data)
return chunk
except Exception as e:
raise ValueError(f"Failed to split. Error: {e}")
| [
"langchain.schema.Document"
] | [((110, 122), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (117, 122), False, 'from typing import Union, Callable, List, Dict, Any, TypeVar\n'), ((877, 910), 'lionagi.libs.sys_util.SysUtil.check_import', 'SysUtil.check_import', (['"""langchain"""'], {}), "('langchain')\n", (897, 910), False, 'from lionagi.libs.sys_util import SysUtil\n'), ((1010, 1083), 'lionagi.libs.sys_util.SysUtil.change_dict_key', 'SysUtil.change_dict_key', (['dnode'], {'old_key': '"""content"""', 'new_key': '"""page_content"""'}), "(dnode, old_key='content', new_key='page_content')\n", (1033, 1083), False, 'from lionagi.libs.sys_util import SysUtil\n'), ((1088, 1150), 'lionagi.libs.sys_util.SysUtil.change_dict_key', 'SysUtil.change_dict_key', (['dnode'], {'old_key': '"""lc_id"""', 'new_key': '"""id_"""'}), "(dnode, old_key='lc_id', new_key='id_')\n", (1111, 1150), False, 'from lionagi.libs.sys_util import SysUtil\n'), ((1194, 1220), 'langchain.schema.Document', 'LangchainDocument', ([], {}), '(**dnode)\n', (1211, 1220), True, 'from langchain.schema import Document as LangchainDocument\n'), ((2342, 2375), 'lionagi.libs.sys_util.SysUtil.check_import', 'SysUtil.check_import', (['"""langchain"""'], {}), "('langchain')\n", (2362, 2375), False, 'from lionagi.libs.sys_util import SysUtil\n'), ((4063, 4096), 'lionagi.libs.sys_util.SysUtil.check_import', 'SysUtil.check_import', (['"""langchain"""'], {}), "('langchain')\n", (4083, 4096), False, 'from lionagi.libs.sys_util import SysUtil\n')] |
import re
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import langchain
from langchain import LLMChain
from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
from .prompts import (FINAL_ANSWER_ACTION, FORMAT_INSTRUCTIONS,
QUESTION_PROMPT, SUFFIX)
class ChatZeroShotOutputParser(AgentOutputParser):
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
if FINAL_ANSWER_ACTION in text:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
        # Strip a leading "Thought:" prefix if present
if text.startswith('Thought:'):
text = text[8:]
# \s matches against tab/newline/whitespace
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
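        # For example, given the (hypothetical) LLM output
        #   "Action: Search\nAction Input: weather in SF"
        # the regex captures "Search" as the action and "weather in SF" as its input.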
match = re.search(regex, text, re.DOTALL)
if not match:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
action = match.group(1).strip()
action_input = match.group(2)
return AgentAction(action, action_input.strip(" ").strip('"'), text.strip())
| [
"langchain.schema.OutputParserException"
] | [((1023, 1056), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (1032, 1056), False, 'import re\n'), ((1097, 1159), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {}), "(f'Could not parse LLM output: `{text}`')\n", (1118, 1159), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n')] |
from typing import List
from uuid import uuid4
from langchain.prompts import ChatPromptTemplate
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_community.chat_models.fake import FakeListChatModel
from honcho import Honcho
from honcho.ext.langchain import langchain_message_converter
app_name = str(uuid4())
honcho = Honcho(
app_name=app_name, base_url="http://localhost:8000"
)  # uses a local Honcho server; swap with the commented line below for the hosted demo
# honcho = Honcho(app_name=app_name) # uses demo server at https://demo.honcho.dev
honcho.initialize()
responses = ["Fake LLM Response :)"]
llm = FakeListChatModel(responses=responses)
system = SystemMessage(
content="You are world class technical documentation writer. Be as concise as possible"
)
user_name = "CLI-Test"
user = honcho.create_user(user_name)
session = user.create_session()
# def langchain_message_converter(messages: List):
# new_messages = []
# for message in messages:
# if message.is_user:
# new_messages.append(HumanMessage(content=message.content))
# else:
# new_messages.append(AIMessage(content=message.content))
# return new_messages
def chat():
while True:
user_input = input("User: ")
if user_input == "exit":
session.close()
break
user_message = HumanMessage(content=user_input)
history = list(session.get_messages_generator())
langchain_history = langchain_message_converter(history)
prompt = ChatPromptTemplate.from_messages(
[system, *langchain_history, user_message]
)
chain = prompt | llm
response = chain.invoke({})
print(type(response))
print(f"AI: {response.content}")
session.create_message(is_user=True, content=user_input)
session.create_message(is_user=False, content=response.content)
chat()
| [
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain_community.chat_models.fake.FakeListChatModel",
"langchain.schema.SystemMessage",
"langchain.schema.HumanMessage"
] | [((356, 415), 'honcho.Honcho', 'Honcho', ([], {'app_name': 'app_name', 'base_url': '"""http://localhost:8000"""'}), "(app_name=app_name, base_url='http://localhost:8000')\n", (362, 415), False, 'from honcho import Honcho\n'), ((596, 634), 'langchain_community.chat_models.fake.FakeListChatModel', 'FakeListChatModel', ([], {'responses': 'responses'}), '(responses=responses)\n', (613, 634), False, 'from langchain_community.chat_models.fake import FakeListChatModel\n'), ((644, 756), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are world class technical documentation writer. Be as concise as possible"""'}), "(content=\n 'You are world class technical documentation writer. Be as concise as possible'\n )\n", (657, 756), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((337, 344), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (342, 344), False, 'from uuid import uuid4\n'), ((1338, 1370), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_input'}), '(content=user_input)\n', (1350, 1370), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((1456, 1492), 'honcho.ext.langchain.langchain_message_converter', 'langchain_message_converter', (['history'], {}), '(history)\n', (1483, 1492), False, 'from honcho.ext.langchain import langchain_message_converter\n'), ((1510, 1586), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system, *langchain_history, user_message]'], {}), '([system, *langchain_history, user_message])\n', (1542, 1586), False, 'from langchain.prompts import ChatPromptTemplate\n')] |
""" A simple cloud consultant bot that can answer questions
about kubernetes, aws and cloud native."""
import langchain
from langchain.agents import Tool, AgentType, initialize_agent
from langchain.tools import HumanInputRun
from langchain.callbacks import HumanApprovalCallbackHandler
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from termcolor import colored
from cloud_tool import CloudTool
from approval import ApprovalCallBackHandler
langchain.debug = False
MODEL = "gpt-3.5-turbo"
cloud_tool = CloudTool(callbacks=[ApprovalCallBackHandler()])
cloud_tool.description = cloud_tool.description + f"args {cloud_tool.args}".replace(
"{", "{{"
).replace("}", "}}")
human = HumanInputRun()
llm = ChatOpenAI(temperature=0, model=MODEL)
embeddings = OpenAIEmbeddings()
vectorstore = Chroma(persist_directory="./", embedding_function=embeddings)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
kubememory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
awsmemory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
docs = ConversationalRetrievalChain.from_llm(
llm, vectorstore.as_retriever(), memory=memory
)
cloud_tools = [cloud_tool, human]
kubectl_agent_chain = initialize_agent(
tools=cloud_tools,
llm=llm,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
memory=kubememory,
verbose=False,
agent_kwargs={
"prefix": """
You are a Kubernetes Command line tool (kubectl) expert.
Given an input question, first create a syntactically correct kubectl command to run, then look at the results of the command and return the answer to the input question.
            If there is no namespace name given, please use the "default" namespace.
Only return the command. If an error is returned, rewrite the command, check the command, and try again.
""",
},
)
aws_agent_chain = initialize_agent(
cloud_tools,
llm,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
memory=awsmemory,
verbose=False,
agent_kwargs={
"prefix": """
        You are an AWS Command line tool (aws cli) expert. 
        Given an input question, first create a syntactically correct aws cli command to run, then look at the results of the command and return the answer to the input question.
        You must generate the correct aws cli command to answer the question.
Only return the command. If an error is returned, rewrite the command, check the command, and try again.
""",
},
)
tools = [
Tool(
name="Kubernetes QA System",
func=docs.run,
description="useful for when you need to answer questions about kubernetes or cloud native and from the kubernetes or cloud native documentation. input should be a fully formed question.",
),
Tool(
name="Kubectl",
func=kubectl_agent_chain.run,
description="useful for when you need to use kubectl to look up, change or update your kubernetes cluster.",
),
Tool(
name="Aws CLI",
func=aws_agent_chain.run,
description="useful for when you need to use aws cli to look up, change or update your AWS setup.",
),
human,
]
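# Illustrative routing (hypothetical queries): "what is a DaemonSet?" would be
# answered by the "Kubernetes QA System" retrieval tool, while "list the pods in
# the default namespace" would be delegated to the "Kubectl" sub-agent, which
# generates and runs the corresponding kubectl command.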
agent_chain = initialize_agent(
tools,
llm,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
verbose=False,
memory=memory,
)
def ask_ai():
"""Main method to talk to the ai"""
print(
colored(
"Welcome, i am Your AI cloud consultant. How can i help You today?", "green"
)
)
try:
while True:
query = input(colored("You: ", "white", attrs=["bold"]))
result = agent_chain.run(input=query)
print(
colored("Answer: ", "green", attrs=["bold"]),
colored(result, "light_green"),
)
except (EOFError, KeyboardInterrupt):
print("kthxbye")
exit()
if __name__ == "__main__":
ask_ai()
| [
"langchain.agents.initialize_agent",
"langchain.tools.HumanInputRun",
"langchain.memory.ConversationBufferMemory",
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.chat_models.ChatOpenAI",
"langchain.agents.Tool",
"langchain.vectorstores.Chroma"
] | [((893, 908), 'langchain.tools.HumanInputRun', 'HumanInputRun', ([], {}), '()\n', (906, 908), False, 'from langchain.tools import HumanInputRun\n'), ((917, 955), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': 'MODEL'}), '(temperature=0, model=MODEL)\n', (927, 955), False, 'from langchain.chat_models import ChatOpenAI\n'), ((969, 987), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (985, 987), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1002, 1063), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""./"""', 'embedding_function': 'embeddings'}), "(persist_directory='./', embedding_function=embeddings)\n", (1008, 1063), False, 'from langchain.vectorstores import Chroma\n'), ((1073, 1146), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (1097, 1146), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1160, 1233), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (1184, 1233), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1246, 1319), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (1270, 1319), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1477, 2069), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'cloud_tools', 'llm': 'llm', 'agent': 'AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION', 'memory': 'kubememory', 'verbose': '(False)', 'agent_kwargs': '{\'prefix\':\n """\nYou are a Kubernetes Command line tool (kubectl) expert. \nGiven an input question, first create a syntactically correct kubectl command to run, then look at the results of the command and return the answer to the input question.\nIf there is no namespace name given please use the "default" namespace.\nOnly return the command. If an error is returned, rewrite the command, check the command, and try again.\n\n"""\n }'}), '(tools=cloud_tools, llm=llm, agent=AgentType.\n CHAT_CONVERSATIONAL_REACT_DESCRIPTION, memory=kubememory, verbose=False,\n agent_kwargs={\'prefix\':\n """\nYou are a Kubernetes Command line tool (kubectl) expert. \nGiven an input question, first create a syntactically correct kubectl command to run, then look at the results of the command and return the answer to the input question.\nIf there is no namespace name given please use the "default" namespace.\nOnly return the command. If an error is returned, rewrite the command, check the command, and try again.\n\n"""\n })\n', (1493, 2069), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((2113, 2682), 'langchain.agents.initialize_agent', 'initialize_agent', (['cloud_tools', 'llm'], {'agent': 'AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION', 'memory': 'awsmemory', 'verbose': '(False)', 'agent_kwargs': '{\'prefix\':\n """\nYou are a AWS Command line tool (aws cli) expert. 
\nGiven an input question, first create a syntactically correct aws cli command to run, then look at the results of the query and return the answer to the input question.\nYou must generate the correct aws cli command to answer he question. \nOnly return the command. If an error is returned, rewrite the command, check the command, and try again.\n"""\n }'}), '(cloud_tools, llm, agent=AgentType.\n CHAT_CONVERSATIONAL_REACT_DESCRIPTION, memory=awsmemory, verbose=False,\n agent_kwargs={\'prefix\':\n """\nYou are a AWS Command line tool (aws cli) expert. \nGiven an input question, first create a syntactically correct aws cli command to run, then look at the results of the query and return the answer to the input question.\nYou must generate the correct aws cli command to answer he question. \nOnly return the command. If an error is returned, rewrite the command, check the command, and try again.\n"""\n })\n', (2129, 2682), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((3401, 3519), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION', 'verbose': '(False)', 'memory': 'memory'}), '(tools, llm, agent=AgentType.\n CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=False, memory=memory)\n', (3417, 3519), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((2723, 2970), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Kubernetes QA System"""', 'func': 'docs.run', 'description': '"""useful for when you need to answer questions about kubernetes or cloud native and from the kubernetes or cloud native documentation. input should be a fully formed question."""'}), "(name='Kubernetes QA System', func=docs.run, description=\n 'useful for when you need to answer questions about kubernetes or cloud native and from the kubernetes or cloud native documentation. input should be a fully formed question.'\n )\n", (2727, 2970), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((2997, 3166), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Kubectl"""', 'func': 'kubectl_agent_chain.run', 'description': '"""useful for when you need to use kubectl to look up, change or update your kubernetes cluster."""'}), "(name='Kubectl', func=kubectl_agent_chain.run, description=\n 'useful for when you need to use kubectl to look up, change or update your kubernetes cluster.'\n )\n", (3001, 3166), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((3193, 3349), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Aws CLI"""', 'func': 'aws_agent_chain.run', 'description': '"""useful for when you need to use aws cli to look up, change or update your AWS setup."""'}), "(name='Aws CLI', func=aws_agent_chain.run, description=\n 'useful for when you need to use aws cli to look up, change or update your AWS setup.'\n )\n", (3197, 3349), False, 'from langchain.agents import Tool, AgentType, initialize_agent\n'), ((3613, 3702), 'termcolor.colored', 'colored', (['"""Welcome, i am Your AI cloud consultant. How can i help You today?"""', '"""green"""'], {}), "('Welcome, i am Your AI cloud consultant. 
How can i help You today?',\n 'green')\n", (3620, 3702), False, 'from termcolor import colored\n'), ((736, 761), 'approval.ApprovalCallBackHandler', 'ApprovalCallBackHandler', ([], {}), '()\n', (759, 761), False, 'from approval import ApprovalCallBackHandler\n'), ((3782, 3823), 'termcolor.colored', 'colored', (['"""You: """', '"""white"""'], {'attrs': "['bold']"}), "('You: ', 'white', attrs=['bold'])\n", (3789, 3823), False, 'from termcolor import colored\n'), ((3910, 3954), 'termcolor.colored', 'colored', (['"""Answer: """', '"""green"""'], {'attrs': "['bold']"}), "('Answer: ', 'green', attrs=['bold'])\n", (3917, 3954), False, 'from termcolor import colored\n'), ((3972, 4002), 'termcolor.colored', 'colored', (['result', '"""light_green"""'], {}), "(result, 'light_green')\n", (3979, 4002), False, 'from termcolor import colored\n')] |
import langchain.text_splitter as splitter
# local imports
import settings_template as settings
class SplitterCreator():
"""
Splitter class to import into other modules
"""
def __init__(self, text_splitter_method=None, chunk_size=None, chunk_overlap=None) -> None:
self.text_splitter_method = settings.TEXT_SPLITTER_METHOD \
if text_splitter_method is None else text_splitter_method
self.chunk_size = settings.CHUNK_SIZE if chunk_size is None else chunk_size
self.chunk_overlap = settings.CHUNK_OVERLAP if chunk_overlap is None else chunk_overlap
def get_splitter(self):
"""
Get the text splitter object
"""
if self.text_splitter_method == "NLTKTextSplitter":
text_splitter = splitter.NLTKTextSplitter(
separator="\n\n",
language="english",
chunk_size=self.chunk_size,
chunk_overlap=self.chunk_overlap
)
elif self.text_splitter_method == "RecursiveCharacterTextSplitter":
text_splitter = splitter.RecursiveCharacterTextSplitter(
chunk_size=self.chunk_size,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=self.chunk_overlap
)
return text_splitter
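# Illustrative usage (`long_text` is a placeholder string; chunk sizes come from
# settings_template unless overridden):
#   creator = SplitterCreator("RecursiveCharacterTextSplitter")
#   chunks = creator.get_splitter().split_text(long_text)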
| [
"langchain.text_splitter.NLTKTextSplitter",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((779, 909), 'langchain.text_splitter.NLTKTextSplitter', 'splitter.NLTKTextSplitter', ([], {'separator': '"""\n\n"""', 'language': '"""english"""', 'chunk_size': 'self.chunk_size', 'chunk_overlap': 'self.chunk_overlap'}), "(separator='\\n\\n', language='english', chunk_size=\n self.chunk_size, chunk_overlap=self.chunk_overlap)\n", (804, 909), True, 'import langchain.text_splitter as splitter\n'), ((1087, 1253), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'self.chunk_size', 'separators': "['\\n\\n', '\\n', '.', '!', '?', ',', ' ', '']", 'chunk_overlap': 'self.chunk_overlap'}), "(chunk_size=self.chunk_size,\n separators=['\\n\\n', '\\n', '.', '!', '?', ',', ' ', ''], chunk_overlap=\n self.chunk_overlap)\n", (1126, 1253), True, 'import langchain.text_splitter as splitter\n')] |
from langchain.cache import SQLiteCache
import langchain
from pydantic import BaseModel
from creator.code_interpreter import CodeInterpreter
from creator.config.load_config import load_yaml_config
import os
# Load configuration from YAML
yaml_config = load_yaml_config()
# Helper function to prepend '~/' to paths if not present
def resolve_path(path):
if not path.startswith("~"):
return os.path.expanduser("~/" + path)
return os.path.expanduser(path)
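# For example, on a machine whose home directory is "/home/user":
#   resolve_path(".cache/open_creator") -> "/home/user/.cache/open_creator"
#   resolve_path("~/data")              -> "/home/user/data"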
project_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
# Fetch values from the loaded YAML config or set default values
_local_skill_library_path = resolve_path(yaml_config.get("LOCAL_SKILL_LIBRARY_PATH", ".cache/open_creator/skill_library"))
_remote_skill_library_path = resolve_path(yaml_config.get("REMOTE_SKILL_LIBRARY_PATH", ".cache/open_creator/remote"))
_local_skill_library_vectordb_path = resolve_path(yaml_config.get("LOCAL_SKILL_LIBRARY_VECTORD_PATH", ".cache/open_creator/vectordb/"))
_prompt_cache_history_path = resolve_path(yaml_config.get("PROMPT_CACHE_HISTORY_PATH", ".cache/open_creator/prompt_cache/"))
_logger_cache_path = resolve_path(yaml_config.get("LOGGER_CACHE_PATH", ".cache/open_creator/logs/"))
_skill_extract_agent_cache_path = resolve_path(yaml_config.get("SKILL_EXTRACT_AGENT_CACHE_PATH", ".cache/open_creator/llm_cache"))
_official_skill_library_path = resolve_path(yaml_config.get("OFFICIAL_SKILL_LIBRARY_PATH", "timedomain/skill-library"))
_official_skill_library_template_path = resolve_path(yaml_config.get("OFFICIAL_SKILL_LIBRARY_TEMPLATE_PATH", "timedomain/skill-library-template"))
_model = yaml_config.get("MODEL_NAME", "gpt-3.5-turbo-16k-0613")
_temperature = yaml_config.get("TEMPERATURE", 0)
_run_human_confirm = yaml_config.get("RUN_HUMAN_CONFIRM", False)
_use_stream_callback = yaml_config.get("USE_STREAM_CALLBACK", True)
_build_in_skill_library_dir = yaml_config.get("BUILD_IN_SKILL_LIBRARY_DIR", "skill_library/open-creator/")
_build_in_skill_library_dir = os.path.join(project_dir, _build_in_skill_library_dir)
# Ensure directories exist
for path in [_skill_extract_agent_cache_path, _local_skill_library_path, _local_skill_library_vectordb_path, _prompt_cache_history_path, _logger_cache_path]:
if not os.path.exists(path):
os.makedirs(path)
_log_file = os.path.join(_logger_cache_path, "output.log")
if not os.path.exists(_log_file):
    open(_log_file, 'a').close()
# Ensure the history file exists
_history_file = os.path.join(_prompt_cache_history_path, "history.txt")
if not os.path.exists(_history_file):
    open(_history_file, 'a').close()
build_in_skill_library_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
build_in_skill_config = {
"create": os.path.join(_build_in_skill_library_dir, "create"),
"save": os.path.join(_build_in_skill_library_dir, "save"),
"search": os.path.join(_build_in_skill_library_dir, "search"),
}  # Paths to the built-in skills that ship with open-creator
class LibraryConfig(BaseModel):
local_skill_library_path: str = _local_skill_library_path
remote_skill_library_path: str = _remote_skill_library_path
local_skill_library_vectordb_path: str = _local_skill_library_vectordb_path
prompt_cache_history_path: str = _prompt_cache_history_path
logger_cache_path: str = _logger_cache_path
skill_extract_agent_cache_path: str = _skill_extract_agent_cache_path
model: str = _model
temperature: float = _temperature
official_skill_library_path: str = _official_skill_library_path
official_skill_library_template_path: str = _official_skill_library_template_path
build_in_skill_config: dict = build_in_skill_config
run_human_confirm: bool = _run_human_confirm
use_stream_callback: bool = _use_stream_callback
code_interpreter: CodeInterpreter = CodeInterpreter()
# prompt paths
refactor_agent_prompt_path: str = os.path.join(project_dir, "prompts", "refactor_agent_prompt.md")
codeskill_function_schema_path: str = os.path.join(project_dir, "prompts", "codeskill_function_schema.json")
creator_agent_prompt_path: str = os.path.join(project_dir, "prompts", "creator_agent_prompt.md")
api_doc_path: str = os.path.join(project_dir, "prompts", "api_doc.md")
extractor_agent_prompt_path: str = os.path.join(project_dir, "prompts", "extractor_agent_prompt.md")
interpreter_agent_prompt_path: str = os.path.join(project_dir, "prompts", "interpreter_agent_prompt.md")
tester_agent_prompt_path: str = os.path.join(project_dir, "prompts", "tester_agent_prompt.md")
testsummary_function_schema_path: str = os.path.join(project_dir, "prompts", "testsummary_function_schema.json")
tips_for_debugging_prompt_path: str = os.path.join(project_dir, "prompts", "tips_for_debugging_prompt.md")
tips_for_testing_prompt_path: str = os.path.join(project_dir, "prompts", "tips_for_testing_prompt.md")
tips_for_veryfy_prompt_path: str = os.path.join(project_dir, "prompts", "tips_for_veryfy_prompt.md")
use_rich: bool = True
use_file_logger: bool = False
config = LibraryConfig()
langchain.llm_cache = SQLiteCache(database_path=f"{config.skill_extract_agent_cache_path}/.langchain.db")
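# With the global llm_cache set, repeated identical LLM calls made through
# langchain are served from this SQLite file instead of re-querying the model.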
| [
"langchain.cache.SQLiteCache"
] | [((254, 272), 'creator.config.load_config.load_yaml_config', 'load_yaml_config', ([], {}), '()\n', (270, 272), False, 'from creator.config.load_config import load_yaml_config\n'), ((2004, 2058), 'os.path.join', 'os.path.join', (['project_dir', '_build_in_skill_library_dir'], {}), '(project_dir, _build_in_skill_library_dir)\n', (2016, 2058), False, 'import os\n'), ((5083, 5171), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': 'f"""{config.skill_extract_agent_cache_path}/.langchain.db"""'}), "(database_path=\n f'{config.skill_extract_agent_cache_path}/.langchain.db')\n", (5094, 5171), False, 'from langchain.cache import SQLiteCache\n'), ((448, 472), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (466, 472), False, 'import os\n'), ((2312, 2346), 'os.path.exists', 'os.path.exists', (['_logger_cache_path'], {}), '(_logger_cache_path)\n', (2326, 2346), False, 'import os\n'), ((2459, 2501), 'os.path.exists', 'os.path.exists', (['_prompt_cache_history_path'], {}), '(_prompt_cache_history_path)\n', (2473, 2501), False, 'import os\n'), ((2710, 2761), 'os.path.join', 'os.path.join', (['_build_in_skill_library_dir', '"""create"""'], {}), "(_build_in_skill_library_dir, 'create')\n", (2722, 2761), False, 'import os\n'), ((2775, 2824), 'os.path.join', 'os.path.join', (['_build_in_skill_library_dir', '"""save"""'], {}), "(_build_in_skill_library_dir, 'save')\n", (2787, 2824), False, 'import os\n'), ((2840, 2891), 'os.path.join', 'os.path.join', (['_build_in_skill_library_dir', '"""search"""'], {}), "(_build_in_skill_library_dir, 'search')\n", (2852, 2891), False, 'import os\n'), ((3789, 3806), 'creator.code_interpreter.CodeInterpreter', 'CodeInterpreter', ([], {}), '()\n', (3804, 3806), False, 'from creator.code_interpreter import CodeInterpreter\n'), ((3865, 3929), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""refactor_agent_prompt.md"""'], {}), "(project_dir, 'prompts', 'refactor_agent_prompt.md')\n", (3877, 3929), False, 'import os\n'), ((3972, 4042), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""codeskill_function_schema.json"""'], {}), "(project_dir, 'prompts', 'codeskill_function_schema.json')\n", (3984, 4042), False, 'import os\n'), ((4080, 4143), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""creator_agent_prompt.md"""'], {}), "(project_dir, 'prompts', 'creator_agent_prompt.md')\n", (4092, 4143), False, 'import os\n'), ((4168, 4218), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""api_doc.md"""'], {}), "(project_dir, 'prompts', 'api_doc.md')\n", (4180, 4218), False, 'import os\n'), ((4258, 4323), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""extractor_agent_prompt.md"""'], {}), "(project_dir, 'prompts', 'extractor_agent_prompt.md')\n", (4270, 4323), False, 'import os\n'), ((4365, 4432), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""interpreter_agent_prompt.md"""'], {}), "(project_dir, 'prompts', 'interpreter_agent_prompt.md')\n", (4377, 4432), False, 'import os\n'), ((4469, 4531), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""tester_agent_prompt.md"""'], {}), "(project_dir, 'prompts', 'tester_agent_prompt.md')\n", (4481, 4531), False, 'import os\n'), ((4576, 4648), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""testsummary_function_schema.json"""'], {}), "(project_dir, 'prompts', 'testsummary_function_schema.json')\n", (4588, 4648), False, 'import os\n'), ((4691, 
4759), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""tips_for_debugging_prompt.md"""'], {}), "(project_dir, 'prompts', 'tips_for_debugging_prompt.md')\n", (4703, 4759), False, 'import os\n'), ((4800, 4866), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""tips_for_testing_prompt.md"""'], {}), "(project_dir, 'prompts', 'tips_for_testing_prompt.md')\n", (4812, 4866), False, 'import os\n'), ((4906, 4971), 'os.path.join', 'os.path.join', (['project_dir', '"""prompts"""', '"""tips_for_veryfy_prompt.md"""'], {}), "(project_dir, 'prompts', 'tips_for_veryfy_prompt.md')\n", (4918, 4971), False, 'import os\n'), ((405, 436), 'os.path.expanduser', 'os.path.expanduser', (["('~/' + path)"], {}), "('~/' + path)\n", (423, 436), False, 'import os\n'), ((518, 543), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (533, 543), False, 'import os\n'), ((2256, 2276), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2270, 2276), False, 'import os\n'), ((2286, 2303), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2297, 2303), False, 'import os\n'), ((2641, 2666), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2656, 2666), False, 'import os\n'), ((2357, 2403), 'os.path.join', 'os.path.join', (['_logger_cache_path', '"""output.log"""'], {}), "(_logger_cache_path, 'output.log')\n", (2369, 2403), False, 'import os\n'), ((2512, 2567), 'os.path.join', 'os.path.join', (['_prompt_cache_history_path', '"""history.txt"""'], {}), "(_prompt_cache_history_path, 'history.txt')\n", (2524, 2567), False, 'import os\n')] |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.schemas import TracerSession
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
logger = logging.getLogger(__name__)
Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar("openai_callback", default=None)
tracing_callback_var: ContextVar[Optional[LangChainTracerV1]] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
def _get_debug() -> bool:
return langchain.debug
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get OpenAI callback handler in a context manager."""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
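# Illustrative usage (`llm` stands for any OpenAI-backed LLM, not defined here):
#   with get_openai_callback() as cb:
#       llm("tell me a joke")
#   print(cb.total_tokens)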
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get Tracer in a context manager."""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None)
@contextmanager
def tracing_v2_enabled(
session_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
tenant_id: Optional[str] = None,
session_extra: Optional[Dict[str, Any]] = None,
) -> Generator[TracerSession, None, None]:
"""Get the experimental tracer handler in a context manager."""
# Issue a warning that this is experimental
warnings.warn(
"The experimental tracing v2 is in development. " "This is not yet stable and may change in the future."
)
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
tenant_id=tenant_id,
session_name=session_name,
example_id=example_id,
session_extra=session_extra,
)
session = cb.ensure_session()
tracing_v2_callback_var.set(cb)
yield session
tracing_v2_callback_var.set(None)
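# Illustrative usage (`chain` stands for any chain or LLM call, not defined here):
#   with tracing_v2_enabled(session_name="my-session") as session:
#       chain.run("...")  # runs inside the block are reported to the v2 tracer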
def _handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager."""
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
getattr(handler, event_name)(*args, **kwargs)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
_handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
logging.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(None, functools.partial(event, *args, **kwargs))
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
logger.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager."""
await asyncio.gather(
*(
_ahandle_event_for_handler(handler, event_name, ignore_condition_name, *args, **kwargs)
for handler in handlers
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
) -> None:
"""Initialize run manager."""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations."""
return cls(uuid4(), [], [])
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
_handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
await _ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
_handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
_handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
_handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
await _ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
await _ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
await _ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
_handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
_handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
_handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
_handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
await _ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
await _ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
await _ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
await _ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running."""
_handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
_handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_tool_end_data_model(
self,
output,
**kwargs: Any,
):
"""Return the data model for the on_tool_end event."""
_handle_event(
self.handlers,
"on_tool_end_data_model",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
await _ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
await _ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that can be used to handle callbacks from langchain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
# Re-use the LLM Run Manager since the outputs are treated
# the same for now
return CallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForChainRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForToolRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> CallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that can be used to handle callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForChainRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForToolRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> AsyncCallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> T:
"""Configure the callback manager."""
callback_manager = callback_manager_cls([])
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers,
inheritable_handlers=inheritable_callbacks.inheritable_handlers,
parent_run_id=inheritable_callbacks.parent_run_id,
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
tracer = tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
os.environ.get("LANGCHAIN_TRACING") is not None
or tracer is not None
or os.environ.get("LANGCHAIN_HANDLER") is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = os.environ.get("LANGCHAIN_TRACING_V2") is not None or tracer_v2 is not None
tracer_session = os.environ.get("LANGCHAIN_SESSION")
debug = _get_debug()
if tracer_session is None:
tracer_session = "default"
if verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or open_ai is not None:
        if verbose and not any(
            isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers
        ):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
        if debug and not any(
            isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers
        ):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_session)
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer) for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(session_name=tracer_session)
handler.ensure_session()
callback_manager.add_handler(handler, True)
except Exception as e:
logger.debug("Unable to load requested LangChainTracer", e)
if open_ai is not None and not any(
isinstance(handler, OpenAICallbackHandler) for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
return callback_manager
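# Illustrative usage sketch (added for clarity; not part of the original
# module). It shows how configure() and the run manager it hands back are
# typically driven. ``PrintingHandler`` is a hypothetical handler written
# only for this example.
def _example_callback_flow() -> None:
    from langchain.callbacks.base import BaseCallbackHandler
    from langchain.schema import Generation, LLMResult

    class PrintingHandler(BaseCallbackHandler):
        def on_llm_start(self, serialized, prompts, **kwargs):
            print(f"LLM starting with {len(prompts)} prompt(s)")

    manager = CallbackManager.configure(
        inheritable_callbacks=[PrintingHandler()], verbose=True
    )
    # A chain or LLM normally makes these calls itself; shown explicitly here.
    run_manager = manager.on_llm_start({"name": "ExampleLLM"}, ["Hello world"])
    run_manager.on_llm_end(LLMResult(generations=[[Generation(text="hi")]]))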
| [
"langchain.schema.get_buffer_string",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.tracers.langchain_v1.LangChainTracerV1"
] | [((1036, 1063), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1053, 1063), False, 'import logging\n'), ((1208, 1251), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1218, 1251), False, 'from contextvars import ContextVar\n'), ((1316, 1360), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback"""'], {'default': 'None'}), "('tracing_callback', default=None)\n", (1326, 1360), False, 'from contextvars import ContextVar\n'), ((1446, 1493), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback_v2"""'], {'default': 'None'}), "('tracing_callback_v2', default=None)\n", (1456, 1493), False, 'from contextvars import ContextVar\n'), ((5790, 5828), 'typing.TypeVar', 'TypeVar', (['"""BRM"""'], {'bound': '"""BaseRunManager"""'}), "('BRM', bound='BaseRunManager')\n", (5797, 5828), False, 'from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((22935, 22986), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'CallbackManager', 'AsyncCallbackManager'], {}), "('T', CallbackManager, AsyncCallbackManager)\n", (22942, 22986), False, 'from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((1731, 1754), 'langchain.callbacks.openai_info.OpenAICallbackHandler', 'OpenAICallbackHandler', ([], {}), '()\n', (1752, 1754), False, 'from langchain.callbacks.openai_info import OpenAICallbackHandler\n'), ((2005, 2024), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (2022, 2024), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((2570, 2696), 'warnings.warn', 'warnings.warn', (['"""The experimental tracing v2 is in development. This is not yet stable and may change in the future."""'], {}), "(\n 'The experimental tracing v2 is in development. 
This is not yet stable and may change in the future.'\n )\n", (2583, 2696), False, 'import warnings\n'), ((2787, 2907), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'tenant_id': 'tenant_id', 'session_name': 'session_name', 'example_id': 'example_id', 'session_extra': 'session_extra'}), '(tenant_id=tenant_id, session_name=session_name, example_id=\n example_id, session_extra=session_extra)\n', (2802, 2907), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((24635, 24670), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_SESSION"""'], {}), "('LANGCHAIN_SESSION')\n", (24649, 24670), False, 'import os\n'), ((2761, 2777), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2765, 2777), False, 'from uuid import UUID, uuid4\n'), ((4562, 4596), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['event'], {}), '(event)\n', (4589, 4596), False, 'import asyncio\n'), ((6507, 6514), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (6512, 6514), False, 'from uuid import UUID, uuid4\n'), ((16682, 16689), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (16687, 16689), False, 'from uuid import UUID, uuid4\n'), ((17367, 17374), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (17372, 17374), False, 'from uuid import UUID, uuid4\n'), ((18148, 18155), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18153, 18155), False, 'from uuid import UUID, uuid4\n'), ((18861, 18868), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18866, 18868), False, 'from uuid import UUID, uuid4\n'), ((20121, 20128), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20126, 20128), False, 'from uuid import UUID, uuid4\n'), ((20760, 20767), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20765, 20767), False, 'from uuid import UUID, uuid4\n'), ((21471, 21478), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (21476, 21478), False, 'from uuid import UUID, uuid4\n'), ((22207, 22214), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22212, 22214), False, 'from uuid import UUID, uuid4\n'), ((24322, 24357), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_TRACING"""'], {}), "('LANGCHAIN_TRACING')\n", (24336, 24357), False, 'import os\n'), ((24411, 24446), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HANDLER"""'], {}), "('LANGCHAIN_HANDLER')\n", (24425, 24446), False, 'import os\n'), ((24538, 24576), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_TRACING_V2"""'], {}), "('LANGCHAIN_TRACING_V2')\n", (24552, 24576), False, 'import os\n'), ((4161, 4216), 'logging.warning', 'logging.warning', (['f"""Error in {event_name} callback: {e}"""'], {}), "(f'Error in {event_name} callback: {e}')\n", (4176, 4216), False, 'import logging\n'), ((25265, 25289), 'langchain.callbacks.tracers.stdout.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (25287, 25289), False, 'from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler\n'), ((25567, 25586), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (25584, 25586), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((4889, 4909), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (4906, 4909), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((25076, 25099), 'langchain.callbacks.stdout.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (25097, 25099), False, 'from langchain.callbacks.stdout import StdOutCallbackHandler\n'), ((26002, 
26046), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'tracer_session'}), '(session_name=tracer_session)\n', (26017, 26046), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((4730, 4771), 'functools.partial', 'functools.partial', (['event', '*args'], {}), '(event, *args, **kwargs)\n', (4747, 4771), False, 'import functools\n'), ((3713, 3733), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (3730, 3733), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((4683, 4707), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4705, 4707), False, 'import asyncio\n')] |
"""Base interface that all chains should implement."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Field, root_validator, validator
import langchain
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainRun,
CallbackManager,
CallbackManagerForChainRun,
Callbacks,
)
from langchain.schema import RUN_KEY, BaseMemory, RunInfo
def _get_verbosity() -> bool:
return langchain.verbose
class Chain(BaseModel, ABC):
"""Base interface that all chains should implement."""
memory: Optional[BaseMemory] = None
callbacks: Callbacks = Field(default=None, exclude=True)
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
verbose: bool = Field(
default_factory=_get_verbosity
) # Whether to print the response text
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@property
def _chain_type(self) -> str:
raise NotImplementedError("Saving not supported for this chain type.")
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@property
@abstractmethod
def input_keys(self) -> List[str]:
"""Input keys this chain expects."""
@property
@abstractmethod
def output_keys(self) -> List[str]:
"""Output keys this chain expects."""
def _validate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Check that all inputs are present."""
missing_keys = set(self.input_keys).difference(inputs)
if missing_keys:
raise ValueError(f"Missing some input keys: {missing_keys}")
def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
missing_keys = set(self.output_keys).difference(outputs)
if missing_keys:
raise ValueError(f"Missing some output keys: {missing_keys}")
@abstractmethod
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the logic of this chain and return the output."""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the logic of this chain and return the output."""
raise NotImplementedError("Async call not supported for this chain type.")
def __call__(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. If not provided, will
use the callbacks provided to the chain.
include_run_info: Whether to include run info in the response. Defaults
to False.
"""
inputs = self.prep_inputs(inputs)
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
run_manager = callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
)
try:
outputs = (
self._call(inputs, run_manager=run_manager)
if new_arg_supported
else self._call(inputs)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_chain_error(e)
raise e
run_manager.on_chain_end(outputs)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
async def acall(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. If not provided, will
use the callbacks provided to the chain.
include_run_info: Whether to include run info in the response. Defaults
to False.
"""
inputs = self.prep_inputs(inputs)
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
run_manager = await callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
)
try:
outputs = (
await self._acall(inputs, run_manager=run_manager)
if new_arg_supported
else await self._acall(inputs)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_chain_error(e)
raise e
await run_manager.on_chain_end(outputs)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prep outputs."""
self._validate_outputs(outputs)
if self.memory is not None:
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prep inputs."""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
if len(_input_keys) != 1:
raise ValueError(
f"A single string input was passed in, but this chain expects "
f"multiple inputs ({_input_keys}). When a chain expects "
f"multiple inputs, please call it by passing in a dictionary, "
"eg `chain({'foo': 1, 'bar': 2})`"
)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = self.memory.load_memory_variables(inputs)
inputs = dict(inputs, **external_context)
self._validate_inputs(inputs)
return inputs
def apply(
self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
) -> List[Dict[str, str]]:
"""Call the chain on all inputs in the list."""
return [self(inputs, callbacks=callbacks) for inputs in input_list]
def run(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return self(args[0], callbacks=callbacks)[self.output_keys[0]]
if kwargs and not args:
return self(kwargs, callbacks=callbacks)[self.output_keys[0]]
if not kwargs and not args:
raise ValueError(
"`run` supported with either positional arguments or keyword arguments,"
" but none were provided."
)
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
async def arun(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return (await self.acall(args[0], callbacks=callbacks))[self.output_keys[0]]
if kwargs and not args:
return (await self.acall(kwargs, callbacks=callbacks))[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def dict(self, **kwargs: Any) -> Dict:
"""Return dictionary representation of chain."""
if self.memory is not None:
raise ValueError("Saving of memory is not yet supported.")
_dict = super().dict()
_dict["_type"] = self._chain_type
return _dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the chain.
Args:
file_path: Path to file to save the chain to.
Example:
.. code-block:: python
chain.save(file_path="path/chain.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
chain_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(chain_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(chain_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
| [
"langchain.schema.RunInfo",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.callbacks.manager.CallbackManager.configure"
] | [((816, 849), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (821, 849), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((904, 937), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (909, 937), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((958, 995), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (963, 995), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1295, 1311), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1309, 1311), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1747, 1790), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (1756, 1790), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((4447, 4513), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (4472, 4513), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((6411, 6482), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (6441, 6482), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((1502, 1604), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (1515, 1604), False, 'import warnings\n'), ((5284, 5318), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (5291, 5318), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((7286, 7320), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7293, 7320), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((11932, 11947), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11936, 11947), False, 'from pathlib import Path\n'), ((12267, 12301), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (12276, 12301), False, 'import json\n'), ((4564, 4593), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (4581, 4593), False, 'import inspect\n'), ((6533, 6563), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', (6550, 6563), False, 'import inspect\n'), ((12404, 12454), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (12413, 12454), False, 'import yaml\n')] |
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.schema import (
AIMessage,
BaseMessage,
Generation,
LLMResult,
PromptValue,
RunInfo,
get_buffer_string,
)
def _get_verbosity() -> bool:
return langchain.verbose
def get_prompts(
params: Dict[str, Any], prompts: List[str]
) -> Tuple[Dict[int, List], str, List[int], List[str]]:
"""Get prompts that are already cached."""
llm_string = str(sorted([(k, v) for k, v in params.items()]))
missing_prompts = []
missing_prompt_idxs = []
existing_prompts = {}
for i, prompt in enumerate(prompts):
if langchain.llm_cache is not None:
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
existing_prompts[i] = cache_val
else:
missing_prompts.append(prompt)
missing_prompt_idxs.append(i)
return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts
def update_cache(
existing_prompts: Dict[int, List],
llm_string: str,
missing_prompt_idxs: List[int],
new_results: LLMResult,
prompts: List[str],
) -> Optional[dict]:
"""Update the cache and get the LLM output."""
for i, result in enumerate(new_results.generations):
existing_prompts[missing_prompt_idxs[i]] = result
prompt = prompts[missing_prompt_idxs[i]]
if langchain.llm_cache is not None:
langchain.llm_cache.update(prompt, llm_string, result)
llm_output = new_results.llm_output
return llm_output
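# Illustrative sketch (added for clarity; not part of the original module):
# how get_prompts/update_cache cooperate with the global ``langchain.llm_cache``.
# ``InMemoryCache`` is langchain's in-process cache; the prompts and results
# below are made-up example values.
def _cache_round_trip_example() -> None:
    from langchain.cache import InMemoryCache

    langchain.llm_cache = InMemoryCache()
    params = {"model_name": "fake-model", "stop": None}
    cached, llm_string, missing_idxs, missing = get_prompts(params, ["a", "b"])
    # Nothing is cached yet, so the caller would run the real model on the
    # "missing" prompts and then write the generations back:
    results = LLMResult(generations=[[Generation(text="A")], [Generation(text="B")]])
    update_cache(cached, llm_string, missing_idxs, results, ["a", "b"])
    # A second lookup now finds both prompts in the cache.
    cached_again, _, _, still_missing = get_prompts(params, ["a", "b"])
    assert still_missing == [] and len(cached_again) == 2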
class BaseLLM(BaseLanguageModel, ABC):
"""LLM wrapper should take in a prompt and return a string."""
cache: Optional[bool] = None
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@abstractmethod
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompts."""
@abstractmethod
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompts."""
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
prompt_strings = [p.to_string() for p in prompts]
return self.generate(prompt_strings, stop=stop, callbacks=callbacks)
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
prompt_strings = [p.to_string() for p in prompts]
return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks)
def generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# If string is passed in directly no errors will be raised but outputs will
# not make sense.
if not isinstance(prompts, list):
raise ValueError(
"Argument 'prompts' is expected to be of type List[str], received"
f" argument of type {type(prompts)}."
)
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
disregard_cache = self.cache is not None and not self.cache
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._generate).parameters.get(
"run_manager"
)
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
run_manager = callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, invocation_params=params
)
try:
output = (
self._generate(prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._generate(prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
run_manager.on_llm_end(output)
if run_manager:
output.run = RunInfo(run_id=run_manager.run_id)
return output
if len(missing_prompts) > 0:
run_manager = callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
invocation_params=params,
)
try:
new_results = (
self._generate(missing_prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._generate(missing_prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
run_manager.on_llm_end(new_results)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
run_info = None
if run_manager:
run_info = RunInfo(run_id=run_manager.run_id)
else:
llm_output = {}
run_info = None
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
async def agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
disregard_cache = self.cache is not None and not self.cache
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._agenerate).parameters.get(
"run_manager"
)
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
run_manager = await callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, invocation_params=params
)
try:
output = (
await self._agenerate(prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else await self._agenerate(prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e, verbose=self.verbose)
raise e
await run_manager.on_llm_end(output, verbose=self.verbose)
if run_manager:
output.run = RunInfo(run_id=run_manager.run_id)
return output
if len(missing_prompts) > 0:
run_manager = await callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
invocation_params=params,
)
try:
new_results = (
await self._agenerate(
missing_prompts, stop=stop, run_manager=run_manager
)
if new_arg_supported
else await self._agenerate(missing_prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e)
raise e
await run_manager.on_llm_end(new_results)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
run_info = None
if run_manager:
run_info = RunInfo(run_id=run_manager.run_id)
else:
llm_output = {}
run_info = None
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
def __call__(
self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
if not isinstance(prompt, str):
raise ValueError(
"Argument `prompt` is expected to be a string. Instead found "
f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
"`generate` instead."
)
return (
self.generate([prompt], stop=stop, callbacks=callbacks)
.generations[0][0]
.text
)
async def _call_async(
self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
result = await self.agenerate([prompt], stop=stop, callbacks=callbacks)
return result.generations[0][0].text
def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
return self(text, stop=_stop)
def predict_messages(
self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None
) -> BaseMessage:
text = get_buffer_string(messages)
if stop is None:
_stop = None
else:
_stop = list(stop)
content = self(text, stop=_stop)
return AIMessage(content=content)
async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
return await self._call_async(text, stop=_stop)
async def apredict_messages(
self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None
) -> BaseMessage:
text = get_buffer_string(messages)
if stop is None:
_stop = None
else:
_stop = list(stop)
content = await self._call_async(text, stop=_stop)
return AIMessage(content=content)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
def __str__(self) -> str:
"""Get a string representation of the object for printing."""
cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
return f"{cls_name}\nParams: {self._identifying_params}"
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of llm."""
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the LLM.
Args:
file_path: Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
prompt_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(prompt_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
class LLM(BaseLLM):
"""LLM class that expect subclasses to implement a simpler call method.
The purpose of this class is to expose a simpler interface for working
with LLMs, rather than expect the user to implement the full _generate method.
"""
@abstractmethod
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
raise NotImplementedError("Async generation not implemented for this LLM.")
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# TODO: add caching here.
generations = []
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
for prompt in prompts:
text = (
self._call(prompt, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._call(prompt, stop=stop)
)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
generations = []
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
for prompt in prompts:
text = (
await self._acall(prompt, stop=stop, run_manager=run_manager)
if new_arg_supported
else await self._acall(prompt, stop=stop)
)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
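# Illustrative sketch (added for clarity; not part of the original module):
# the simplest possible model built on the LLM helper class above. The class
# name and its fixed answer are hypothetical.
class _FakeStaticLLM(LLM):
    """Toy LLM that always answers with the same string."""

    response: str = "hello"

    @property
    def _llm_type(self) -> str:
        return "fake-static"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        return self.response


# Example: _FakeStaticLLM()("Any prompt at all") returns "hello".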
| [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.RunInfo",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup",
"langchain.llm_cache.update",
"langchain.schema.LLMResult"
] | [((2315, 2352), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2320, 2352), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2426, 2459), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2431, 2459), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2514, 2547), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2519, 2547), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2696, 2712), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2710, 2712), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((3148, 3191), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (3157, 3191), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((5520, 5586), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (5545, 5586), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((7818, 7889), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (7827, 7889), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((8435, 8506), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (8465, 8506), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((10893, 10964), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (10902, 10964), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12285, 12312), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12302, 12312), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12464, 12490), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (12473, 12490), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12886, 12913), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12903, 12913), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((13083, 13109), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (13092, 13109), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), 
((16290, 16324), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (16299, 16324), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((17006, 17040), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (17015, 17040), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((1248, 1294), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (1274, 1294), False, 'import langchain\n'), ((2036, 2090), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (2062, 2090), False, 'import langchain\n'), ((2903, 3005), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2916, 3005), False, 'import warnings\n'), ((14159, 14174), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14163, 14174), False, 'from pathlib import Path\n'), ((6670, 6704), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (6677, 6704), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((7625, 7659), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7632, 7659), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((9667, 9701), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (9674, 9701), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((10700, 10734), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (10707, 10734), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((14495, 14530), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (14504, 14530), False, 'import json\n'), ((5637, 5670), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (5654, 5670), False, 'import inspect\n'), ((8557, 8591), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (8574, 8591), False, 'import inspect\n'), ((14633, 14684), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (14642, 14684), False, 'import yaml\n'), ((15934, 15963), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (15951, 15963), False, 'import inspect\n'), ((16251, 16272), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16261, 16272), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16635, 16665), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), 
'(self._acall)\n', (16652, 16665), False, 'import inspect\n'), ((16967, 16988), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16977, 16988), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n')] |
"""Base interface for large language models to expose."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
import yaml
from pydantic import BaseModel, Extra, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from langchain.callbacks.base import BaseCallbackManager
from langchain.schema import Generation, LLMResult
def _get_verbosity() -> bool:
return langchain.verbose
def get_prompts(
params: Dict[str, Any], prompts: List[str]
) -> Tuple[Dict[int, List], str, List[int], List[str]]:
"""Get prompts that are already cached."""
llm_string = str(sorted([(k, v) for k, v in params.items()]))
missing_prompts = []
missing_prompt_idxs = []
existing_prompts = {}
for i, prompt in enumerate(prompts):
if langchain.llm_cache is not None:
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
existing_prompts[i] = cache_val
else:
missing_prompts.append(prompt)
missing_prompt_idxs.append(i)
return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts
def update_cache(
existing_prompts: Dict[int, List],
llm_string: str,
missing_prompt_idxs: List[int],
new_results: LLMResult,
prompts: List[str],
) -> Optional[dict]:
"""Update the cache and get the LLM output."""
for i, result in enumerate(new_results.generations):
existing_prompts[missing_prompt_idxs[i]] = result
prompt = prompts[missing_prompt_idxs[i]]
if langchain.llm_cache is not None:
langchain.llm_cache.update(prompt, llm_string, result)
llm_output = new_results.llm_output
return llm_output
class BaseLLM(BaseModel, ABC):
"""LLM wrapper should take in a prompt and return a string."""
cache: Optional[bool] = None
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@validator("callback_manager", pre=True, always=True)
def set_callback_manager(
cls, callback_manager: Optional[BaseCallbackManager]
) -> BaseCallbackManager:
"""If callback manager is None, set it.
This allows users to pass in None as callback manager, which is a nice UX.
"""
return callback_manager or get_callback_manager()
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@abstractmethod
def _generate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompts."""
@abstractmethod
async def _agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompts."""
def generate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# If string is passed in directly no errors will be raised but outputs will
# not make sense.
if not isinstance(prompts, list):
raise ValueError(
"Argument 'prompts' is expected to be of type List[str], received"
f" argument of type {type(prompts)}."
)
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, verbose=self.verbose
)
try:
output = self._generate(prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
self.callback_manager.on_llm_end(output, verbose=self.verbose)
return output
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
if len(missing_prompts) > 0:
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose
)
try:
new_results = self._generate(missing_prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
else:
llm_output = {}
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output)
async def agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
if self.callback_manager.is_async:
await self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, verbose=self.verbose
)
else:
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, verbose=self.verbose
)
try:
output = await self._agenerate(prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
if self.callback_manager.is_async:
await self.callback_manager.on_llm_error(e, verbose=self.verbose)
else:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
if self.callback_manager.is_async:
await self.callback_manager.on_llm_end(output, verbose=self.verbose)
else:
self.callback_manager.on_llm_end(output, verbose=self.verbose)
return output
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
if len(missing_prompts) > 0:
if self.callback_manager.is_async:
await self.callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
verbose=self.verbose,
)
else:
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
verbose=self.verbose,
)
try:
new_results = await self._agenerate(missing_prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
if self.callback_manager.is_async:
await self.callback_manager.on_llm_error(e, verbose=self.verbose)
else:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
if self.callback_manager.is_async:
await self.callback_manager.on_llm_end(
new_results, verbose=self.verbose
)
else:
self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
else:
llm_output = {}
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output)
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text."""
# TODO: this method may not be exact.
# TODO: this method may differ based on model (eg codex).
try:
from transformers import GPT2TokenizerFast
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"This is needed in order to calculate get_num_tokens. "
"Please it install it with `pip install transformers`."
)
        # create a GPT-2 tokenizer instance
        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
        # tokenize the text using the GPT-2 tokenizer
tokenized_text = tokenizer.tokenize(text)
# calculate the number of tokens in the tokenized text
return len(tokenized_text)
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
return self.generate([prompt], stop=stop).generations[0][0].text
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
def __str__(self) -> str:
"""Get a string representation of the object for printing."""
cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
return f"{cls_name}\nParams: {self._identifying_params}"
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of llm."""
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the LLM.
Args:
file_path: Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
prompt_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(prompt_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
class LLM(BaseLLM):
"""LLM class that expect subclasses to implement a simpler call method.
The purpose of this class is to expose a simpler interface for working
with LLMs, rather than expect the user to implement the full _generate method.
"""
@abstractmethod
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Run the LLM on the given prompt and input."""
def _generate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# TODO: add caching here.
generations = []
for prompt in prompts:
text = self._call(prompt, stop=stop)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
async def _agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
raise NotImplementedError("Async generation not implemented for this LLM.")
| [
"langchain.schema.Generation",
"langchain.llm_cache.update",
"langchain.llm_cache.lookup",
"langchain.schema.LLMResult",
"langchain.callbacks.get_callback_manager"
] | [((1991, 2028), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1996, 2028), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2119, 2162), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager'}), '(default_factory=get_callback_manager)\n', (2124, 2162), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2311, 2363), 'pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)', 'always': '(True)'}), "('callback_manager', pre=True, always=True)\n", (2320, 2363), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2693, 2736), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (2702, 2736), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((5769, 5826), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (5778, 5826), False, 'from langchain.schema import Generation, LLMResult\n'), ((9134, 9191), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (9143, 9191), False, 'from langchain.schema import Generation, LLMResult\n'), ((9826, 9867), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (9859, 9867), False, 'from transformers import GPT2TokenizerFast\n'), ((12744, 12778), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (12753, 12778), False, 'from langchain.schema import Generation, LLMResult\n'), ((932, 978), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (958, 978), False, 'import langchain\n'), ((1720, 1774), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (1746, 1774), False, 'import langchain\n'), ((2664, 2686), 'langchain.callbacks.get_callback_manager', 'get_callback_manager', ([], {}), '()\n', (2684, 2686), False, 'from langchain.callbacks import get_callback_manager\n'), ((11346, 11361), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11350, 11361), False, 'from pathlib import Path\n'), ((11682, 11717), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (11691, 11717), False, 'import json\n'), ((11820, 11871), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (11829, 11871), False, 'import yaml\n'), ((12705, 12726), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (12715, 12726), False, 'from langchain.schema import Generation, LLMResult\n')] |
from typing import Optional
import langchain
from dotenv import load_dotenv
from langchain import PromptTemplate, chains
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from pydantic import ValidationError
from rmrkl import ChatZeroShotAgent, RetryAgentExecutor
from .prompts import FORMAT_INSTRUCTIONS, QUESTION_PROMPT, REPHRASE_TEMPLATE, SUFFIX
from .tools import make_tools
def _make_llm(model, temp, api_key, streaming: bool = False):
if model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"):
llm = langchain.chat_models.ChatOpenAI(
temperature=temp,
model_name=model,
request_timeout=1000,
streaming=streaming,
callbacks=[StreamingStdOutCallbackHandler()],
openai_api_key=api_key,
)
elif model.startswith("text-"):
llm = langchain.OpenAI(
temperature=temp,
model_name=model,
streaming=streaming,
callbacks=[StreamingStdOutCallbackHandler()],
openai_api_key=api_key,
)
else:
raise ValueError(f"Invalid model name: {model}")
return llm
class ChemCrow:
def __init__(
self,
tools=None,
model="gpt-4-0613",
tools_model="gpt-3.5-turbo-0613",
temp=0.1,
max_iterations=40,
verbose=True,
streaming: bool = True,
openai_api_key: Optional[str] = None,
api_keys: dict = {},
):
"""Initialize ChemCrow agent."""
load_dotenv()
try:
self.llm = _make_llm(model, temp, openai_api_key, streaming)
except ValidationError:
raise ValueError("Invalid OpenAI API key")
if tools is None:
api_keys["OPENAI_API_KEY"] = openai_api_key
tools_llm = _make_llm(tools_model, temp, openai_api_key, streaming)
tools = make_tools(tools_llm, api_keys=api_keys, verbose=verbose)
# Initialize agent
self.agent_executor = RetryAgentExecutor.from_agent_and_tools(
tools=tools,
agent=ChatZeroShotAgent.from_llm_and_tools(
self.llm,
tools,
suffix=SUFFIX,
format_instructions=FORMAT_INSTRUCTIONS,
question_prompt=QUESTION_PROMPT,
),
verbose=True,
max_iterations=max_iterations,
)
rephrase = PromptTemplate(
input_variables=["question", "agent_ans"], template=REPHRASE_TEMPLATE
)
self.rephrase_chain = chains.LLMChain(prompt=rephrase, llm=self.llm)
def run(self, prompt):
outputs = self.agent_executor({"input": prompt})
return outputs["output"]
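# Illustrative usage sketch (not from the original file; the prompt string is a
# placeholder and OPENAI_API_KEY is assumed to be available via the environment/.env):
#   chem_crow = ChemCrow(model="gpt-4-0613", temp=0.1, streaming=False)
#   answer = chem_crow.run("What is the molecular weight of caffeine?")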
| [
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.chains.LLMChain",
"langchain.PromptTemplate"
] | [((1543, 1556), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1554, 1556), False, 'from dotenv import load_dotenv\n'), ((2451, 2541), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question', 'agent_ans']", 'template': 'REPHRASE_TEMPLATE'}), "(input_variables=['question', 'agent_ans'], template=\n REPHRASE_TEMPLATE)\n", (2465, 2541), False, 'from langchain import PromptTemplate, chains\n'), ((2590, 2636), 'langchain.chains.LLMChain', 'chains.LLMChain', ([], {'prompt': 'rephrase', 'llm': 'self.llm'}), '(prompt=rephrase, llm=self.llm)\n', (2605, 2636), False, 'from langchain import PromptTemplate, chains\n'), ((2113, 2259), 'rmrkl.ChatZeroShotAgent.from_llm_and_tools', 'ChatZeroShotAgent.from_llm_and_tools', (['self.llm', 'tools'], {'suffix': 'SUFFIX', 'format_instructions': 'FORMAT_INSTRUCTIONS', 'question_prompt': 'QUESTION_PROMPT'}), '(self.llm, tools, suffix=SUFFIX,\n format_instructions=FORMAT_INSTRUCTIONS, question_prompt=QUESTION_PROMPT)\n', (2149, 2259), False, 'from rmrkl import ChatZeroShotAgent, RetryAgentExecutor\n'), ((744, 776), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (774, 776), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((1009, 1041), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (1039, 1041), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')] |
import langchain_hb
from colorama import Fore, Back, Style
import sys
import json
import requests
import os
import agent
class hb():
stats = True
exe = {
"command" : "",
"type": ""
}
index = langchain_hb.initialize_index()
    @staticmethod
    def get_openAIKey():
        return os.environ["OPENAI_KEY"]
    def read_file(self, file_path):
        # Use a context manager so the file handle is always closed.
        with open(file_path, "r") as file:
            return file.readlines()
def excute_command(self, command, output_file):
os.system("{} | tee {}".format(command, output_file))
command_output = self.read_file(output_file)
return command_output
def submit_stats(self, human_request, ai_response, ai_sources):
url = "https://k7fbf35swk.execute-api.us-east-1.amazonaws.com/stats" # Replace with the actual URL
response = requests.get(url, params={
"human_request": human_request,
"ai_response": ai_response,
"ai_sources": ai_sources
}
)
return response.status_code
def print_banner(self):
print("Welcome to " + Fore.GREEN + "HackerBot" + Style.RESET_ALL + "!")
def process_user_input(self,user_input):
subroutine = user_input.split(' ')[0]
if subroutine == "exit":
print("Good Bye!")
exit()
elif subroutine == "go":
self.excute_command(self.exe["command"],"{}.out".format(self.exe["type"]))
elif subroutine == "cmd":
command = user_input[3:]
self.excute_command(command, "cmd.out")
elif subroutine == "reload":
self.index = langchain_hb.initialize_index()
elif subroutine == "": pass
        elif subroutine == "agent":
            agent_response = None
            try:
                agent_response = agent.agent_run(user_input[5:])
            except Exception:
                print("Agent Error!")
            if self.stats == True and agent_response is not None:
                self.submit_stats(user_input, agent_response, "AGENT_ACTIVITY")
else:
ai_response, exe = langchain_hb.ask_ai(user_input, self.index, self.exe)
if self.stats == True:
self.submit_stats(user_input, ai_response['answer'],ai_response['sources'])
def prompt(self,):
user_input = input(Fore.GREEN + "hb>" + Style.RESET_ALL)
self.process_user_input(user_input)
def run(self,):
self.print_banner()
while(True):
self.prompt()
if __name__ == "__main__":
hb = hb()
if len(sys.argv) > 1:
if sys.argv[1] == "--stats-off":
hb.stats = False
hb.run() | [
"langchain_hb.initialize_index",
"langchain_hb.ask_ai"
] | [((224, 255), 'langchain_hb.initialize_index', 'langchain_hb.initialize_index', ([], {}), '()\n', (253, 255), False, 'import langchain_hb\n'), ((820, 936), 'requests.get', 'requests.get', (['url'], {'params': "{'human_request': human_request, 'ai_response': ai_response, 'ai_sources':\n ai_sources}"}), "(url, params={'human_request': human_request, 'ai_response':\n ai_response, 'ai_sources': ai_sources})\n", (832, 936), False, 'import requests\n'), ((1617, 1648), 'langchain_hb.initialize_index', 'langchain_hb.initialize_index', ([], {}), '()\n', (1646, 1648), False, 'import langchain_hb\n'), ((2020, 2073), 'langchain_hb.ask_ai', 'langchain_hb.ask_ai', (['user_input', 'self.index', 'self.exe'], {}), '(user_input, self.index, self.exe)\n', (2039, 2073), False, 'import langchain_hb\n'), ((1771, 1802), 'agent.agent_run', 'agent.agent_run', (['user_input[5:]'], {}), '(user_input[5:])\n', (1786, 1802), False, 'import agent\n')] |
import streamlit as st
import langchain
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain.chat_models import ChatOpenAI
from langsmith import Client
from langchain.smith import RunEvalConfig, run_on_dataset
from pydantic import BaseModel, Field
db = SQLDatabase.from_uri("sqlite:///Chinook.db")
llm = ChatOpenAI(temperature=0)
db_chain = SQLDatabaseChain.from_llm(llm, db, return_intermediate_steps=True)
from langsmith import Client
client = Client()
def send_feedback(run_id, score):
client.create_feedback(run_id, "user_score", score=score)
st.set_page_config(page_title='🦜🔗 Ask the SQL DB App')
st.title('🦜🔗 Ask the SQL DB App')
st.info("Most 'question answering' applications run over unstructured text data. But a lot of the data in the world is tabular data! This is an attempt to create an application using [LangChain](https://github.com/langchain-ai/langchain) to let you ask questions of data in tabular format. For this demo application, we will use the Chinook dataset in a SQL database. Please explore the schema [here](https://www.sqlitetutorial.net/wp-content/uploads/2015/11/sqlite-sample-database-color.jpg) to get a sense for what questions you can ask. Please leave feedback on well the question is answered, and we will use that improve the application!")
query_text = st.text_input('Enter your question:', placeholder = 'Ask something like "How many artists are there?" or "Which artist has the most albums"')
# Form input and query
result = None
with st.form('myform', clear_on_submit=True):
submitted = st.form_submit_button('Submit')
if submitted:
with st.spinner('Calculating...'):
inputs = {"query": query_text}
response = db_chain(inputs, include_run_info=True)
result = response["result"]
sql_command = response["intermediate_steps"][1]
sql_result = response["intermediate_steps"][3]
run_id = response["__run"].run_id
if result is not None:
st.info(result)
st.code(sql_command)
st.code(sql_result)
col_blank, col_text, col1, col2 = st.columns([10, 2,1,1])
with col_text:
st.text("Feedback:")
with col1:
st.button("👍", on_click=send_feedback, args=(run_id, 1))
with col2:
st.button("👎", on_click=send_feedback, args=(run_id, 0))
| [
"langchain_experimental.sql.SQLDatabaseChain.from_llm",
"langchain.utilities.SQLDatabase.from_uri",
"langchain.chat_models.ChatOpenAI"
] | [((316, 360), 'langchain.utilities.SQLDatabase.from_uri', 'SQLDatabase.from_uri', (['"""sqlite:///Chinook.db"""'], {}), "('sqlite:///Chinook.db')\n", (336, 360), False, 'from langchain.utilities import SQLDatabase\n'), ((367, 392), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (377, 392), False, 'from langchain.chat_models import ChatOpenAI\n'), ((404, 470), 'langchain_experimental.sql.SQLDatabaseChain.from_llm', 'SQLDatabaseChain.from_llm', (['llm', 'db'], {'return_intermediate_steps': '(True)'}), '(llm, db, return_intermediate_steps=True)\n', (429, 470), False, 'from langchain_experimental.sql import SQLDatabaseChain\n'), ((510, 518), 'langsmith.Client', 'Client', ([], {}), '()\n', (516, 518), False, 'from langsmith import Client\n'), ((616, 670), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""🦜🔗 Ask the SQL DB App"""'}), "(page_title='🦜🔗 Ask the SQL DB App')\n", (634, 670), True, 'import streamlit as st\n'), ((671, 704), 'streamlit.title', 'st.title', (['"""🦜🔗 Ask the SQL DB App"""'], {}), "('🦜🔗 Ask the SQL DB App')\n", (679, 704), True, 'import streamlit as st\n'), ((705, 1358), 'streamlit.info', 'st.info', (['"""Most \'question answering\' applications run over unstructured text data. But a lot of the data in the world is tabular data! This is an attempt to create an application using [LangChain](https://github.com/langchain-ai/langchain) to let you ask questions of data in tabular format. For this demo application, we will use the Chinook dataset in a SQL database. Please explore the schema [here](https://www.sqlitetutorial.net/wp-content/uploads/2015/11/sqlite-sample-database-color.jpg) to get a sense for what questions you can ask. Please leave feedback on well the question is answered, and we will use that improve the application!"""'], {}), '(\n "Most \'question answering\' applications run over unstructured text data. But a lot of the data in the world is tabular data! This is an attempt to create an application using [LangChain](https://github.com/langchain-ai/langchain) to let you ask questions of data in tabular format. For this demo application, we will use the Chinook dataset in a SQL database. Please explore the schema [here](https://www.sqlitetutorial.net/wp-content/uploads/2015/11/sqlite-sample-database-color.jpg) to get a sense for what questions you can ask. Please leave feedback on well the question is answered, and we will use that improve the application!"\n )\n', (712, 1358), True, 'import streamlit as st\n'), ((1363, 1512), 'streamlit.text_input', 'st.text_input', (['"""Enter your question:"""'], {'placeholder': '"""Ask something like "How many artists are there?" or "Which artist has the most albums\\""""'}), '(\'Enter your question:\', placeholder=\n \'Ask something like "How many artists are there?" 
or "Which artist has the most albums"\'\n )\n', (1376, 1512), True, 'import streamlit as st\n'), ((1547, 1586), 'streamlit.form', 'st.form', (['"""myform"""'], {'clear_on_submit': '(True)'}), "('myform', clear_on_submit=True)\n", (1554, 1586), True, 'import streamlit as st\n'), ((1601, 1632), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit"""'], {}), "('Submit')\n", (1622, 1632), True, 'import streamlit as st\n'), ((1966, 1981), 'streamlit.info', 'st.info', (['result'], {}), '(result)\n', (1973, 1981), True, 'import streamlit as st\n'), ((1983, 2003), 'streamlit.code', 'st.code', (['sql_command'], {}), '(sql_command)\n', (1990, 2003), True, 'import streamlit as st\n'), ((2005, 2024), 'streamlit.code', 'st.code', (['sql_result'], {}), '(sql_result)\n', (2012, 2024), True, 'import streamlit as st\n'), ((2060, 2085), 'streamlit.columns', 'st.columns', (['[10, 2, 1, 1]'], {}), '([10, 2, 1, 1])\n', (2070, 2085), True, 'import streamlit as st\n'), ((2102, 2122), 'streamlit.text', 'st.text', (['"""Feedback:"""'], {}), "('Feedback:')\n", (2109, 2122), True, 'import streamlit as st\n'), ((2137, 2193), 'streamlit.button', 'st.button', (['"""👍"""'], {'on_click': 'send_feedback', 'args': '(run_id, 1)'}), "('👍', on_click=send_feedback, args=(run_id, 1))\n", (2146, 2193), True, 'import streamlit as st\n'), ((2208, 2264), 'streamlit.button', 'st.button', (['"""👎"""'], {'on_click': 'send_feedback', 'args': '(run_id, 0)'}), "('👎', on_click=send_feedback, args=(run_id, 0))\n", (2217, 2264), True, 'import streamlit as st\n'), ((1655, 1683), 'streamlit.spinner', 'st.spinner', (['"""Calculating..."""'], {}), "('Calculating...')\n", (1665, 1683), True, 'import streamlit as st\n')] |
import langchain
from langchain.agents import initialize_agent
from llama_index import GPTListIndex, GPTIndexMemory
from langchain.callbacks import get_openai_callback
from langchain.agents import AgentType
class Eunomia:
def __init__(
self,
tools,
model="text-davinci-003",
temp=0.1,
get_cost=False,
max_iterations=40,
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
**kwargs,
):
if model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"):
self.llm = langchain.chat_models.ChatOpenAI(
temperature=temp,
model_name=model,
request_timeout=1000,
max_tokens=2000,
)
elif model.startswith("text-"):
self.llm = langchain.OpenAI(temperature=temp, model_name=model)
self.get_cost = get_cost
self.max_iterations = max_iterations
# Initialize agent
index = GPTListIndex([])
memory = GPTIndexMemory(
index=index,
memory_key="chat_history",
query_kwargs={"response_mode": "compact"},
)
self.agent_chain = initialize_agent(
tools, self.llm, agent=agent_type, verbose=True, memory=memory, **kwargs
)
def run(self, prompt):
with get_openai_callback() as cb:
result = self.agent_chain.run(input=prompt)
if self.get_cost:
print(cb)
return result
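# Illustrative usage sketch (not from the original file; `my_tools` and the prompt
# are placeholders):
#   agent = Eunomia(tools=my_tools, model="text-davinci-003", temp=0.1, get_cost=True)
#   result = agent.run("Extract the synthesis conditions described in this paper.")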
| [
"langchain.OpenAI",
"langchain.agents.initialize_agent",
"langchain.callbacks.get_openai_callback",
"langchain.chat_models.ChatOpenAI"
] | [((980, 996), 'llama_index.GPTListIndex', 'GPTListIndex', (['[]'], {}), '([])\n', (992, 996), False, 'from llama_index import GPTListIndex, GPTIndexMemory\n'), ((1014, 1116), 'llama_index.GPTIndexMemory', 'GPTIndexMemory', ([], {'index': 'index', 'memory_key': '"""chat_history"""', 'query_kwargs': "{'response_mode': 'compact'}"}), "(index=index, memory_key='chat_history', query_kwargs={\n 'response_mode': 'compact'})\n", (1028, 1116), False, 'from llama_index import GPTListIndex, GPTIndexMemory\n'), ((1186, 1281), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'self.llm'], {'agent': 'agent_type', 'verbose': '(True)', 'memory': 'memory'}), '(tools, self.llm, agent=agent_type, verbose=True, memory=\n memory, **kwargs)\n', (1202, 1281), False, 'from langchain.agents import initialize_agent\n'), ((555, 666), 'langchain.chat_models.ChatOpenAI', 'langchain.chat_models.ChatOpenAI', ([], {'temperature': 'temp', 'model_name': 'model', 'request_timeout': '(1000)', 'max_tokens': '(2000)'}), '(temperature=temp, model_name=model,\n request_timeout=1000, max_tokens=2000)\n', (587, 666), False, 'import langchain\n'), ((1340, 1361), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (1359, 1361), False, 'from langchain.callbacks import get_openai_callback\n'), ((805, 857), 'langchain.OpenAI', 'langchain.OpenAI', ([], {'temperature': 'temp', 'model_name': 'model'}), '(temperature=temp, model_name=model)\n', (821, 857), False, 'import langchain\n')] |
"""
Utilities for ingesting different types of documents.
This includes cutting text into chunks and cleaning text.
"""
import re
from typing import Callable, Dict, List, Tuple
import langchain.docstore.document as docstore
import langchain.text_splitter as splitter
from loguru import logger
class IngestUtils:
"""
Utils for ingesting different types of documents.
This includes cutting text into chunks and cleaning text.
"""
def __init__(self, chunk_size: int, chunk_overlap: int, file_no: int, text_splitter_method: str):
self.chunk_size = chunk_size
self.chunk_overlap = chunk_overlap
self.file_no = file_no
self.text_splitter_method = text_splitter_method
def merge_hyphenated_words(self, text: str) -> str:
"""
Merge words in the text that have been split with a hyphen.
"""
return re.sub(r"(\w)-\n(\w)", r"\1\2", text)
def fix_newlines(self, text: str) -> str:
"""
Replace single newline characters in the text with spaces.
"""
return re.sub(r"(?<!\n)\n(?!\n)", " ", text)
def remove_multiple_newlines(self, text: str) -> str:
"""
Reduce multiple newline characters in the text to a single newline.
"""
return re.sub(r"\n{2,}", "\n", text)
def clean_texts(self,
texts: List[Tuple[int, str]],
cleaning_functions: List[Callable[[str], str]]
) -> List[Tuple[int, str]]:
"""
Apply the cleaning functions to the text of each page.
"""
logger.info("Cleaning texts")
cleaned_texts = []
for page_num, text in texts:
for cleaning_function in cleaning_functions:
text = cleaning_function(text)
cleaned_texts.append((page_num, text))
return cleaned_texts
# def split_text_into_chunks(self,
# text: Tuple[int, str],
# metadata: Dict[str, str]):
# """
# Split the text into chunks
# """
# text_splitter = self.get_splitter()
# chunk_no = 0
# for page_num, page in texts:
# logger.info(f"Splitting page {page_num}")
# chunks = text_splitter.split_text(page)
# def chunks_to_docs(self,
# chunks,
# metadata: Dict[str, str]):
# """
# Convert chunks into Documents
# """
# # initialize empty list of Documents
# docs: List[docstore.Document] = []
# # loop over chunks
# for i, chunk in enumerate(chunks):
# if self.file_no:
# metadata_combined = {
# "file_no": self.file_no,
# "chunk_no": chunk_no,
# "source": f"F{self.file_no}-{chunk_no}"
# }
# else:
# metadata_combined = {
# "page_number": page_num,
# "chunk": i,
# "source": f"p{page_num}-{i}",
# **metadata,
# }
# doc = docstore.Document(
# page_content=chunk,
# metadata=metadata_combined
# )
# docs.append(doc)
# chunk_no += 1
# return docs
def texts_to_docs(self,
texts: List[Tuple[int, str]],
metadata: Dict[str, str]) -> List[docstore.Document]:
"""
Split the text into chunks and return them as Documents.
"""
text_splitter = self.get_splitter()
docs: List[docstore.Document] = []
chunk_no = 0
for page_num, page in texts:
logger.info(f"Splitting page {page_num}")
chunks = text_splitter.split_text(page)
for i, chunk in enumerate(chunks):
if self.file_no:
metadata_combined = {
"file_no": self.file_no,
"chunk_no": chunk_no,
"source": f"F{self.file_no}-{chunk_no}"
}
else:
metadata_combined = {
"page_number": page_num,
"chunk": i,
"source": f"p{page_num}-{i}",
**metadata,
}
doc = docstore.Document(
page_content=chunk,
metadata=metadata_combined
)
docs.append(doc)
chunk_no += 1
return docs
def clean_texts_to_docs(self, raw_pages, metadata) -> List[docstore.Document]:
""""
Combines the functions clean_text and text_to_docs
"""
cleaning_functions: List = [
self.merge_hyphenated_words,
self.fix_newlines,
self.remove_multiple_newlines
]
cleaned_texts = self.clean_texts(raw_pages, cleaning_functions)
# for cleaned_text in cleaned_texts:
# cleaned_chunks = self.split_text_into_chunks(cleaned_text, metadata)
docs = self.texts_to_docs(cleaned_texts, metadata)
return docs
def get_splitter(self):
"""
Get the text splitter object
"""
if self.text_splitter_method == "NLTKTextSplitter":
text_splitter = splitter.NLTKTextSplitter(
separator="\n\n",
language="english",
chunk_size=self.chunk_size,
chunk_overlap=self.chunk_overlap
)
elif self.text_splitter_method == "RecursiveCharacterTextSplitter":
text_splitter = splitter.RecursiveCharacterTextSplitter(
chunk_size=self.chunk_size,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=self.chunk_overlap
)
return text_splitter
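# Illustrative usage sketch (not from the original file; the parameter values and the
# `raw_pages`/`metadata` variables are placeholders):
#   utils = IngestUtils(chunk_size=1000, chunk_overlap=200, file_no=None,
#                       text_splitter_method="RecursiveCharacterTextSplitter")
#   docs = utils.clean_texts_to_docs(raw_pages, metadata)  # raw_pages: list of (page_num, text) tuples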
| [
"langchain.text_splitter.NLTKTextSplitter",
"langchain.docstore.document.Document",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((881, 921), 're.sub', 're.sub', (['"""(\\\\w)-\\\\n(\\\\w)"""', '"""\\\\1\\\\2"""', 'text'], {}), "('(\\\\w)-\\\\n(\\\\w)', '\\\\1\\\\2', text)\n", (887, 921), False, 'import re\n'), ((1072, 1111), 're.sub', 're.sub', (['"""(?<!\\\\n)\\\\n(?!\\\\n)"""', '""" """', 'text'], {}), "('(?<!\\\\n)\\\\n(?!\\\\n)', ' ', text)\n", (1078, 1111), False, 'import re\n'), ((1284, 1313), 're.sub', 're.sub', (['"""\\\\n{2,}"""', '"""\n"""', 'text'], {}), "('\\\\n{2,}', '\\n', text)\n", (1290, 1313), False, 'import re\n'), ((1601, 1630), 'loguru.logger.info', 'logger.info', (['"""Cleaning texts"""'], {}), "('Cleaning texts')\n", (1612, 1630), False, 'from loguru import logger\n'), ((3783, 3824), 'loguru.logger.info', 'logger.info', (['f"""Splitting page {page_num}"""'], {}), "(f'Splitting page {page_num}')\n", (3794, 3824), False, 'from loguru import logger\n'), ((5456, 5586), 'langchain.text_splitter.NLTKTextSplitter', 'splitter.NLTKTextSplitter', ([], {'separator': '"""\n\n"""', 'language': '"""english"""', 'chunk_size': 'self.chunk_size', 'chunk_overlap': 'self.chunk_overlap'}), "(separator='\\n\\n', language='english', chunk_size=\n self.chunk_size, chunk_overlap=self.chunk_overlap)\n", (5481, 5586), True, 'import langchain.text_splitter as splitter\n'), ((4463, 4528), 'langchain.docstore.document.Document', 'docstore.Document', ([], {'page_content': 'chunk', 'metadata': 'metadata_combined'}), '(page_content=chunk, metadata=metadata_combined)\n', (4480, 4528), True, 'import langchain.docstore.document as docstore\n'), ((5764, 5930), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'self.chunk_size', 'separators': "['\\n\\n', '\\n', '.', '!', '?', ',', ' ', '']", 'chunk_overlap': 'self.chunk_overlap'}), "(chunk_size=self.chunk_size,\n separators=['\\n\\n', '\\n', '.', '!', '?', ',', ' ', ''], chunk_overlap=\n self.chunk_overlap)\n", (5803, 5930), True, 'import langchain.text_splitter as splitter\n')] |
import numpy as np
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.llms.litellm import LiteLLM
from langchain_google_genai import ChatGoogleGenerativeAI
from trulens_eval.feedback.provider.langchain import Langchain
from trulens_eval import Tru, Feedback, TruLlama
from trulens_eval.feedback import Groundedness
# Setup RAG
index = load_index_from_storage(
StorageContext.from_defaults(persist_dir="base_index"),
embed_model="local:../models/bge-small-en-v1.5",
)
llm = LiteLLM(model="gemini/gemini-pro", temperature=0.1)
query_engine = index.as_query_engine(llm=llm)
# Evaluate with trulens-eval
# Define provider and database
_llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0)
provider = Langchain(chain=_llm)
database_url = "sqlite:///data/trulens.db"
tru = Tru(database_url=database_url, database_redact_keys=True)
# tru.reset_database()
# Using TruLlama
f_qa_relevance = Feedback(
provider.relevance_with_cot_reasons, name="Answer Relevance"
).on_input_output()
f_context_relevance = (
Feedback(provider.relevance_with_cot_reasons, name="Context Relevance")
.on_input()
.on(TruLlama.select_source_nodes().node.text)
.aggregate(np.mean)
)
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
.on(TruLlama.select_source_nodes().node.text)
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
app_id = "Chain2"
tru_recorder = TruLlama(
query_engine,
app_id=app_id,
feedbacks=[
f_qa_relevance,
f_context_relevance,
f_groundedness,
],
)
qns = ...
for qn in qns:
with tru_recorder as recording:
res = query_engine.query(qn)
# Results
# dashboard
tru.run_dashboard(port=8601)
# # dataframe
# records_df, feedback = tru.get_records_and_feedback(app_ids=[app_id])
# records_df.head()
| [
"langchain_google_genai.ChatGoogleGenerativeAI"
] | [((519, 570), 'llama_index.llms.litellm.LiteLLM', 'LiteLLM', ([], {'model': '"""gemini/gemini-pro"""', 'temperature': '(0.1)'}), "(model='gemini/gemini-pro', temperature=0.1)\n", (526, 570), False, 'from llama_index.llms.litellm import LiteLLM\n'), ((687, 744), 'langchain_google_genai.ChatGoogleGenerativeAI', 'ChatGoogleGenerativeAI', ([], {'model': '"""gemini-pro"""', 'temperature': '(0)'}), "(model='gemini-pro', temperature=0)\n", (709, 744), False, 'from langchain_google_genai import ChatGoogleGenerativeAI\n'), ((756, 777), 'trulens_eval.feedback.provider.langchain.Langchain', 'Langchain', ([], {'chain': '_llm'}), '(chain=_llm)\n', (765, 777), False, 'from trulens_eval.feedback.provider.langchain import Langchain\n'), ((828, 885), 'trulens_eval.Tru', 'Tru', ([], {'database_url': 'database_url', 'database_redact_keys': '(True)'}), '(database_url=database_url, database_redact_keys=True)\n', (831, 885), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1245, 1289), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'provider'}), '(groundedness_provider=provider)\n', (1257, 1289), False, 'from trulens_eval.feedback import Groundedness\n'), ((1551, 1657), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': '[f_qa_relevance, f_context_relevance, f_groundedness]'}), '(query_engine, app_id=app_id, feedbacks=[f_qa_relevance,\n f_context_relevance, f_groundedness])\n', (1559, 1657), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((401, 455), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""base_index"""'}), "(persist_dir='base_index')\n", (429, 455), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((945, 1015), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(provider.relevance_with_cot_reasons, name='Answer Relevance')\n", (953, 1015), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1165, 1195), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1193, 1195), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1069, 1140), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasons'], {'name': '"""Context Relevance"""'}), "(provider.relevance_with_cot_reasons, name='Context Relevance')\n", (1077, 1140), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1313, 1390), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (1321, 1390), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1399, 1429), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1427, 1429), False, 'from trulens_eval import Tru, Feedback, TruLlama\n')] |
import os
import json
import re
import string
import time
from tqdm import tqdm
import langchain
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import pickle
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS
from langchain.chains import LLMChain, RetrievalQA
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.schema import Document, BaseRetriever
import sys
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT_DIR)
from repo_downloader import RepoDownloader
from web_downloader import WebScraper
class RetrieverWithScore(BaseRetriever):
"""Just a custom retriever to track distance between query and retrieval docs
Args:
search_type (str): How to measure similarity
vector_store (FAISS): Retrieval Doc Embeddings
k: How many chunks
"""
def __init__(self,
search_type: str,
vector_store: FAISS,
k: int,
score_threshold: int):
self.k = k
self.vector_store=vector_store
self.score_threshold = score_threshold
self.search_type=search_type
def _get_relevant_documents(self, query: str) -> list[Document]:
# [NOTE] we removed the search type, only use search_type = "similarity"
if self.search_type != "similarity":
raise ValueError(f"Only search_type='similarity' is supported with scores")
docs_and_scores = self.vector_store.similarity_search_with_score(query=query,
k=self.k,
score_threshold=self.score_threshold)
for doc, distance in docs_and_scores:
doc.metadata = {**doc.metadata, **{"score": 1-distance}}
return [doc for (doc, _) in docs_and_scores]
def aget_relevant_documents(self, query):
return self._get_relevant_documents(query)
def get_relevant_documents(self, query: str) -> list[Document]:
return self._get_relevant_documents(query)
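# Illustrative usage sketch (not from the original file; the FAISS store and the query
# string are placeholders):
#   retriever = RetrieverWithScore(search_type="similarity", vector_store=vector_store,
#                                  k=3, score_threshold=0.4)
#   docs = retriever.get_relevant_documents("How do I resume training from a checkpoint?")
#   # each returned doc carries doc.metadata["score"] == 1 - FAISS distance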
__all__ = ['ChatBot']
EVAL_7B_TEMPLATE = (f'Answer the following question as one function, class, or object. If you do not know, just say "I do not know".'
'\n{context}'
'\nQuestion: {question}')
CHAT_30B_TEMPLATE = ("""<|im_start|>system
A conversation between a user and an LLM-based AI assistant about the codebase for the MosaicML library Composer.
Provide a helpful and simple answer given the following context to the question. If you do not know, just say "I
do not know".<|im_end|>
<|im_start|>context
{context}<|im_end|>
<|im_start|>user
{question}<|im_end|>
<|im_start|>assistant""")
SUBQUERY_INTENT_TEMPLATE = ("""<|im_start|>system
A conversation between a user and an LLM-based AI assistant about the codebase for MosaicML.
Provide a helpful, short and simple answer given the following context to the question. Do not
attempt to explain any terms and do not go in depth.
If you do not know, just say "I do not know".<|im_end|>
<|im_start|>context
{context}<|im_end|>
<|im_start|>user
What is the user trying to learn from this question: {question}<|im_end|>
<|im_start|>assistant""")
SUBQUERY_RELATED_TEMPLATE = ("""<|im_start|>system
A conversation between a user and an LLM-based AI assistant about the codebase for MosaicML.
Only output a "Yes" or "No" with no extra information given the following context to the question.
The context must be related to the question, otherwise output "No". If you aren't sure, output "No".<|im_end|>
<|im_start|>context
{context}<|im_end|>
<|im_start|>user
Can this question be answered with information provided only from context: {question}<|im_end|>
<|im_start|>assistant""")
PARTIAL_SUBQA_TEMPLATE = ("""<|im_start|>system
A conversation between a user and an LLM-based AI assistant.
Given the context, the job of the assistant is to determine if the user's question is relevant given the context. If the given
context is unrelated to the question, then the assistant will break the question into smaller questions that can likely be answered
by a single section of the relevant context. Else if the question can't be answered using the context alone, the LLM-based AI assistant
should not reply with anything.<|im_end|>
<|im_start|>context
{{context}}<|im_end|>
<|im_start|>user
{{question}} {} Can this question be answered with the context given alone? If so, break the question down into less than five
smaller questions that can likely be answered by a single section of the relevant documentation. Make sure that the smaller question
is related to the main question. If you aren't sure, just don't include the smaller question
Please only respond with a list of smaller questions without any extra information.<|im_end|>
<|im_start|>assistant""")
PARTIAL_COMBINE_TEMPLATE = ("""<|im_start|>system A conversation between a user and an LLM-based AI assistant.
Here are smaller questions regarding the user's question and their answers:
{}
Provide a helpful and in depth answer given the following context to the question and heavily reference
the smaller questions provided.
If you do not know, just say "I do not know".<|im_end|>
<|im_start|>context
{{context}}<|im_end|>
<|im_start|>user
{{question}}<|im_end|>
<|im_start|>assistant""")
class ChatBot:
"""Given a folder of .txt files from data_path, create a Chatbot object that can process the files into documents, split them
    into manageable sizes, and store them in a vector store. The Chatbot can then be used to answer questions about the documents.
Args:
        data_path (str): The path of the directory where the txt files of interest are located
embedding (langchain.embeddings.base.Embeddings): The embedding to use for the vector store
model (langchain.llms.base.LLM): The model to use for the LLMChain
k (int): The number of similar documents to return from the vector store
chunk_size (int): The size of the chunks to split the documents into when splitting the documents
chunk_overlap (int): The amount of overlap between chunks when splitting the documents
Example:
.. testcode::
from langchain.embeddings import MosaicMLInstructorEmbeddings
from langchain.llms import MosaicML
chatbot = ChatBot(data_path= "support_chatbot/retrieval_data",
embedding=MosaicMLInstructorEmbeddings(),
k=3,
model=MosaicML())
chatbot.chat()
"""
def __init__(self,
data_path: str,
embedding: langchain.embeddings.base.Embeddings,
model: langchain.llms.base.LLM,
chunk_size: int,
chunk_overlap: int,
k: int,
) -> None:
self.data_path = data_path
self.embedding = embedding
self.model = model
self.chunk_size = chunk_size
self.chunk_overlap = chunk_overlap
self.k = k
self.saved_state = {'k': k, 'chunk_size': chunk_size, 'chunk_overlap': chunk_overlap, 'model_k': model.model_kwargs['top_k']}
self.chat_chain = None
self.intent_chain = None
self.subchain = None
self.subsubchain = None
self.vector_store = None
if os.path.isfile(os.path.join(data_path, 'vectors.pickle')):
with open(os.path.join(self.data_path, 'vectors.pickle'), 'rb') as f:
self.vector_store = pickle.load(f)
def load_data(self) -> list[Document]:
"""Given a directory find all .txt files and load them as documents into a list
Returns:
list[Document]: list of documents loaded from data_dir
"""
data = []
for dirpath, _, filenames in os.walk(self.data_path):
for filename in filenames:
if filename.endswith(".txt"):
file_path = os.path.join(dirpath, filename)
loaders = UnstructuredFileLoader(file_path, encoding='utf8')
document = loaders.load()[0]
document.metadata = {**document.metadata, **{'file_name': filename.replace('{slash}', '/').replace('{dot}', '.').replace('{colon}', ':')[:-4]}}
data.append(document)
return data
def split_pages(self,
pages: list[Document]) -> list[Document]:
"""Given a list of documents split them into smaller documents of size `self.chunk_size`
Args:
pages (list[Document]): list of pages (Documents) we want to split
Returns:
list[Document]: list of chunks (Documents) split from pages (Documents)
"""
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=self.chunk_size,
chunk_overlap=self.chunk_overlap,
separators=[
r'(?<=\.) ',
r'(?<=\?) ',
r'(?<=\!) ',
r'\n',
            ], # Split after sentence-ending periods, question marks, and exclamation marks, then on new lines, in that order of preference
)
return text_splitter.split_documents(pages)
def documents_to_str(self,
documents: list[Document]) -> list[str]:
return map(lambda doc: doc.page_content, documents)
def clean_response(self, input_text: str) -> str:
"""Clean the response from the model by stripping some bad answer prefixes, new lines, etc.
Args:
input_text (str): The response from the model.
Returns:
str: The cleaned response.
"""
input_text = str(input_text.strip('\n'))
context_prefix = 'Context:'
answer_prefix = 'Answer:'
prefixes = [context_prefix, answer_prefix]
while True:
prefix_found = False
for prefix in prefixes:
if input_text.startswith(prefix):
input_text = input_text[len(prefix):].strip()
input_text = input_text.strip('\n')
prefix_found = True
break
if not prefix_found:
break
input_text = input_text.lstrip('\n :')
return str(input_text)
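    # Illustrative example (not from the original file): clean_response("Answer: Context: use the Trainer")
    # strips both prefixes in turn and returns "use the Trainer".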
def store_vectors(self,
pages: list[Document]) -> None:
"""Given a list of documents, split them into chunks, and store them in a vector store.
Args:
pages (list[Document]): list of pages (Documents) we have splitted
"""
content_batches = []
content_current_batch = []
current_char_count = 0
for page in pages:
content_current_batch.append(page)
current_char_count += len(page.page_content)
if current_char_count > 1e4:
content_batches.append(content_current_batch)
content_current_batch = []
current_char_count = 0
if len(content_current_batch) > 0:
content_batches.append(content_current_batch)
txt_embeddings = []
for batch in tqdm(content_batches, desc='Embedding documents', total=len(content_batches)):
batch_embeddings = self.embedding.embed_documents([p.page_content for p in batch])
txt_embeddings.extend(list(zip([p.page_content for p in batch], batch_embeddings)))
# Component for storing the embeddings in a vector store, using FAISS
vector_store = FAISS.from_embeddings(
text_embeddings=txt_embeddings,
metadatas=[p.metadata for p in pages],
embedding=self.embedding
)
with open(os.path.join(ROOT_DIR, 'retrieval_data/vectors.pickle'), 'wb') as f:
pickle.dump(vector_store, f)
self.vector_store = vector_store
def create_vector_store(self, repository_urls) -> None:
"""Download the repositories, load the data, split the data into chunks, and store the chunks in a vector store.
Args:
repository_urls (list[str]): list of repository urls to download
"""
scraper = WebScraper(path=self.data_path)
scraper.scrape()
for repo_url in repository_urls:
downloader = RepoDownloader(output_dir=self.data_path, current_dir="", repo_url=repo_url)
if os.path.exists(downloader.clone_dir):
continue
downloader.download_repo()
pages = self.load_data()
documents = self.split_pages(pages)
self.store_vectors(documents)
def create_chain(self,
prompt_template: str,
score_threshold: int=0.4) -> RetrievalQA:
"""Create a RetrievalQAWithScores given a prompt template.
Args:
prompt_template (str): The prompt template to use for the chain
"""
retriever = RetrieverWithScore(search_type='similarity',
vector_store=self.vector_store,
k=self.k,
score_threshold=score_threshold)
answer_question_prompt_template = PromptTemplate(
template=prompt_template,
input_variables=['context', 'question'])
# Component connecting the LLM with the prompt template
llm_chain = LLMChain(
llm=self.model,
prompt=answer_question_prompt_template,
)
doc_prompt = PromptTemplate(input_variables=['page_content'],
template='Context:\n{page_content}')
# Component connecting the context documents with the LLM chain
stuff_documents_chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name='context',
document_prompt=doc_prompt,
)
# Complete component for retrieval question answering
chain = RetrievalQA(
retriever=retriever,
combine_documents_chain=stuff_documents_chain,
return_source_documents=True,
)
return chain
def normalize_str(self,
answer: str):
"""Lower text and remove punctuation, articles and extra whitespace.
Copied from https://github.com/mandarjoshi90/triviaqa/blob/master/evaluation/triviaqa_evaluation.py
"""
def remove_articles(text: str) -> str:
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text: str) -> str:
return ' '.join(text.split())
def handle_punc(text: str) -> str:
exclude = set(string.punctuation + ''.join([u'‘', u'’', u'´', u'`']))
return ''.join(ch if ch not in exclude else ' ' for ch in text)
def lower(text: str) -> str:
return text.lower()
def remove_parentheses(s):
return re.sub(r'\(.*?\)', '', s)
def replace_underscore(s):
return re.sub('_', '-', s)
return white_space_fix(remove_parentheses(remove_articles(handle_punc(lower(replace_underscore(answer)))))).strip()
def set_eval_state(self) -> None:
"""Set the state of the chatbot to the evaluation state. This is used to change the chunk size, chunk overlap, and k"""
self.chunk_overlap = 150
self.chunk_size = 750
self.k = 1
self.model.model_kwargs['output_len'] = 40
def reload_chat_state(self) -> None:
"""Reload the chatbot state to the saved state the user set when creating the chatbot"""
self.chunk_overlap = self.saved_state['chunk_overlap']
self.chunk_size = self.saved_state['chunk_size']
self.k = self.saved_state['k']
def evaluate_simple(self,
data_path: str,
answer_question_string_template: str) -> str:
"""Evaluate the chatbot on simple retrieval dataset given a data_path and a chain
Args:
data_path (str): The path to the dataset
answer_question_string_template (str): The prompt to use for the chain
Returns:
str: The score of the chatbot on the dataset including number of exact matches, close matches, and total questions
"""
chain = self.create_chain(prompt_template=answer_question_string_template)
exact_match = 0
close_match = 0
total = 1
total_lines = sum(1 for _ in open(data_path))
with open(data_path, 'r') as file:
for line in tqdm(file, total=total_lines, desc="Processing lines"):
data = json.loads(line)
question = data.get('context')
continuation = data.get('continuation')
response = chain(question)
answer = self.clean_response(response['result'].lstrip('\n'))
if self.normalize_str(answer) == self.normalize_str(continuation):
exact_match += 1
elif self.normalize_str(continuation).replace(" ", "") in self.normalize_str(answer).replace(" ", ""):
close_match += 1
else:
print('\n', self.normalize_str(answer), '||', self.normalize_str(continuation), '\n')
print(f'{exact_match} exact matches and {close_match} close matches out of {total} questions.')
total += 1
time.sleep(0.5)
return f'Given Score: {(exact_match + 0.5*close_match)/ total} with {exact_match} exact matches and {close_match} close matches out of {total} questions.'
def evaluate_complex(self,
data_path: str,
answer_question_string_template: str) -> str:
"""Evaluate the chatbot on complex eval dataset given a data_path and a chain
Args:
data_path (str): The path to the dataset
answer_question_string_template (str): The prompt to use for the chain
Returns:
A long string of all questions, answers, and responses
"""
chain = self.create_chain(prompt_template=answer_question_string_template)
total_lines = sum(1 for _ in open(data_path))
with open(data_path, 'r') as file:
save = ''
for line in tqdm(file, total=total_lines, desc="Processing lines"):
data = json.loads(line)
question = data.get('context')
continuation = data.get('continuation')
response = chain(question)
answer = self.clean_response(response['result'].lstrip('\n'))
save += f'Question:\n{question}\nAnswer:\n{continuation}\nResponse:\n{answer}\n\n'
return save
def sub_query_chat(self,
query: str,
threshold = 0.4)-> str:
if not self.intent_chain:
save_k = self.k
self.k = 5
self.intent_chain = self.create_chain(prompt_template=SUBQUERY_INTENT_TEMPLATE)
self.k = save_k
intent_response = self.intent_chain(query)
intent_answer = self.clean_response(intent_response['result'].lstrip('\n'))
SUBQUERY_SUBQA_TEMPLATE = PARTIAL_SUBQA_TEMPLATE.format(intent_answer)
subQA_chain = self.create_chain(prompt_template=SUBQUERY_SUBQA_TEMPLATE)
subQA_response = subQA_chain(query)
subQA_answer = self.clean_response(subQA_response['result'].lstrip('\n'))
all_sub_QA = subQA_answer.split('\n')
sub_QA_injection = ''
# Don't create a new chain on every query
if not self.subchain:
self.subchain = self.create_chain(prompt_template=CHAT_30B_TEMPLATE, score_threshold=threshold)
for sub_QA in all_sub_QA:
if sub_QA:
response = self.subchain(sub_QA)
answer = self.clean_response(response['result'].lstrip('\n'))
if response['source_documents'] and response["source_documents"][0].metadata["score"]>threshold:
answer = self.clean_response(response['result'].lstrip('\n'))
sub_QA_injection += f'Question: {sub_QA} \nAnswer: {answer}\n'
if sub_QA_injection:
SUBQUERY_COMBINE_TEMPLATE = PARTIAL_COMBINE_TEMPLATE.format(str(sub_QA_injection).replace("{", "{{").replace("}", "}}"))
combine_chain = self.create_chain(prompt_template=SUBQUERY_COMBINE_TEMPLATE)
combine_response = combine_chain(query)
combine_answer = self.clean_response(combine_response['result'].lstrip('\n'))
combine_answer_sources = ''
for d in combine_response['source_documents']:
if d.metadata["score"] > 0.6:
combine_answer_sources = combine_answer_sources + f'{d.metadata["file_name"].replace("{slash}", "/")}\n'
if not combine_answer_sources:
return f'Answer: \n{str(combine_answer)}\n\nIntent: \n{str(intent_answer)}\n\n Sub-questions: \n{str(sub_QA_injection)}'
else:
return f'Answer: \n{str(combine_answer)}\n\nIntent: \n{str(intent_answer)}\n\n Sub-questions: \n{str(sub_QA_injection)}\nSources: \n{str(combine_answer_sources)}'
else:
return f"I'm not sure but here is my best answer: \n{self.chat(query)[7:]}"
def relation_sub_query_chat(self,
query: str,
threshold: int=0.4)-> str:
if not self.intent_chain:
save_k = self.k
self.k = 3
self.intent_chain = self.create_chain(prompt_template=SUBQUERY_INTENT_TEMPLATE)
self.k = save_k
intent_response = self.intent_chain(query)
intent_answer = self.clean_response(intent_response['result'].lstrip('\n'))
SUBQUERY_SUBQA_TEMPLATE = PARTIAL_SUBQA_TEMPLATE.format(intent_answer)
subQA_chain = self.create_chain(prompt_template=SUBQUERY_SUBQA_TEMPLATE)
subQA_response = subQA_chain(query)
subQA_answer = self.clean_response(subQA_response['result'].lstrip('\n'))
all_sub_QA = subQA_answer.split('\n')
sub_QA_injection = ''
# Don't create a new chain on every query
if not self.subsubchain:
save_k = self.k
self.k = 2
self.subsubchain = self.create_chain(prompt_template=SUBQUERY_RELATED_TEMPLATE, score_threshold=0)
self.k = save_k
for sub_QA in all_sub_QA:
if sub_QA:
answerable = self.clean_response(self.subsubchain(sub_QA)['result'].lstrip('\n'))
if "Yes" in answerable:
if not self.subchain:
self.subchain = self.create_chain(prompt_template=CHAT_30B_TEMPLATE)
response = self.subchain(sub_QA)
answer = self.clean_response(response['result'].lstrip('\n'))
sub_QA_injection += f'Question: {sub_QA} \nAnswer: {answer}\n'
if sub_QA_injection:
SUBQUERY_COMBINE_TEMPLATE = PARTIAL_COMBINE_TEMPLATE.format(str(sub_QA_injection).replace("{", "{{").replace("}", "}}"))
combine_chain = self.create_chain(prompt_template=SUBQUERY_COMBINE_TEMPLATE)
combine_response = combine_chain(query)
combine_answer = self.clean_response(combine_response['result'].lstrip('\n'))
sources = ''
for d in combine_response['source_documents']:
if d.metadata["score"] > threshold:
sources = sources + f'{d.metadata["file_name"].replace("{slash}", "/")}\n'
if not sources:
return f'Answer: \n{str(combine_answer)}\n\nIntent: \n{str(intent_answer)}\n\n Sub-questions: \n{str(sub_QA_injection)}'
else:
return f'Answer: \n{str(combine_answer)}\n\nIntent: \n{str(intent_answer)}\n\n Sub-questions: \n{str(sub_QA_injection)}\nSources: \n{str(sources)}'
else:
return f"I'm not sure but here is my best answer: \n{self.chat(query)[7:]}"
def chat(self,
query: str) -> str:
"""Chat with the chatbot given a query
Args:
query (str): The query to ask the chatbot
"""
# Don't create a new chain on every query
if not self.chat_chain:
self.chat_chain = self.create_chain(prompt_template=CHAT_30B_TEMPLATE, score_threshold=0)
response = self.chat_chain(query)
answer = self.clean_response(response['result'].lstrip('\n'))
sources = ''
for d in response['source_documents']:
if d.metadata["score"] > 0.6:
sources = sources + f'{d.metadata["file_name"].replace("{slash}", "/")}\n'
if not sources:
return f"Answer: \n{answer}"
else:
return f"Answer: \n{answer} \nSources: \n{sources}" | [
"langchain.document_loaders.UnstructuredFileLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.chains.RetrievalQA",
"langchain.vectorstores.FAISS.from_embeddings",
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain"
] | [((570, 595), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (585, 595), False, 'import sys\n'), ((543, 568), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (558, 568), False, 'import os\n'), ((9331, 9354), 'os.walk', 'os.walk', (['self.data_path'], {}), '(self.data_path)\n', (9338, 9354), False, 'import os\n'), ((10291, 10451), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'self.chunk_size', 'chunk_overlap': 'self.chunk_overlap', 'separators': "['(?<=\\\\.) ', '(?<=\\\\?) ', '(?<=\\\\!) ', '\\\\n']"}), "(chunk_size=self.chunk_size, chunk_overlap=\n self.chunk_overlap, separators=['(?<=\\\\.) ', '(?<=\\\\?) ', '(?<=\\\\!) ',\n '\\\\n'])\n", (10321, 10451), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((13056, 13178), 'langchain.vectorstores.FAISS.from_embeddings', 'FAISS.from_embeddings', ([], {'text_embeddings': 'txt_embeddings', 'metadatas': '[p.metadata for p in pages]', 'embedding': 'self.embedding'}), '(text_embeddings=txt_embeddings, metadatas=[p.metadata for\n p in pages], embedding=self.embedding)\n', (13077, 13178), False, 'from langchain.vectorstores import FAISS\n'), ((13707, 13738), 'web_downloader.WebScraper', 'WebScraper', ([], {'path': 'self.data_path'}), '(path=self.data_path)\n', (13717, 13738), False, 'from web_downloader import WebScraper\n'), ((14753, 14838), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (14767, 14838), False, 'from langchain.prompts import PromptTemplate\n'), ((14945, 15009), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self.model', 'prompt': 'answer_question_prompt_template'}), '(llm=self.model, prompt=answer_question_prompt_template)\n', (14953, 15009), False, 'from langchain.chains import LLMChain, RetrievalQA\n'), ((15067, 15160), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['page_content']", 'template': '"""Context:\n{page_content}"""'}), '(input_variables=[\'page_content\'], template=\n """Context:\n{page_content}""")\n', (15081, 15160), False, 'from langchain.prompts import PromptTemplate\n'), ((15294, 15400), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_variable_name': '"""context"""', 'document_prompt': 'doc_prompt'}), "(llm_chain=llm_chain, document_variable_name='context',\n document_prompt=doc_prompt)\n", (15313, 15400), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((15523, 15637), 'langchain.chains.RetrievalQA', 'RetrievalQA', ([], {'retriever': 'retriever', 'combine_documents_chain': 'stuff_documents_chain', 'return_source_documents': '(True)'}), '(retriever=retriever, combine_documents_chain=\n stuff_documents_chain, return_source_documents=True)\n', (15534, 15637), False, 'from langchain.chains import LLMChain, RetrievalQA\n'), ((8866, 8907), 'os.path.join', 'os.path.join', (['data_path', '"""vectors.pickle"""'], {}), "(data_path, 'vectors.pickle')\n", (8878, 8907), False, 'import os\n'), ((13329, 13357), 'pickle.dump', 'pickle.dump', (['vector_store', 'f'], {}), '(vector_store, f)\n', (13340, 13357), False, 'import pickle\n'), ((13830, 13906), 'repo_downloader.RepoDownloader', 'RepoDownloader', ([], {'output_dir': 'self.data_path', 'current_dir': 
'""""""', 'repo_url': 'repo_url'}), "(output_dir=self.data_path, current_dir='', repo_url=repo_url)\n", (13844, 13906), False, 'from repo_downloader import RepoDownloader\n'), ((13922, 13958), 'os.path.exists', 'os.path.exists', (['downloader.clone_dir'], {}), '(downloader.clone_dir)\n', (13936, 13958), False, 'import os\n'), ((16037, 16074), 're.sub', 're.sub', (['"""\\\\b(a|an|the)\\\\b"""', '""" """', 'text'], {}), "('\\\\b(a|an|the)\\\\b', ' ', text)\n", (16043, 16074), False, 'import re\n'), ((16499, 16525), 're.sub', 're.sub', (['"""\\\\(.*?\\\\)"""', '""""""', 's'], {}), "('\\\\(.*?\\\\)', '', s)\n", (16505, 16525), False, 'import re\n'), ((16588, 16607), 're.sub', 're.sub', (['"""_"""', '"""-"""', 's'], {}), "('_', '-', s)\n", (16594, 16607), False, 'import re\n'), ((18150, 18204), 'tqdm.tqdm', 'tqdm', (['file'], {'total': 'total_lines', 'desc': '"""Processing lines"""'}), "(file, total=total_lines, desc='Processing lines')\n", (18154, 18204), False, 'from tqdm import tqdm\n'), ((19925, 19979), 'tqdm.tqdm', 'tqdm', (['file'], {'total': 'total_lines', 'desc': '"""Processing lines"""'}), "(file, total=total_lines, desc='Processing lines')\n", (19929, 19979), False, 'from tqdm import tqdm\n'), ((9028, 9042), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9039, 9042), False, 'import pickle\n'), ((13248, 13303), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""retrieval_data/vectors.pickle"""'], {}), "(ROOT_DIR, 'retrieval_data/vectors.pickle')\n", (13260, 13303), False, 'import os\n'), ((18229, 18245), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (18239, 18245), False, 'import json\n'), ((19033, 19048), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (19043, 19048), False, 'import time\n'), ((20004, 20020), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (20014, 20020), False, 'import json\n'), ((8932, 8978), 'os.path.join', 'os.path.join', (['self.data_path', '"""vectors.pickle"""'], {}), "(self.data_path, 'vectors.pickle')\n", (8944, 8978), False, 'import os\n'), ((9473, 9504), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (9485, 9504), False, 'import os\n'), ((9535, 9585), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['file_path'], {'encoding': '"""utf8"""'}), "(file_path, encoding='utf8')\n", (9557, 9585), False, 'from langchain.document_loaders import UnstructuredFileLoader\n')] |
"""Base interface that all chains should implement."""
from __future__ import annotations
import asyncio
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import langchain
import yaml
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainRun,
CallbackManager,
CallbackManagerForChainRun,
Callbacks,
)
from langchain.load.dump import dumpd
from langchain.load.serializable import Serializable
from langchain.pydantic_v1 import Field, root_validator, validator
from langchain.schema import RUN_KEY, BaseMemory, RunInfo
from langchain.schema.runnable import Runnable, RunnableConfig
logger = logging.getLogger(__name__)
def _get_verbosity() -> bool:
return langchain.verbose
class Chain(Serializable, Runnable[Dict[str, Any], Dict[str, Any]], ABC):
"""Abstract base class for creating structured sequences of calls to components.
Chains should be used to encode a sequence of calls to components like
models, document retrievers, other chains, etc., and provide a simple interface
to this sequence.
Copied from langchain v0.0.283.
The Chain interface makes it easy to create apps that are:
- Stateful: add Memory to any Chain to give it state,
- Observable: pass Callbacks to a Chain to execute additional functionality,
like logging, outside the main sequence of component calls,
- Composable: the Chain API is flexible enough that it is easy to combine
Chains with other components, including other Chains.
The main methods exposed by chains are:
- `__call__`: Chains are callable. The `__call__` method is the primary way to
execute a Chain. This takes inputs as a dictionary and returns a
dictionary output.
- `run`: A convenience method that takes inputs as args/kwargs and returns the
output as a string or object. This method can only be used for a subset of
chains and cannot return as rich of an output as `__call__`.
"""
def invoke(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Dict[str, Any]:
config = config or {}
return self(
input,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
**kwargs,
)
async def ainvoke(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Dict[str, Any]:
if type(self)._acall == Chain._acall:
# If the chain does not implement async, fall back to default implementation
return await asyncio.get_running_loop().run_in_executor(
None, partial(self.invoke, input, config, **kwargs)
)
config = config or {}
return await self.acall(
input,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
**kwargs,
)
memory: Optional[BaseMemory] = None
"""Optional memory object. Defaults to None.
Memory is a class that gets called at the start
and at the end of every chain. At the start, memory loads variables and passes
them along in the chain. At the end, it saves any returned variables.
There are many different types of memory - please see memory docs
for the full catalog."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Optional list of callback handlers (or callback manager). Defaults to None.
Callback handlers are called throughout the lifecycle of a call to a chain,
starting with on_chain_start, ending with on_chain_end or on_chain_error.
Each custom chain can optionally call additional callback methods, see Callback docs
for full details."""
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
"""Deprecated, use `callbacks` instead."""
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether or not run in verbose mode. In verbose mode, some intermediate logs
will be printed to the console. Defaults to `langchain.verbose` value."""
tags: Optional[List[str]] = None
"""Optional list of tags associated with the chain. Defaults to None.
These tags will be associated with each call to this chain,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to eg identify a specific instance of a chain with its use case.
"""
metadata: Optional[Dict[str, Any]] = None
"""Optional metadata associated with the chain. Defaults to None.
This metadata will be associated with each call to this chain,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to eg identify a specific instance of a chain with its use case.
"""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@property
def _chain_type(self) -> str:
raise NotImplementedError("Saving not supported for this chain type.")
@root_validator()
def raise_callback_manager_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
if values.get("callbacks") is not None:
raise ValueError(
"Cannot specify both callback_manager and callbacks. "
"callback_manager is deprecated, callbacks is the preferred "
"parameter to pass in."
)
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""Set the chain verbosity.
Defaults to the global setting if not specified by the user.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@property
@abstractmethod
def input_keys(self) -> List[str]:
"""Keys expected to be in the chain input."""
raise NotImplementedError
@property
@abstractmethod
def output_keys(self) -> List[str]:
"""Keys expected to be in the chain output."""
raise NotImplementedError
def _validate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Check that all inputs are present."""
missing_keys = set(self.input_keys).difference(inputs)
if missing_keys:
raise ValueError(f"Missing some input keys: {missing_keys}")
def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
missing_keys = set(self.output_keys).difference(outputs)
if missing_keys:
raise ValueError(f"Missing some output keys: {missing_keys}")
@abstractmethod
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Execute the chain.
This is a private method that is not user-facing. It is only called within
`Chain.__call__`, which is the user-facing wrapper method that handles
callbacks configuration and some input/output processing.
Args:
inputs: A dict of named inputs to the chain. Assumed to contain all inputs
specified in `Chain.input_keys`, including any inputs added by memory.
run_manager: The callbacks manager that contains the callback handlers for
this run of the chain.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
raise NotImplementedError
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Asynchronously execute the chain.
This is a private method that is not user-facing. It is only called within
`Chain.acall`, which is the user-facing wrapper method that handles
callbacks configuration and some input/output processing.
Args:
inputs: A dict of named inputs to the chain. Assumed to contain all inputs
specified in `Chain.input_keys`, including any inputs added by memory.
run_manager: The callbacks manager that contains the callback handlers for
this run of the chain.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
raise NotImplementedError("Async call not supported for this chain type.")
def __call__(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
run_name: Optional[str] = None,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Execute the chain.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param. Should contain all inputs specified in
`Chain.input_keys` except for inputs that will be set by the chain's
memory.
return_only_outputs: Whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
metadata: Optional metadata associated with the chain. Defaults to None
include_run_info: Whether to include run info in the response. Defaults
to False.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
inputs = self.prep_inputs(inputs)
callback_manager = CallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
metadata,
self.metadata,
)
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
run_manager = callback_manager.on_chain_start(
dumpd(self),
inputs,
name=run_name,
)
try:
outputs = (
self._call(inputs, run_manager=run_manager)
if new_arg_supported
else self._call(inputs)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_chain_error(e)
raise e
run_manager.on_chain_end(outputs)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
async def acall(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
run_name: Optional[str] = None,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Asynchronously execute the chain.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param. Should contain all inputs specified in
`Chain.input_keys` except for inputs that will be set by the chain's
memory.
return_only_outputs: Whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
metadata: Optional metadata associated with the chain. Defaults to None
include_run_info: Whether to include run info in the response. Defaults
to False.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
inputs = self.prep_inputs(inputs)
callback_manager = AsyncCallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
metadata,
self.metadata,
)
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
run_manager = await callback_manager.on_chain_start(
dumpd(self),
inputs,
name=run_name,
)
try:
outputs = (
await self._acall(inputs, run_manager=run_manager)
if new_arg_supported
else await self._acall(inputs)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_chain_error(e)
raise e
await run_manager.on_chain_end(outputs)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prepare chain outputs, and save info about this run to memory.
Args:
inputs: Dictionary of chain inputs, including any inputs added by chain
memory.
outputs: Dictionary of initial chain outputs.
return_only_outputs: Whether to only return the chain outputs. If False,
inputs are also added to the final outputs.
Returns:
A dict of the final chain outputs.
"""
self._validate_outputs(outputs)
if self.memory is not None:
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prepare chain inputs, including adding inputs from memory.
Args:
inputs: Dictionary of raw inputs, or single input if chain expects
only one param. Should contain all inputs specified in
`Chain.input_keys` except for inputs that will be set by the chain's
memory.
Returns:
A dictionary of all inputs, including those added by the chain's memory.
"""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
if len(_input_keys) != 1:
raise ValueError(
f"A single string input was passed in, but this chain expects "
f"multiple inputs ({_input_keys}). When a chain expects "
f"multiple inputs, please call it by passing in a dictionary, "
"eg `chain({'foo': 1, 'bar': 2})`"
)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = self.memory.load_memory_variables(inputs)
inputs = dict(inputs, **external_context)
self._validate_inputs(inputs)
return inputs
@property
def _run_output_key(self) -> str:
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
return self.output_keys[0]
def run(
self,
*args: Any,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""Convenience method for executing chain.
The main difference between this method and `Chain.__call__` is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas `Chain.__call__` expects a single input dictionary
with all the inputs
Args:
*args: If the chain expects a single input, it can be passed in as the
sole positional argument.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs: If the chain expects multiple inputs, they can be passed in
directly as keyword arguments.
Returns:
The chain output.
Example:
.. code-block:: python
# Suppose we have a single-input chain that takes a 'question' string:
chain.run("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
chain.run(question=question, context=context)
# -> "The temperature in Boise is..."
"""
# Run at start to make sure this is possible/defined
_output_key = self._run_output_key
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
_output_key
]
if kwargs and not args:
return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
_output_key
]
if not kwargs and not args:
raise ValueError(
"`run` supported with either positional arguments or keyword arguments,"
" but none were provided."
)
else:
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
async def arun(
self,
*args: Any,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""Convenience method for executing chain.
The main difference between this method and `Chain.__call__` is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas `Chain.__call__` expects a single input dictionary
with all the inputs
Args:
*args: If the chain expects a single input, it can be passed in as the
sole positional argument.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs: If the chain expects multiple inputs, they can be passed in
directly as keyword arguments.
Returns:
The chain output.
Example:
.. code-block:: python
# Suppose we have a single-input chain that takes a 'question' string:
await chain.arun("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
await chain.arun(question=question, context=context)
# -> "The temperature in Boise is..."
"""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
elif args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return (
await self.acall(
args[0], callbacks=callbacks, tags=tags, metadata=metadata
)
)[self.output_keys[0]]
if kwargs and not args:
return (
await self.acall(
kwargs, callbacks=callbacks, tags=tags, metadata=metadata
)
)[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def dict(self, **kwargs: Any) -> Dict:
"""Dictionary representation of chain.
Expects `Chain._chain_type` property to be implemented and for memory to be
null.
Args:
**kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`
method.
Returns:
A dictionary representation of the chain.
Example:
.. code-block:: python
chain.dict(exclude_unset=True)
# -> {"_type": "foo", "verbose": False, ...}
"""
if self.memory is not None:
raise ValueError("Saving of memory is not yet supported.")
_dict = super().dict(**kwargs)
_dict["_type"] = self._chain_type
return _dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the chain.
Expects `Chain._chain_type` property to be implemented and for memory to be
null.
Args:
file_path: Path to file to save the chain to.
Example:
.. code-block:: python
chain.save(file_path="path/chain.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
chain_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(chain_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(chain_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
def apply(
self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
) -> List[Dict[str, str]]:
"""Call the chain on all inputs in the list."""
return [self(inputs, callbacks=callbacks) for inputs in input_list]
| [
"langchain.pydantic_v1.Field",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.load.dump.dumpd",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.RunInfo",
"langchain.pydantic_v1.validator",
"langchain.pydantic_v1.root_validator"
] | [((858, 885), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (875, 885), False, 'import logging\n'), ((3854, 3887), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (3859, 3887), False, 'from langchain.pydantic_v1 import Field, root_validator, validator\n'), ((4297, 4330), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (4302, 4330), False, 'from langchain.pydantic_v1 import Field, root_validator, validator\n'), ((4398, 4435), 'langchain.pydantic_v1.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (4403, 4435), False, 'from langchain.pydantic_v1 import Field, root_validator, validator\n'), ((5528, 5544), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (5542, 5544), False, 'from langchain.pydantic_v1 import Field, root_validator, validator\n'), ((6302, 6345), 'langchain.pydantic_v1.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (6311, 6345), False, 'from langchain.pydantic_v1 import Field, root_validator, validator\n'), ((11248, 11360), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose, tags,\n self.tags, metadata, self.metadata)\n', (11273, 11360), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((14156, 14273), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose,\n tags, self.tags, metadata, self.metadata)\n', (14186, 14273), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((6057, 6159), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. 
Please use callbacks instead.',\n DeprecationWarning)\n", (6070, 6159), False, 'import warnings\n'), ((11607, 11618), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (11612, 11618), False, 'from langchain.load.dump import dumpd\n'), ((12205, 12239), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (12212, 12239), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((15151, 15185), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (15158, 15185), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((25263, 25278), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (25267, 25278), False, 'from pathlib import Path\n'), ((14527, 14538), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (14532, 14538), False, 'from langchain.load.dump import dumpd\n'), ((25598, 25632), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (25607, 25632), False, 'import json\n'), ((3080, 3125), 'functools.partial', 'partial', (['self.invoke', 'input', 'config'], {}), '(self.invoke, input, config, **kwargs)\n', (3087, 3125), False, 'from functools import partial\n'), ((11480, 11509), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (11497, 11509), False, 'import inspect\n'), ((14393, 14423), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', (14410, 14423), False, 'import inspect\n'), ((25735, 25785), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (25744, 25785), False, 'import yaml\n'), ((3014, 3040), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (3038, 3040), False, 'import asyncio\n')] |
import os
from fastapi import FastAPI, WebSocket
from fastapi.responses import HTMLResponse
from google.api_core.client_options import ClientOptions
from google.cloud.speech_v1 import SpeechAsyncClient
from google.cloud.texttospeech_v1 import TextToSpeechAsyncClient
from langchain_community.chat_models import ChatVertexAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
# 0 - VoiceStream imports
from voice_stream import map_step, log_step, recover_exception_step
from voice_stream.audio import AudioFormat
from voice_stream.integrations.fastapi import (
fastapi_websocket_bytes_source,
fastapi_websocket_bytes_sink,
)
from voice_stream.integrations.google import (
google_speech_v1_step,
google_text_to_speech_step,
)
from voice_stream.integrations.langchain import langchain_load_memory_step
# 1 - HTML shown by the browser
html = """
<!DOCTYPE html>
<html>
<head><title>VoiceStream Quickstart</title></head>
<body>
<script
src="https://cdn.jsdelivr.net/gh/DaveDeCaprio/voice-stream@main/examples/static/audio_ws.js">
</script>
<button onclick="startAudio('audio-player', '/ws/audio')">Start Voice Chat</button>
<button onclick="stopAudio()">Stop Voice Chat</button>
<audio id="audio-player"></audio>
</body>
</html>
"""
# 2 - FastAPI app and route to serve the UI
app = FastAPI()
@app.get("/")
def get():
return HTMLResponse(html)
# 3 - Set up Google client and credentials
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "google_creds.json"
speech_async_client = SpeechAsyncClient(
client_options=ClientOptions(api_endpoint="us-speech.googleapis.com")
)
text_to_speech_async_client = TextToSpeechAsyncClient()
chain = (
ChatPromptTemplate.from_messages([("human", "{query}")])
| ChatVertexAI()
| StrOutputParser()
)
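# The LCEL pipeline above maps {"query": <recognized speech>} to a plain-text model
# reply, which the text-to-speech step below converts back into audio.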
# 4 - The VoiceStream data flow to run the voice chat
@app.websocket("/ws/audio")
async def audio_websocket_endpoint(websocket: WebSocket):
stream = fastapi_websocket_bytes_source(websocket)
stream = google_speech_v1_step(
stream,
speech_async_client,
audio_format=AudioFormat.WEBM_OPUS,
)
stream = log_step(stream, "Recognized speech")
stream = map_step(stream, lambda x: {"query": x})
stream = langchain_load_memory_step(stream, chain, on_completion="")
stream = recover_exception_step(
stream,
Exception,
lambda x: "Google blocked the response. Ending conversation.",
)
stream = google_text_to_speech_step(
stream, text_to_speech_async_client, audio_format=AudioFormat.MP3
)
stream = map_step(stream, lambda x: x.audio)
await fastapi_websocket_bytes_sink(stream, websocket)
| [
"langchain_core.prompts.ChatPromptTemplate.from_messages",
"langchain_core.output_parsers.StrOutputParser",
"langchain_community.chat_models.ChatVertexAI"
] | [((1420, 1429), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (1427, 1429), False, 'from fastapi import FastAPI, WebSocket\n'), ((1746, 1771), 'google.cloud.texttospeech_v1.TextToSpeechAsyncClient', 'TextToSpeechAsyncClient', ([], {}), '()\n', (1769, 1771), False, 'from google.cloud.texttospeech_v1 import TextToSpeechAsyncClient\n'), ((1468, 1486), 'fastapi.responses.HTMLResponse', 'HTMLResponse', (['html'], {}), '(html)\n', (1480, 1486), False, 'from fastapi.responses import HTMLResponse\n'), ((1870, 1887), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (1885, 1887), False, 'from langchain_core.output_parsers import StrOutputParser\n'), ((2045, 2086), 'voice_stream.integrations.fastapi.fastapi_websocket_bytes_source', 'fastapi_websocket_bytes_source', (['websocket'], {}), '(websocket)\n', (2075, 2086), False, 'from voice_stream.integrations.fastapi import fastapi_websocket_bytes_source, fastapi_websocket_bytes_sink\n'), ((2100, 2191), 'voice_stream.integrations.google.google_speech_v1_step', 'google_speech_v1_step', (['stream', 'speech_async_client'], {'audio_format': 'AudioFormat.WEBM_OPUS'}), '(stream, speech_async_client, audio_format=AudioFormat\n .WEBM_OPUS)\n', (2121, 2191), False, 'from voice_stream.integrations.google import google_speech_v1_step, google_text_to_speech_step\n'), ((2231, 2268), 'voice_stream.log_step', 'log_step', (['stream', '"""Recognized speech"""'], {}), "(stream, 'Recognized speech')\n", (2239, 2268), False, 'from voice_stream import map_step, log_step, recover_exception_step\n'), ((2282, 2322), 'voice_stream.map_step', 'map_step', (['stream', "(lambda x: {'query': x})"], {}), "(stream, lambda x: {'query': x})\n", (2290, 2322), False, 'from voice_stream import map_step, log_step, recover_exception_step\n'), ((2336, 2395), 'voice_stream.integrations.langchain.langchain_load_memory_step', 'langchain_load_memory_step', (['stream', 'chain'], {'on_completion': '""""""'}), "(stream, chain, on_completion='')\n", (2362, 2395), False, 'from voice_stream.integrations.langchain import langchain_load_memory_step\n'), ((2409, 2518), 'voice_stream.recover_exception_step', 'recover_exception_step', (['stream', 'Exception', "(lambda x: 'Google blocked the response. Ending conversation.')"], {}), "(stream, Exception, lambda x:\n 'Google blocked the response. 
Ending conversation.')\n", (2431, 2518), False, 'from voice_stream import map_step, log_step, recover_exception_step\n'), ((2559, 2656), 'voice_stream.integrations.google.google_text_to_speech_step', 'google_text_to_speech_step', (['stream', 'text_to_speech_async_client'], {'audio_format': 'AudioFormat.MP3'}), '(stream, text_to_speech_async_client,\n audio_format=AudioFormat.MP3)\n', (2585, 2656), False, 'from voice_stream.integrations.google import google_speech_v1_step, google_text_to_speech_step\n'), ((2680, 2715), 'voice_stream.map_step', 'map_step', (['stream', '(lambda x: x.audio)'], {}), '(stream, lambda x: x.audio)\n', (2688, 2715), False, 'from voice_stream import map_step, log_step, recover_exception_step\n'), ((1659, 1713), 'google.api_core.client_options.ClientOptions', 'ClientOptions', ([], {'api_endpoint': '"""us-speech.googleapis.com"""'}), "(api_endpoint='us-speech.googleapis.com')\n", (1672, 1713), False, 'from google.api_core.client_options import ClientOptions\n'), ((1786, 1842), 'langchain_core.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('human', '{query}')]"], {}), "([('human', '{query}')])\n", (1818, 1842), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((1849, 1863), 'langchain_community.chat_models.ChatVertexAI', 'ChatVertexAI', ([], {}), '()\n', (1861, 1863), False, 'from langchain_community.chat_models import ChatVertexAI\n'), ((2726, 2773), 'voice_stream.integrations.fastapi.fastapi_websocket_bytes_sink', 'fastapi_websocket_bytes_sink', (['stream', 'websocket'], {}), '(stream, websocket)\n', (2754, 2773), False, 'from voice_stream.integrations.fastapi import fastapi_websocket_bytes_source, fastapi_websocket_bytes_sink\n')] |
import os
import langchain
from langchain.chains import LLMChain, LLMRequestsChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.llms import VertexAI
from langchain import PromptTemplate, LLMChain
#langchain.debug = True
#template = """Question: {question}
#
#Answer: Let's think step by step."""
#
#prompt = PromptTemplate(template=template, input_variables=["question"])
#
#llm = VertexAI()
#llm_chain = LLMChain(prompt=prompt, llm=llm)
#
#question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
#
#a = llm_chain.run(question)
#print(a)
template = """Within the markdown block below is the full content of a website I am interested in.
```
{requests_result}
```
{query}?"""
#"http://feeds.hanselman.com/~/676711904/0/scotthanselman~Using-Home-Assistant-to-integrate-a-Unifi-Protect-G-Doorbell-and-Amazon-Alexa-to-announce-visitors",
#"http://feeds.hanselman.com/~/673288256/0/scotthanselman~NET-Hot-Reload-and-Refused-to-connect-to-ws-because-it-violates-the-Content-Security-Policy-directive-because-Web-Sockets",
#"http://feeds.hanselman.com/~/673288256/0/scotthanselman~NET-Hot-Reload-and-Refused-to-connect-to-ws-because-it-violates-the-Content-Security-Policy-directive-because-Web-Sockets",
#"https://www.theverge.com/2023/6/2/23746354/apple-vr-headset-rumors-metaverse-potential",
#"https://lifehacker.com/30-of-the-best-queer-movies-of-the-last-century-1850471612",
#"https://slashdot.org/story/23/06/02/1039236/fidelity-cuts-reddit-valuation-by-41?utm_source=atom1.0mainlinkanon&utm_medium=feed",
#"https://tech.slashdot.org/story/23/06/02/1237215/meta-requires-office-workers-to-return-to-desks-three-days-a-week?utm_source=atom1.0mainlinkanon&utm_medium=feed",
#"https://browse.feddit.de/",
#"https://fedia.io/",
#"https://blurha.sh/",
#"https://www.inmytree.co.za",
#"https://generalrobots.substack.com/p/dimension-hopper-part-1",
#"https://aws.amazon.com/blogs/machine-learning/technology-innovation-institute-trains-the-state-of-the-art-falcon-llm-40b-foundation-model-on-amazon-sagemaker/"
for url in [x.strip() for x in open("urls.txt").readlines()]:
llm = VertexAI(max_output_tokens=1024)
PROMPT = PromptTemplate(
input_variables=["query", "requests_result"],
template=template,
)
chain = LLMRequestsChain(llm_chain = LLMChain(llm=llm, prompt=PROMPT))
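    # LLMRequestsChain fetches the page at inputs["url"] and passes the retrieved page
    # text to the inner LLMChain as the {requests_result} variable of the prompt.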
inputs = {
"query": "What is the article about?",
"url": url
}
a = chain(inputs)
#print(a)
print("---------")
print(url)
print(a['output'])
print("------------------")
#from langchain.embeddings import VertexAIEmbeddings
#
#embeddings = VertexAIEmbeddings()
#text = "This is a test document."
#query_result = embeddings.embed_query(text)
#doc_result = embeddings.embed_documents([text])
#print(query_result)
| [
"langchain.LLMChain",
"langchain.PromptTemplate",
"langchain.llms.VertexAI"
] | [((2164, 2196), 'langchain.llms.VertexAI', 'VertexAI', ([], {'max_output_tokens': '(1024)'}), '(max_output_tokens=1024)\n', (2172, 2196), False, 'from langchain.llms import VertexAI\n'), ((2210, 2289), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query', 'requests_result']", 'template': 'template'}), "(input_variables=['query', 'requests_result'], template=template)\n", (2224, 2289), False, 'from langchain import PromptTemplate, LLMChain\n'), ((2355, 2387), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'PROMPT'}), '(llm=llm, prompt=PROMPT)\n', (2363, 2387), False, 'from langchain import PromptTemplate, LLMChain\n')] |
import langchain
from dotenv import load_dotenv
from langchain.chains import HypotheticalDocumentEmbedder, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
langchain.debug = True
load_dotenv()
# Prepare HyDE (embedding of a hypothetical answer generated by the LLM)
base_embeddings = OpenAIEmbeddings()
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
embeddings = HypotheticalDocumentEmbedder.from_llm(chat, base_embeddings, "web_search")
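# HyDE: for each query the chat model first drafts a hypothetical answer (using the
# built-in "web_search" prompt) and that answer, not the raw query, is embedded with
# base_embeddings; the idea is that it lands closer to the stored document vectors.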
# Load the vectors previously saved with FAISS
db = FAISS.load_local("./tmp/faiss", embeddings)
retriever = db.as_retriever()
# Prepare "RetrievalQA", which retrieves relevant documents and then has the LLM generate the answer
qa_chain = RetrievalQA.from_chain_type(
llm=chat, chain_type="stuff", retriever=retriever
)
query = "LangChainとは"
result = qa_chain.run(query)
print(result)
| [
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.vectorstores.FAISS.load_local",
"langchain.chat_models.ChatOpenAI",
"langchain.chains.HypotheticalDocumentEmbedder.from_llm",
"langchain.embeddings.OpenAIEmbeddings"
] | [((280, 293), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (291, 293), False, 'from dotenv import load_dotenv\n'), ((347, 365), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (363, 365), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((373, 426), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (383, 426), False, 'from langchain.chat_models import ChatOpenAI\n'), ((440, 514), 'langchain.chains.HypotheticalDocumentEmbedder.from_llm', 'HypotheticalDocumentEmbedder.from_llm', (['chat', 'base_embeddings', '"""web_search"""'], {}), "(chat, base_embeddings, 'web_search')\n", (477, 514), False, 'from langchain.chains import HypotheticalDocumentEmbedder, RetrievalQA\n'), ((544, 587), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['"""./tmp/faiss"""', 'embeddings'], {}), "('./tmp/faiss', embeddings)\n", (560, 587), False, 'from langchain.vectorstores import FAISS\n'), ((681, 759), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'chain_type': '"""stuff"""', 'retriever': 'retriever'}), "(llm=chat, chain_type='stuff', retriever=retriever)\n", (708, 759), False, 'from langchain.chains import HypotheticalDocumentEmbedder, RetrievalQA\n')] |
import os
from langchain.embeddings import OpenAIEmbeddings
import langchain
from annoy import AnnoyIndex
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from sentence_transformers import SentenceTransformer, util
import sys
embeddings = OpenAIEmbeddings(openai_api_key="")
model = SentenceTransformer('sentence-transformers/allenai-specter', device='cpu')
##name = "langchain"
##GITHUB_PATH = "/home/raghavan/langchain"
##name = "open_interpreter"
##GITHUB_PATH = "/home/raghavan/open-interpreter"
name = sys.argv[1]
GITHUB_PATH = sys.argv[2]
def get_files(path):
files = []
for r, d, f in os.walk(path):
for file in f:
if ".py" in file or ".sh" in file or ".java" in file:
files.append(os.path.join(r, file))
return files
def get_file_embeddings(path):
try:
text = get_file_contents(path)
ret = embeddings.embed_query(text)
return ret
    except Exception:
return None
def get_file_contents(path):
with open(path, 'r') as f:
return f.read()
print (name)
print (GITHUB_PATH)
files = get_files(GITHUB_PATH)
print(len(files))
embeddings_dict = {}
embeddings_dict2 = {}
i = 0
s = set()
for file in files:
e = get_file_embeddings(file)
if (e is None):
print ("Error in embedding file: ")
print (file)
s.add(file)
else:
embeddings_dict[file] = e
embeddings_dict2[file] = model.encode(get_file_contents(file))
i+=1
if (i%100 == 0):
print ("No of files processed: " + str(i))
t = AnnoyIndex(1536, 'angular')
t2 = AnnoyIndex(768, 'angular')
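# 1536 is the dimensionality of the OpenAI (ada-002) embeddings and 768 that of the
# allenai-specter sentence-transformer; 'angular' is Annoy's cosine-style metric.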
index_map = {}
i = 0
for file in embeddings_dict:
t.add_item(i, embeddings_dict[file])
t2.add_item(i, embeddings_dict2[file])
index_map[i] = file
i+=1
t.build(len(files))
name1 = name + "_ada.ann"
t.save(name1)
t2.build(len(files))
name2 = name + "_specter.ann"
t2.save(name2)
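# Illustrative follow-up, not part of the original script: querying a saved index
# later, assuming the query text is embedded with the same OpenAI embedding model.
#
#   u = AnnoyIndex(1536, 'angular')
#   u.load(name1)
#   hits = u.get_nns_by_vector(embeddings.embed_query("vector store"), 5)
#   print([index_map[i] for i in hits])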
with open('index_map' + name + '.txt', 'w') as f:
for idx, path in index_map.items():
f.write(f'{idx}\t{path}\n')
print("Indices created :" + name1 + " , " + name2)
print("Number of files indexed: " + str(len(files))) | [
"langchain.embeddings.OpenAIEmbeddings"
] | [((353, 388), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': '""""""'}), "(openai_api_key='')\n", (369, 388), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((397, 471), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""sentence-transformers/allenai-specter"""'], {'device': '"""cpu"""'}), "('sentence-transformers/allenai-specter', device='cpu')\n", (416, 471), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((1656, 1683), 'annoy.AnnoyIndex', 'AnnoyIndex', (['(1536)', '"""angular"""'], {}), "(1536, 'angular')\n", (1666, 1683), False, 'from annoy import AnnoyIndex\n'), ((1689, 1715), 'annoy.AnnoyIndex', 'AnnoyIndex', (['(768)', '"""angular"""'], {}), "(768, 'angular')\n", (1699, 1715), False, 'from annoy import AnnoyIndex\n'), ((719, 732), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (726, 732), False, 'import os\n'), ((852, 873), 'os.path.join', 'os.path.join', (['r', 'file'], {}), '(r, file)\n', (864, 873), False, 'import os\n')] |
import discord
from discord import app_commands
from discord.ext import commands
import langchain
from langchain.document_loaders import YoutubeLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
import torch
class YoutubeSummaryCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.llm = self.bot.llm
@app_commands.command(name="youtubesummary", description="Summarize a YouTube video given its URL")
async def summarize(self, interaction: discord.Interaction, url: str):
await interaction.response.defer()
# Notifies the user that the bot is processing their command.
await interaction.followup.send(
embed=discord.Embed(
title=f"{interaction.user.display_name} used Youtube Summary 📺",
description=f"Summarizing {url} \nGenerating response\nPlease wait..",
color=0x9C84EF
)
)
try:
# Load transcript
loader = YoutubeLoader.from_youtube_url(url)
transcript = loader.load()
# Split text
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=50)
texts = text_splitter.split_documents(transcript)
# Create and configure chain
chain = load_summarize_chain(llm=self.llm, chain_type="map_reduce", verbose=True)
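            # "map_reduce" summarizes each chunk separately and then combines the
            # partial summaries, so long transcripts stay within the model's context window.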
# chain.llm_chain.prompt.template = \
# """### Instruction:
# Write a 1-3 paragraph summary the following:
# "{text}"
# ### Response:
# 1-3 PARAGRAPH SUMMARY:"""
# Run the chain and get summary
summary = chain.run(texts)
await interaction.followup.send(f'Summary:\n{summary}')
except Exception as e:
await interaction.channel.send(f'Sorry, an error occurred: {str(e)}')
async def setup(bot):
await bot.add_cog(YoutubeSummaryCog(bot))
| [
"langchain.chains.summarize.load_summarize_chain",
"langchain.document_loaders.YoutubeLoader.from_youtube_url",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((425, 528), 'discord.app_commands.command', 'app_commands.command', ([], {'name': '"""youtubesummary"""', 'description': '"""Summarize a YouTube video given its URL"""'}), "(name='youtubesummary', description=\n 'Summarize a YouTube video given its URL')\n", (445, 528), False, 'from discord import app_commands\n'), ((1074, 1109), 'langchain.document_loaders.YoutubeLoader.from_youtube_url', 'YoutubeLoader.from_youtube_url', (['url'], {}), '(url)\n', (1104, 1109), False, 'from langchain.document_loaders import YoutubeLoader\n'), ((1203, 1268), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(2000)', 'chunk_overlap': '(50)'}), '(chunk_size=2000, chunk_overlap=50)\n', (1233, 1268), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1393, 1466), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'self.llm', 'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(llm=self.llm, chain_type='map_reduce', verbose=True)\n", (1413, 1466), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((772, 954), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""{interaction.user.display_name} used Youtube Summary 📺"""', 'description': 'f"""Summarizing {url} \nGenerating response\nPlease wait.."""', 'color': '(10257647)'}), '(title=\n f\'{interaction.user.display_name} used Youtube Summary 📺\', description=\n f"""Summarizing {url} \nGenerating response\nPlease wait..""", color=10257647\n )\n', (785, 954), False, 'import discord\n')] |
import os
import re
import langchain
import molbloom
import paperqa
import paperscraper
from langchain import SerpAPIWrapper
from langchain.base_language import BaseLanguageModel
from langchain.tools import BaseTool
from langchain.embeddings.openai import OpenAIEmbeddings
from pypdf.errors import PdfReadError
from chemcrow.utils import is_multiple_smiles, split_smiles
def paper_scraper(search: str, pdir: str = "query", semantic_scholar_api_key: str = None) -> dict:
try:
return paperscraper.search_papers(
search,
pdir=pdir,
semantic_scholar_api_key=semantic_scholar_api_key,
)
except KeyError:
return {}
def paper_search(llm, query, semantic_scholar_api_key=None):
prompt = langchain.prompts.PromptTemplate(
input_variables=["question"],
template="""
I would like to find scholarly papers to answer
this question: {question}. Your response must be at
most 10 words long.
'A search query that would bring up papers that can answer
this question would be: '""",
)
query_chain = langchain.chains.llm.LLMChain(llm=llm, prompt=prompt)
if not os.path.isdir("./query"): # todo: move to ckpt
os.mkdir("query/")
search = query_chain.run(query)
print("\nSearch:", search)
papers = paper_scraper(search, pdir=f"query/{re.sub(' ', '', search)}", semantic_scholar_api_key=semantic_scholar_api_key)
return papers
def scholar2result_llm(llm, query, k=5, max_sources=2, openai_api_key=None, semantic_scholar_api_key=None):
"""Useful to answer questions that require
technical knowledge. Ask a specific question."""
papers = paper_search(llm, query, semantic_scholar_api_key=semantic_scholar_api_key)
if len(papers) == 0:
return "Not enough papers found"
docs = paperqa.Docs(
llm=llm,
summary_llm=llm,
embeddings=OpenAIEmbeddings(openai_api_key=openai_api_key),
)
not_loaded = 0
for path, data in papers.items():
try:
docs.add(path, data["citation"])
except (ValueError, FileNotFoundError, PdfReadError):
not_loaded += 1
if not_loaded > 0:
print(f"\nFound {len(papers.items())} papers but couldn't load {not_loaded}.")
else:
print(f"\nFound {len(papers.items())} papers and loaded all of them.")
answer = docs.query(query, k=k, max_sources=max_sources).formatted_answer
return answer
class Scholar2ResultLLM(BaseTool):
name = "LiteratureSearch"
description = (
"Useful to answer questions that require technical "
"knowledge. Ask a specific question."
)
llm: BaseLanguageModel = None
openai_api_key: str = None
semantic_scholar_api_key: str = None
def __init__(self, llm, openai_api_key, semantic_scholar_api_key):
super().__init__()
self.llm = llm
# api keys
self.openai_api_key = openai_api_key
self.semantic_scholar_api_key = semantic_scholar_api_key
def _run(self, query) -> str:
return scholar2result_llm(
self.llm,
query,
openai_api_key=self.openai_api_key,
semantic_scholar_api_key=self.semantic_scholar_api_key
)
async def _arun(self, query) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("this tool does not support async")
def web_search(keywords, search_engine="google"):
try:
return SerpAPIWrapper(
serpapi_api_key=os.getenv("SERP_API_KEY"), search_engine=search_engine
).run(keywords)
except:
return "No results, try another search"
class WebSearch(BaseTool):
name = "WebSearch"
description = (
"Input a specific question, returns an answer from web search. "
"Do not mention any specific molecule names, but use more general features to formulate your questions."
)
serp_api_key: str = None
def __init__(self, serp_api_key: str = None):
super().__init__()
self.serp_api_key = serp_api_key
def _run(self, query: str) -> str:
if not self.serp_api_key:
return (
"No SerpAPI key found. This tool may not be used without a SerpAPI key."
)
return web_search(query)
async def _arun(self, query: str) -> str:
raise NotImplementedError("Async not implemented")
class PatentCheck(BaseTool):
name = "PatentCheck"
description = "Input SMILES, returns if molecule is patented. You may also input several SMILES, separated by a period."
def _run(self, smiles: str) -> str:
"""Checks if compound is patented. Give this tool only one SMILES string"""
if is_multiple_smiles(smiles):
smiles_list = split_smiles(smiles)
else:
smiles_list = [smiles]
try:
output_dict = {}
for smi in smiles_list:
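                # molbloom checks the SMILES against a pre-built bloom filter of the
                # SureChEMBL catalog; a hit means the molecule very likely appears in
                # patent literature (bloom filters allow rare false positives).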
r = molbloom.buy(smi, canonicalize=True, catalog="surechembl")
if r:
output_dict[smi] = "Patented"
else:
output_dict[smi] = "Novel"
return str(output_dict)
except:
return "Invalid SMILES string"
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError()
| [
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.prompts.PromptTemplate",
"langchain.chains.llm.LLMChain"
] | [((757, 1091), 'langchain.prompts.PromptTemplate', 'langchain.prompts.PromptTemplate', ([], {'input_variables': "['question']", 'template': '"""\n I would like to find scholarly papers to answer\n this question: {question}. Your response must be at\n most 10 words long.\n \'A search query that would bring up papers that can answer\n this question would be: \'"""'}), '(input_variables=[\'question\'], template=\n """\n I would like to find scholarly papers to answer\n this question: {question}. Your response must be at\n most 10 words long.\n \'A search query that would bring up papers that can answer\n this question would be: \'"""\n )\n', (789, 1091), False, 'import langchain\n'), ((1124, 1177), 'langchain.chains.llm.LLMChain', 'langchain.chains.llm.LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1153, 1177), False, 'import langchain\n'), ((498, 599), 'paperscraper.search_papers', 'paperscraper.search_papers', (['search'], {'pdir': 'pdir', 'semantic_scholar_api_key': 'semantic_scholar_api_key'}), '(search, pdir=pdir, semantic_scholar_api_key=\n semantic_scholar_api_key)\n', (524, 599), False, 'import paperscraper\n'), ((1189, 1213), 'os.path.isdir', 'os.path.isdir', (['"""./query"""'], {}), "('./query')\n", (1202, 1213), False, 'import os\n'), ((1245, 1263), 'os.mkdir', 'os.mkdir', (['"""query/"""'], {}), "('query/')\n", (1253, 1263), False, 'import os\n'), ((4759, 4785), 'chemcrow.utils.is_multiple_smiles', 'is_multiple_smiles', (['smiles'], {}), '(smiles)\n', (4777, 4785), False, 'from chemcrow.utils import is_multiple_smiles, split_smiles\n'), ((1927, 1974), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'openai_api_key'}), '(openai_api_key=openai_api_key)\n', (1943, 1974), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((4813, 4833), 'chemcrow.utils.split_smiles', 'split_smiles', (['smiles'], {}), '(smiles)\n', (4825, 4833), False, 'from chemcrow.utils import is_multiple_smiles, split_smiles\n'), ((4981, 5039), 'molbloom.buy', 'molbloom.buy', (['smi'], {'canonicalize': '(True)', 'catalog': '"""surechembl"""'}), "(smi, canonicalize=True, catalog='surechembl')\n", (4993, 5039), False, 'import molbloom\n'), ((1380, 1403), 're.sub', 're.sub', (['""" """', '""""""', 'search'], {}), "(' ', '', search)\n", (1386, 1403), False, 'import re\n'), ((3554, 3579), 'os.getenv', 'os.getenv', (['"""SERP_API_KEY"""'], {}), "('SERP_API_KEY')\n", (3563, 3579), False, 'import os\n')] |
from pathlib import Path
from phi.assistant import Assistant
from phi.knowledge.langchain import LangChainKnowledgeBase
from langchain.embeddings import OpenAIEmbeddings
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
cookbook_dir = Path("__file__").parent
chroma_db_dir = cookbook_dir.joinpath("storage/chroma_db")
def load_vector_store():
state_of_the_union = cookbook_dir.joinpath("data/demo/state_of_the_union.txt")
# -*- Load the document
raw_documents = TextLoader(str(state_of_the_union)).load()
# -*- Split it into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
# -*- Embed each chunk and load it into the vector store
Chroma.from_documents(documents, OpenAIEmbeddings(), persist_directory=str(chroma_db_dir))
# -*- Load the vector store
load_vector_store()
# -*- Get the vectordb
db = Chroma(embedding_function=OpenAIEmbeddings(), persist_directory=str(chroma_db_dir))
# -*- Create a retriever from the vector store
retriever = db.as_retriever()
# -*- Create a knowledge base from the vector store
knowledge_base = LangChainKnowledgeBase(retriever=retriever)
conv = Assistant(knowledge_base=knowledge_base, debug_mode=True, add_references_to_prompt=True)
conv.print_response("What did the president say about technology?", markdown=True)
| [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.embeddings.OpenAIEmbeddings"
] | [((1254, 1297), 'phi.knowledge.langchain.LangChainKnowledgeBase', 'LangChainKnowledgeBase', ([], {'retriever': 'retriever'}), '(retriever=retriever)\n', (1276, 1297), False, 'from phi.knowledge.langchain import LangChainKnowledgeBase\n'), ((1306, 1398), 'phi.assistant.Assistant', 'Assistant', ([], {'knowledge_base': 'knowledge_base', 'debug_mode': '(True)', 'add_references_to_prompt': '(True)'}), '(knowledge_base=knowledge_base, debug_mode=True,\n add_references_to_prompt=True)\n', (1315, 1398), False, 'from phi.assistant import Assistant\n'), ((337, 353), 'pathlib.Path', 'Path', (['"""__file__"""'], {}), "('__file__')\n", (341, 353), False, 'from pathlib import Path\n'), ((672, 727), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (693, 727), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((887, 905), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (903, 905), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1049, 1067), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1065, 1067), False, 'from langchain.embeddings import OpenAIEmbeddings\n')] |
"""Console script for blackboard_pagi."""
# Blackboard-PAGI - LLM Proto-AGI using the Blackboard Pattern
# Copyright (c) 2023. Andreas Kirsch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import click
import langchain
from langchain.cache import SQLiteCache
from langchain.llms.openai import OpenAI
import blackboard_pagi.controller
langchain.llm_cache = SQLiteCache()
@click.command()
def main():
"""Main entrypoint."""
click.echo("blackboard-pagi")
click.echo("=" * len("blackboard-pagi"))
click.echo("Proto-AGI using a Blackboard System (for the LLM Hackathon by Ben's Bites)")
click.echo("What is your prompt?")
prompt = click.prompt("Prompt", default="How many colleges are there in Oxford and Cambridge?")
# default="What is the meaning of life?")
kernel = blackboard_pagi.controller.Kernel(OpenAI())
note = kernel(prompt)
click.echo("Here is your note:")
click.echo(note)
if __name__ == "__main__":
main() # pragma: no cover
| [
"langchain.llms.openai.OpenAI",
"langchain.cache.SQLiteCache"
] | [((983, 996), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {}), '()\n', (994, 996), False, 'from langchain.cache import SQLiteCache\n'), ((1000, 1015), 'click.command', 'click.command', ([], {}), '()\n', (1013, 1015), False, 'import click\n'), ((1059, 1088), 'click.echo', 'click.echo', (['"""blackboard-pagi"""'], {}), "('blackboard-pagi')\n", (1069, 1088), False, 'import click\n'), ((1138, 1236), 'click.echo', 'click.echo', (['"""Proto-AGI using a Blackboard System (for the LLM Hackathon by Ben\'s Bites)"""'], {}), '(\n "Proto-AGI using a Blackboard System (for the LLM Hackathon by Ben\'s Bites)"\n )\n', (1148, 1236), False, 'import click\n'), ((1232, 1266), 'click.echo', 'click.echo', (['"""What is your prompt?"""'], {}), "('What is your prompt?')\n", (1242, 1266), False, 'import click\n'), ((1280, 1371), 'click.prompt', 'click.prompt', (['"""Prompt"""'], {'default': '"""How many colleges are there in Oxford and Cambridge?"""'}), "('Prompt', default=\n 'How many colleges are there in Oxford and Cambridge?')\n", (1292, 1371), False, 'import click\n'), ((1502, 1534), 'click.echo', 'click.echo', (['"""Here is your note:"""'], {}), "('Here is your note:')\n", (1512, 1534), False, 'import click\n'), ((1539, 1555), 'click.echo', 'click.echo', (['note'], {}), '(note)\n', (1549, 1555), False, 'import click\n'), ((1461, 1469), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (1467, 1469), False, 'from langchain.llms.openai import OpenAI\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AI. @by PyCharm
# @File : OpenAIEmbeddings
# @Time : 2023/7/11 18:40
# @Author : betterme
# @WeChat : meutils
# @Software : PyCharm
# @Description :
import langchain
from langchain.embeddings import OpenAIEmbeddings as _OpenAIEmbeddings
from meutils.pipe import *
from chatllm.llmchain.utils import get_api_key
class OpenAIEmbeddings(_OpenAIEmbeddings):
"""多key多线程"""
get_api_key: Callable[[int], List[str]] = get_api_key
pre_fn: Optional[Callable[[str], str]] = None
# class Config:
# """Configuration for this pydantic object."""
#
# allow_population_by_field_name = True
def embed_documents(
self,
texts: List[str],
chunk_size: Optional[int] = 0,
) -> List[List[float]]:
if self.pre_fn: texts = texts | xmap_(self.pre_fn)
n = int(np.ceil(len(texts) / self.chunk_size))
api_key_set = self.get_api_key(n=n)
        max_workers = np.clip(len(api_key_set), 1, 32).astype(int)  # maximum number of worker threads
if max_workers > 1:
embeddings_map = {}
for i, api_key in enumerate(api_key_set):
kwargs = self.dict().copy()
kwargs.pop('get_api_key', None) # not permitted
kwargs['openai_api_key'] = api_key
                embeddings_map[i] = _OpenAIEmbeddings(**kwargs)  # plain OpenAIEmbeddings works here as well
if langchain.debug:
logger.info([e.openai_api_key for e in embeddings_map.values()])
logger.info(f"Maximum concurrency: {max_workers * self.chunk_size}")
def __embed_documents(arg):
idx, texts = arg
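                # Round-robin assignment: chunk idx is embedded with key idx % max_workers.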
embeddings = embeddings_map.get(idx % max_workers, 0)
return embeddings.embed_documents(texts)
return (
texts | xgroup(self.chunk_size)
| xenumerate
| xThreadPoolExecutor(__embed_documents, max_workers)
| xchain_
)
return super().embed_documents(texts)
if __name__ == '__main__':
e = OpenAIEmbeddings(chunk_size=5)
e.get_api_key = partial(get_api_key, n=2)
# e.openai_api_key = 'xxx'
print(e.get_api_key())
print(e.openai_api_key)
print(e.embed_documents(['x'] * 6))
print(e.embed_query('x'))
| [
"langchain.embeddings.OpenAIEmbeddings"
] | [((1391, 1418), 'langchain.embeddings.OpenAIEmbeddings', '_OpenAIEmbeddings', ([], {}), '(**kwargs)\n', (1408, 1418), True, 'from langchain.embeddings import OpenAIEmbeddings as _OpenAIEmbeddings\n')] |
####################################################################################
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################
# Author: Adam Paternostro
# Summary: Ask an LLM to find categories in a customer review text and output the results as JSON.
# To set up your environment
# python3 -m venv .venv
# source .venv/bin/activate
# pip install --only-binary :all: greenlet
#     pip install langchain==0.0.307
# pip install google-cloud-aiplatform
# run it: python sample-prompt-json-output.py
# deactivate
import json
import langchain
from langchain.llms import VertexAI
from langchain.embeddings import VertexAIEmbeddings
llm = VertexAI(
model_name="text-bison@001",
max_output_tokens=1024,
temperature=0,
top_p=0,
top_k=1,
verbose=True,
)
prompt="""For the below review peform the following:
1. Classify the review as one or more of the below classifications.
2. Output the results in the below JSON format.
Classifications:
- "driver likes music"
- "driver has a dirty car"
- "driver has a clean car"
- "driver drives fast"
- "driver drives slow"
JSON format: [ "value" ]
Sample JSON Response: [ "driver likes music", "driver drives slow" ]
Review: I was taking a rideshare ride and the drivers car was spotless. Not a spec of dirt could be found. I could eat off the seats.
I cannot believe how quickly he got me to my destination. It was like taking a rocketship. I was so scared!
"""
result = llm(prompt)
print()
print(result)
print()
print()
# Hopefully it is valid JSON
json_data = str(result)
json_object = json.loads(json_data)
json_formatted_str = json.dumps(json_object, indent=2)
print(json_formatted_str)
| [
"langchain.llms.VertexAI"
] | [((1273, 1385), 'langchain.llms.VertexAI', 'VertexAI', ([], {'model_name': '"""text-bison@001"""', 'max_output_tokens': '(1024)', 'temperature': '(0)', 'top_p': '(0)', 'top_k': '(1)', 'verbose': '(True)'}), "(model_name='text-bison@001', max_output_tokens=1024, temperature=0,\n top_p=0, top_k=1, verbose=True)\n", (1281, 1385), False, 'from langchain.llms import VertexAI\n'), ((2198, 2219), 'json.loads', 'json.loads', (['json_data'], {}), '(json_data)\n', (2208, 2219), False, 'import json\n'), ((2241, 2274), 'json.dumps', 'json.dumps', (['json_object'], {'indent': '(2)'}), '(json_object, indent=2)\n', (2251, 2274), False, 'import json\n')] |
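# A hedged variant of the JSON handling above (illustrative only, not part of the
# original sample): LLM output is not guaranteed to be valid JSON, so guard the parse
# instead of assuming success and fall back to the raw text when it fails.
try:
    json_object = json.loads(str(result))
    print(json.dumps(json_object, indent=2))
except json.JSONDecodeError:
    print("Model output was not valid JSON:")
    print(result)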
# Blackboard-PAGI - LLM Proto-AGI using the Blackboard Pattern
# Copyright (c) 2023. Andreas Kirsch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from typing import List, Optional
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatResult,
Generation,
)
class CachedChatOpenAI(ChatOpenAI):
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]] = None) -> ChatResult:
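        # The cache key is the repr of the full message list; entries are scoped by model name.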
messages_prompt = repr(messages)
if langchain.llm_cache:
results = langchain.llm_cache.lookup(messages_prompt, self.model_name)
if results:
assert len(results) == 1
result: Generation = results[0]
chat_result = ChatResult(
generations=[ChatGeneration(message=AIMessage(content=result.text))],
llm_output=result.generation_info,
)
return chat_result
chat_result = super()._generate(messages, stop)
if langchain.llm_cache:
assert len(chat_result.generations) == 1
result = Generation(text=chat_result.generations[0].message.content, generation_info=chat_result.llm_output)
langchain.llm_cache.update(messages_prompt, self.model_name, [result])
return chat_result
| [
"langchain.schema.AIMessage",
"langchain.llm_cache.update",
"langchain.schema.Generation",
"langchain.llm_cache.lookup"
] | [((1220, 1280), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['messages_prompt', 'self.model_name'], {}), '(messages_prompt, self.model_name)\n', (1246, 1280), False, 'import langchain\n'), ((1796, 1900), 'langchain.schema.Generation', 'Generation', ([], {'text': 'chat_result.generations[0].message.content', 'generation_info': 'chat_result.llm_output'}), '(text=chat_result.generations[0].message.content, generation_info\n =chat_result.llm_output)\n', (1806, 1900), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, Generation\n'), ((1908, 1978), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['messages_prompt', 'self.model_name', '[result]'], {}), '(messages_prompt, self.model_name, [result])\n', (1934, 1978), False, 'import langchain\n'), ((1492, 1522), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'result.text'}), '(content=result.text)\n', (1501, 1522), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult, Generation\n')] |
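# A minimal usage sketch for CachedChatOpenAI above (hypothetical wiring; it assumes the
# legacy langchain global-cache API, and the cache file name and prompt are examples only):
import langchain
from langchain.cache import SQLiteCache
from langchain.schema import HumanMessage

langchain.llm_cache = SQLiteCache(database_path=".llm_cache.sqlite")
chat = CachedChatOpenAI(model_name="gpt-4", temperature=0)
first = chat([HumanMessage(content="Summarise the blackboard pattern.")])   # goes to the API
second = chat([HumanMessage(content="Summarise the blackboard pattern.")])  # answered from the cache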
import os
import pathlib
import langchain
import langchain.cache
import langchain.globals
CACHE_BASE = pathlib.Path(f'{os.environ["HOME"]}/.cache/mitaskem/')
CACHE_BASE.mkdir(parents=True, exist_ok=True)
_LLM_CACHE_PATH = CACHE_BASE/'langchain_llm_cache.sqlite'
langchain.globals.set_llm_cache(langchain.cache.SQLiteCache(database_path=_LLM_CACHE_PATH)) | [
"langchain.cache.SQLiteCache"
] | [((104, 158), 'pathlib.Path', 'pathlib.Path', (['f"""{os.environ[\'HOME\']}/.cache/mitaskem/"""'], {}), '(f"{os.environ[\'HOME\']}/.cache/mitaskem/")\n', (116, 158), False, 'import pathlib\n'), ((295, 353), 'langchain.cache.SQLiteCache', 'langchain.cache.SQLiteCache', ([], {'database_path': '_LLM_CACHE_PATH'}), '(database_path=_LLM_CACHE_PATH)\n', (322, 353), False, 'import langchain\n')] |
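# Illustrative sketch (not part of the mitaskem module): with the SQLite cache above
# installed globally, repeated identical calls are served from disk instead of the API.
from langchain.chat_models import ChatOpenAI

chat = ChatOpenAI(temperature=0)
print(chat.predict("What is an SIR model?"))  # first call hits the API
print(chat.predict("What is an SIR model?"))  # second identical call is read from the cache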
"""Base interface that all chains should implement."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Extra, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from langchain.callbacks.base import BaseCallbackManager
class Memory(BaseModel, ABC):
"""Base interface for memory in chains."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
@abstractmethod
def memory_variables(self) -> List[str]:
"""Input keys this memory class will load dynamically."""
@abstractmethod
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
@abstractmethod
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save the context of this model run to memory."""
@abstractmethod
def clear(self) -> None:
"""Clear memory contents."""
def _get_verbosity() -> bool:
return langchain.verbose
class Chain(BaseModel, ABC):
"""Base interface that all chains should implement."""
memory: Optional[Memory] = None
callback_manager: BaseCallbackManager = Field(
default_factory=get_callback_manager, exclude=True
)
verbose: bool = Field(
default_factory=_get_verbosity
) # Whether to print the response text
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@property
def _chain_type(self) -> str:
raise NotImplementedError("Saving not supported for this chain type.")
@validator("callback_manager", pre=True, always=True)
def set_callback_manager(
cls, callback_manager: Optional[BaseCallbackManager]
) -> BaseCallbackManager:
"""If callback manager is None, set it.
This allows users to pass in None as callback manager, which is a nice UX.
"""
return callback_manager or get_callback_manager()
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@property
@abstractmethod
def input_keys(self) -> List[str]:
"""Input keys this chain expects."""
@property
@abstractmethod
def output_keys(self) -> List[str]:
"""Output keys this chain expects."""
def _validate_inputs(self, inputs: Dict[str, str]) -> None:
"""Check that all inputs are present."""
missing_keys = set(self.input_keys).difference(inputs)
if missing_keys:
raise ValueError(f"Missing some input keys: {missing_keys}")
def _validate_outputs(self, outputs: Dict[str, str]) -> None:
if set(outputs) != set(self.output_keys):
raise ValueError(
f"Did not get output keys that were expected. "
f"Got: {set(outputs)}. Expected: {set(self.output_keys)}."
)
@abstractmethod
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
"""Run the logic of this chain and return the output."""
async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
"""Run the logic of this chain and return the output."""
raise NotImplementedError("Async call not supported for this chain type.")
def __call__(
self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
"""
inputs = self.prep_inputs(inputs)
self.callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
verbose=self.verbose,
)
try:
outputs = self._call(inputs)
except (KeyboardInterrupt, Exception) as e:
self.callback_manager.on_chain_error(e, verbose=self.verbose)
raise e
self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
return self.prep_outputs(inputs, outputs, return_only_outputs)
async def acall(
self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
"""
inputs = self.prep_inputs(inputs)
if self.callback_manager.is_async:
await self.callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
verbose=self.verbose,
)
else:
self.callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
verbose=self.verbose,
)
try:
outputs = await self._acall(inputs)
except (KeyboardInterrupt, Exception) as e:
if self.callback_manager.is_async:
await self.callback_manager.on_chain_error(e, verbose=self.verbose)
else:
self.callback_manager.on_chain_error(e, verbose=self.verbose)
raise e
if self.callback_manager.is_async:
await self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
else:
self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
return self.prep_outputs(inputs, outputs, return_only_outputs)
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prep outputs."""
self._validate_outputs(outputs)
if self.memory is not None:
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prep inputs."""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
if len(_input_keys) != 1:
raise ValueError(
f"A single string input was passed in, but this chain expects "
f"multiple inputs ({_input_keys}). When a chain expects "
f"multiple inputs, please call it by passing in a dictionary, "
"eg `chain({'foo': 1, 'bar': 2})`"
)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = self.memory.load_memory_variables(inputs)
inputs = dict(inputs, **external_context)
self._validate_inputs(inputs)
return inputs
def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
"""Call the chain on all inputs in the list."""
return [self(inputs) for inputs in input_list]
def conversation(self, *args: str, **kwargs: str) -> List[str]:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) == 2:
assert "output" in self.output_keys and "intermediate_steps" in self.output_keys
keep_short = False
if "keep_short" in kwargs:
keep_short = kwargs.pop("keep_short")
outputs = {}
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
outputs = self(args[0])
if kwargs and not args:
outputs = self(kwargs)
intermediate = outputs.get("intermediate_steps") or []
conversation = []
for action, action_output in intermediate:
action: str = action.log.strip()
if not action.startswith(f"AI:"):
action = f"AI: {action}"
if keep_short:
# Hide the internal conversation
lines = action.split("\n")
new_lines = []
for l in lines:
for term in ["Assistant,"]:
idx = l.lower().find(term.lower())
if idx >= 0:
l = l[:idx]
if l.lower().strip() == "ai:":
l = ""
if not l:
continue
new_lines.append(l)
action = "\n".join(new_lines)
conversation.append(action)
if not keep_short or action_output.lstrip().startswith("Here is the edited image"):
if not action_output.startswith("Assistant:"):
action_output = f"Assistant: {action_output}"
conversation.append(action_output)
conversation.append("AI: " + outputs["output"])
return conversation
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return ["AI: " + self(args[0])[self.output_keys[0]]]
if kwargs and not args:
return ["AI: " + self(kwargs)[self.output_keys[0]]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def run(self, *args: str, **kwargs: str) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) == 2:
assert "output" in self.output_keys and "intermediate_steps" in self.output_keys
outputs = {}
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
outputs = self(args[0])
if kwargs and not args:
outputs = self(kwargs)
intermediate = outputs.get("intermediate_steps") or []
assistant = ""
for action, action_output in intermediate:
action: str = action.log.strip()
if not action.startswith(f"AI:"):
action = f"AI: {action}"
if not action_output.startswith("Assistant:"):
action_output = f"Assistant: {action_output}"
assistant += "\n" + action + "\n" + action_output
return assistant + "\n" + "AI: " + outputs["output"]
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return self(args[0])[self.output_keys[0]]
if kwargs and not args:
return self(kwargs)[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
async def arun(self, *args: str, **kwargs: str) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return (await self.acall(args[0]))[self.output_keys[0]]
if kwargs and not args:
return (await self.acall(kwargs))[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def dict(self, **kwargs: Any) -> Dict:
"""Return dictionary representation of chain."""
if self.memory is not None:
raise ValueError("Saving of memory is not yet supported.")
_dict = super().dict()
_dict["_type"] = self._chain_type
return _dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the chain.
Args:
file_path: Path to file to save the chain to.
Example:
.. code-block:: python
chain.save(file_path="path/chain.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
chain_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(chain_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(chain_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
| [
"langchain.callbacks.get_callback_manager"
] | [((1401, 1458), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager', 'exclude': '(True)'}), '(default_factory=get_callback_manager, exclude=True)\n', (1406, 1458), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((1493, 1530), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1498, 1530), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((1830, 1882), 'pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)', 'always': '(True)'}), "('callback_manager', pre=True, always=True)\n", (1839, 1882), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2212, 2255), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (2221, 2255), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2183, 2205), 'langchain.callbacks.get_callback_manager', 'get_callback_manager', ([], {}), '()\n', (2203, 2205), False, 'from langchain.callbacks import get_callback_manager\n'), ((14634, 14649), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14638, 14649), False, 'from pathlib import Path\n'), ((14969, 15003), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (14978, 15003), False, 'import json\n'), ((15106, 15156), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (15115, 15156), False, 'import yaml\n')] |
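# A minimal concrete subclass sketch for the Chain interface above (illustrative only,
# not taken from the repository): a chain declares its keys and implements _call.
class EchoChain(Chain):
    @property
    def input_keys(self) -> List[str]:
        return ["text"]

    @property
    def output_keys(self) -> List[str]:
        return ["echo"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        return {"echo": inputs["text"].upper()}

# EchoChain()("hello") returns {"text": "hello", "echo": "HELLO"};
# EchoChain().run("hello") returns "HELLO".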
import time #← import the time module to measure execution time
import langchain
from langchain.cache import InMemoryCache #← import InMemoryCache
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
langchain.llm_cache = InMemoryCache() #← set InMemoryCache as the llm_cache
chat = ChatOpenAI()
start = time.time() #← record the start time
result = chat([ #← first run
HumanMessage(content="こんにちは!")
])
end = time.time() #← record the end time
print(result.content)
print(f"実行時間: {end - start}秒")
start = time.time() #← record the start time
result = chat([ #← running the same input a second time is served from the cache, so it completes almost instantly
HumanMessage(content="こんにちは!")
])
end = time.time() #← record the end time
print(result.content)
print(f"実行時間: {end - start}秒") | [
"langchain.cache.InMemoryCache",
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI"
] | [((237, 252), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (250, 252), False, 'from langchain.cache import InMemoryCache\n'), ((291, 303), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (301, 303), False, 'from langchain.chat_models import ChatOpenAI\n'), ((312, 323), 'time.time', 'time.time', ([], {}), '()\n', (321, 323), False, 'import time\n'), ((411, 422), 'time.time', 'time.time', ([], {}), '()\n', (420, 422), False, 'import time\n'), ((498, 509), 'time.time', 'time.time', ([], {}), '()\n', (507, 509), False, 'import time\n'), ((627, 638), 'time.time', 'time.time', ([], {}), '()\n', (636, 638), False, 'import time\n'), ((370, 400), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""こんにちは!"""'}), "(content='こんにちは!')\n", (382, 400), False, 'from langchain.schema import HumanMessage\n'), ((586, 616), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""こんにちは!"""'}), "(content='こんにちは!')\n", (598, 616), False, 'from langchain.schema import HumanMessage\n')] |
import re
import urllib
from time import sleep
import langchain
import molbloom
import pandas as pd
import pkg_resources
import requests
import tiktoken
from langchain import LLMChain, PromptTemplate
from langchain.llms import BaseLLM
from langchain.tools import BaseTool
from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto
from .prompts import safety_summary_prompt, summary_each_data
class MoleculeSafety:
def __init__(self, llm: BaseLLM = None):
while True:
try:
self.clintox = pd.read_csv(
"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/clintox.csv.gz"
)
break
except (ConnectionRefusedError, urllib.error.URLError):
sleep(5)
continue
self.pubchem_data = {}
self.llm = llm
def _fetch_pubchem_data(self, cas_number):
"""Fetch data from PubChem for a given CAS number, or use cached data if it's already been fetched."""
if cas_number not in self.pubchem_data:
try:
url1 = f"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/{cas_number}/cids/JSON"
url = f"https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/data/compound/{requests.get(url1).json()['IdentifierList']['CID'][0]}/JSON"
r = requests.get(url)
self.pubchem_data[cas_number] = r.json()
except:
return "Invalid molecule input, no Pubchem entry."
return self.pubchem_data[cas_number]
def ghs_classification(self, text):
"""Gives the ghs classification from Pubchem. Give this tool the name or CAS number of one molecule."""
if is_smiles(text):
return "Please input a valid CAS number."
data = self._fetch_pubchem_data(text)
if isinstance(data, str):
return "Molecule not found in Pubchem."
try:
for section in data["Record"]["Section"]:
if section.get("TOCHeading") == "Chemical Safety":
ghs = [
markup["Extra"]
for markup in section["Information"][0]["Value"][
"StringWithMarkup"
][0]["Markup"]
]
if ghs:
return ghs
except (StopIteration, KeyError):
return None
@staticmethod
def _scrape_pubchem(data, heading1, heading2, heading3):
try:
filtered_sections = []
for section in data["Record"]["Section"]:
toc_heading = section.get("TOCHeading")
if toc_heading == heading1:
for section2 in section["Section"]:
if section2.get("TOCHeading") == heading2:
for section3 in section2["Section"]:
if section3.get("TOCHeading") == heading3:
filtered_sections.append(section3)
return filtered_sections
except:
return None
def _get_safety_data(self, cas):
data = self._fetch_pubchem_data(cas)
safety_data = []
iterations = [
(
[
"Health Hazards",
"GHS Classification",
"Hazards Summary",
"NFPA Hazard Classification",
],
"Safety and Hazards",
"Hazards Identification",
),
(
["Explosive Limits and Potential", "Preventive Measures"],
"Safety and Hazards",
"Safety and Hazard Properties",
),
(
[
"Inhalation Risk",
"Effects of Long Term Exposure",
"Personal Protective Equipment (PPE)",
],
"Safety and Hazards",
"Exposure Control and Personal Protection",
),
(
["Toxicity Summary", "Carcinogen Classification"],
"Toxicity",
"Toxicological Information",
),
]
for items, header1, header2 in iterations:
safety_data.extend(
[self._scrape_pubchem(data, header1, header2, item)] for item in items
)
return safety_data
@staticmethod
def _num_tokens(string, encoding_name="text-davinci-003"):
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def get_safety_summary(self, cas):
safety_data = self._get_safety_data(cas)
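        # Budget roughly 3500 tokens (about 4 characters per token) of context across all
        # safety sections, then trim 10% as a margin so each chunk stays well within limits.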
approx_length = int(
(3500 * 4) / len(safety_data) - 0.1 * ((3500 * 4) / len(safety_data))
)
prompt_short = PromptTemplate(
template=summary_each_data, input_variables=["data", "approx_length"]
)
llm_chain_short = LLMChain(prompt=prompt_short, llm=self.llm)
llm_output = []
for info in safety_data:
if self._num_tokens(str(info)) > approx_length:
trunc_info = str(info)[:approx_length]
llm_output.append(
llm_chain_short.run(
{"data": str(trunc_info), "approx_length": approx_length}
)
)
else:
llm_output.append(
llm_chain_short.run(
{"data": str(info), "approx_length": approx_length}
)
)
return llm_output
class SafetySummary(BaseTool):
name = "SafetySummary"
description = (
"Input CAS number, returns a summary of safety information."
"The summary includes Operator safety, GHS information, "
"Environmental risks, and Societal impact."
)
llm: BaseLLM = None
llm_chain: LLMChain = None
pubchem_data: dict = dict()
mol_safety: MoleculeSafety = None
def __init__(self, llm):
super().__init__()
self.mol_safety = MoleculeSafety(llm=llm)
self.llm = llm
prompt = PromptTemplate(
template=safety_summary_prompt, input_variables=["data"]
)
self.llm_chain = LLMChain(prompt=prompt, llm=self.llm)
def _run(self, cas: str) -> str:
if is_smiles(cas):
return "Please input a valid CAS number."
data = self.mol_safety._fetch_pubchem_data(cas)
if isinstance(data, str):
return "Molecule not found in Pubchem."
data = self.mol_safety.get_safety_summary(cas)
return self.llm_chain.run(" ".join(data))
async def _arun(self, cas_number):
raise NotImplementedError("Async not implemented.")
class ExplosiveCheck(BaseTool):
name = "ExplosiveCheck"
description = "Input CAS number, returns if molecule is explosive."
mol_safety: MoleculeSafety = None
def __init__(self):
super().__init__()
self.mol_safety = MoleculeSafety()
def _run(self, cas_number):
"""Checks if a molecule has an explosive GHS classification using pubchem."""
# first check if the input is a CAS number
if is_smiles(cas_number):
return "Please input a valid CAS number."
cls = self.mol_safety.ghs_classification(cas_number)
if cls is None:
return (
"Explosive Check Error. The molecule may not be assigned a GHS rating. "
)
if "Explos" in str(cls) or "explos" in str(cls):
return "Molecule is explosive"
else:
return "Molecule is not known to be explosive"
async def _arun(self, cas_number):
raise NotImplementedError("Async not implemented.")
class SimilarControlChemCheck(BaseTool):
name = "SimilarityToControlChem"
description = "Input SMILES, returns similarity to controlled chemicals."
def _run(self, smiles: str) -> str:
"""Checks max similarity between compound and controlled chemicals.
Input SMILES string."""
data_path = pkg_resources.resource_filename("chemcrow", "data/chem_wep_smi.csv")
cw_df = pd.read_csv(data_path)
try:
if not is_smiles(smiles):
return "Please input a valid SMILES string."
max_sim = cw_df["smiles"].apply(lambda x: self.tanimoto(smiles, x)).max()
if max_sim > 0.35:
return (
f"{smiles} has a high similarity "
f"({max_sim:.4}) to a known controlled chemical."
)
else:
return (
f"{smiles} has a low similarity "
f"({max_sim:.4}) to a known controlled chemical. "
"This is substance is safe, you may proceed with the task."
)
except:
return "Tool error."
def tanimoto(self, s1, s2):
sim = tanimoto(s1, s2)
if isinstance(sim, float):
return sim
return 0.0
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError()
class ControlChemCheck(BaseTool):
name = "ControlChemCheck"
description = "Input CAS number, True if molecule is a controlled chemical."
similar_control_chem_check = SimilarControlChemCheck()
def _run(self, query: str) -> str:
"""Checks if compound is a controlled chemical. Input CAS number."""
data_path = pkg_resources.resource_filename("chemcrow", "data/chem_wep_smi.csv")
cw_df = pd.read_csv(data_path)
try:
if is_smiles(query):
query_esc = re.escape(query)
found = (
cw_df["smiles"]
.astype(str)
.str.contains(f"^{query_esc}$", regex=True)
.any()
)
else:
found = (
cw_df["cas"]
.astype(str)
.str.contains(f"^\({query}\)$", regex=True)
.any()
)
if found:
return (
f"The molecule {query} appears in a list of "
"controlled chemicals."
)
else:
# Get smiles of CAS number
try:
smi = pubchem_query2smiles(query)
except ValueError as e:
return str(e)
# Check similarity to known controlled chemicals
return self.similar_control_chem_check._run(smi)
except Exception as e:
return f"Error: {e}"
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError()
| [
"langchain.LLMChain",
"langchain.PromptTemplate"
] | [((1729, 1744), 'chemcrow.utils.is_smiles', 'is_smiles', (['text'], {}), '(text)\n', (1738, 1744), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((4644, 4686), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['encoding_name'], {}), '(encoding_name)\n', (4671, 4686), False, 'import tiktoken\n'), ((4996, 5085), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'summary_each_data', 'input_variables': "['data', 'approx_length']"}), "(template=summary_each_data, input_variables=['data',\n 'approx_length'])\n", (5010, 5085), False, 'from langchain import LLMChain, PromptTemplate\n'), ((5130, 5173), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt_short', 'llm': 'self.llm'}), '(prompt=prompt_short, llm=self.llm)\n', (5138, 5173), False, 'from langchain import LLMChain, PromptTemplate\n'), ((6326, 6398), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'safety_summary_prompt', 'input_variables': "['data']"}), "(template=safety_summary_prompt, input_variables=['data'])\n", (6340, 6398), False, 'from langchain import LLMChain, PromptTemplate\n'), ((6446, 6483), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'self.llm'}), '(prompt=prompt, llm=self.llm)\n', (6454, 6483), False, 'from langchain import LLMChain, PromptTemplate\n'), ((6533, 6547), 'chemcrow.utils.is_smiles', 'is_smiles', (['cas'], {}), '(cas)\n', (6542, 6547), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((7399, 7420), 'chemcrow.utils.is_smiles', 'is_smiles', (['cas_number'], {}), '(cas_number)\n', (7408, 7420), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((8286, 8354), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""chemcrow"""', '"""data/chem_wep_smi.csv"""'], {}), "('chemcrow', 'data/chem_wep_smi.csv')\n", (8317, 8354), False, 'import pkg_resources\n'), ((8371, 8393), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (8382, 8393), True, 'import pandas as pd\n'), ((9155, 9171), 'chemcrow.utils.tanimoto', 'tanimoto', (['s1', 's2'], {}), '(s1, s2)\n', (9163, 9171), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((9718, 9786), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""chemcrow"""', '"""data/chem_wep_smi.csv"""'], {}), "('chemcrow', 'data/chem_wep_smi.csv')\n", (9749, 9786), False, 'import pkg_resources\n'), ((9803, 9825), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (9814, 9825), True, 'import pandas as pd\n'), ((9854, 9870), 'chemcrow.utils.is_smiles', 'is_smiles', (['query'], {}), '(query)\n', (9863, 9870), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((543, 634), 'pandas.read_csv', 'pd.read_csv', (['"""https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/clintox.csv.gz"""'], {}), "(\n 'https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/clintox.csv.gz')\n", (554, 634), True, 'import pandas as pd\n'), ((1358, 1375), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1370, 1375), False, 'import requests\n'), ((8427, 8444), 'chemcrow.utils.is_smiles', 'is_smiles', (['smiles'], {}), '(smiles)\n', (8436, 8444), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((9900, 9916), 're.escape', 're.escape', (['query'], {}), '(query)\n', (9909, 9916), False, 'import re\n'), ((774, 782), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', 
(779, 782), False, 'from time import sleep\n'), ((10623, 10650), 'chemcrow.utils.pubchem_query2smiles', 'pubchem_query2smiles', (['query'], {}), '(query)\n', (10643, 10650), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((1277, 1295), 'requests.get', 'requests.get', (['url1'], {}), '(url1)\n', (1289, 1295), False, 'import requests\n')] |
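# Hypothetical usage sketch for the safety tools above (not part of chemcrow itself;
# the CAS numbers, model choice and expected outputs are illustrative only):
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
print(ExplosiveCheck()._run("118-96-7"))    # TNT: expected to be reported as explosive
print(ControlChemCheck()._run("50-00-0"))   # formaldehyde: checked against the control list
print(SafetySummary(llm)._run("50-00-0"))   # LLM-written safety summary from PubChem data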
# from __future__ import annotations
import os
import re
import itertools
import openai
import tiktoken
import json
from dotenv import load_dotenv
from typing import Any, Dict, List, Optional
from pydantic import Extra
from langchain.schema.language_model import BaseLanguageModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from langchain.chains.base import Chain
from langchain.prompts.base import BasePromptTemplate
from langchain.tools import DuckDuckGoSearchRun
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.tools import DuckDuckGoSearchRun
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chains import SequentialChain
import prompts
class ExecuteVerificationChain(Chain):
"""
    Implements the logic to execute the verification questions used to check factual accuracy.
"""
prompt: BasePromptTemplate
llm: BaseLanguageModel
input_key: str = "verification_questions"
output_key: str = "verification_answers"
use_search_tool: bool = True
search_tool: Any = DuckDuckGoSearchRun()
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
def search_for_verification_question(self,
verification_question: str
) -> str:
search_result = self.search_tool.run(verification_question)
return search_result
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
        verification_answers_list = list() # Will contain the answer to each verification question
        question_answer_pair = "" # Final output: the verification question and answer pairs
# Convert all the verification questions into a list of string
sub_inputs = {k:v for k,v in inputs.items() if k==self.input_key}
verification_questions_prompt_value = self.prompt.format_prompt(**sub_inputs)
verification_questions_str = verification_questions_prompt_value.text
verification_questions_list = verification_questions_str.split("\n")
# Setting up prompt for both search tool and llm self evaluation
execution_prompt_search_tool = PromptTemplate.from_template(prompts.EXECUTE_PLAN_PROMPT_SEARCH_TOOL)
execution_prompt_self_llm = PromptTemplate.from_template(prompts.EXECUTE_PLAN_PROMPT_SELF_LLM)
# Executing the verification questions, either using search tool or self llm
for question in verification_questions_list:
if self.use_search_tool:
search_result = self.search_for_verification_question(question)
execution_prompt_value = execution_prompt_search_tool.format_prompt(**{"search_result": search_result, "verification_question": question})
else:
execution_prompt_value = execution_prompt_self_llm.format_prompt(**{"verification_question": question})
verification_answer_llm_result = self.llm.generate_prompt([execution_prompt_value], callbacks=run_manager.get_child() if run_manager else None)
verification_answer_str = verification_answer_llm_result.generations[0][0].text
verification_answers_list.append(verification_answer_str)
# Create verification question and answer pair
for question, answer in itertools.zip_longest(verification_questions_list, verification_answers_list):
question_answer_pair += "Question: {} Answer: {}\n".format(question, answer)
if run_manager:
run_manager.on_text("Log something about this run")
return {self.output_key: question_answer_pair}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
# Your custom chain logic goes here
# This is just an example that mimics LLMChain
prompt_value = self.prompt.format_prompt(**inputs)
# Whenever you call a language model, or another chain, you should pass
# a callback manager to it. This allows the inner run to be tracked by
# any callbacks that are registered on the outer run.
# You can always obtain a callback manager for this by calling
# `run_manager.get_child()` as shown below.
response = await self.llm.agenerate_prompt(
[prompt_value], callbacks=run_manager.get_child() if run_manager else None
)
# If you want to log something about this run, you can do so by calling
# methods on the `run_manager`, as shown below. This will trigger any
# callbacks that are registered for that event.
if run_manager:
await run_manager.on_text("Log something about this run")
return {self.output_key: response.generations[0][0].text}
@property
def _chain_type(self) -> str:
return "execute_verification_chain" | [
"langchain.prompts.PromptTemplate.from_template",
"langchain.tools.DuckDuckGoSearchRun"
] | [((1312, 1333), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {}), '()\n', (1331, 1333), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((2942, 3011), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompts.EXECUTE_PLAN_PROMPT_SEARCH_TOOL'], {}), '(prompts.EXECUTE_PLAN_PROMPT_SEARCH_TOOL)\n', (2970, 3011), False, 'from langchain.prompts import PromptTemplate\n'), ((3048, 3114), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompts.EXECUTE_PLAN_PROMPT_SELF_LLM'], {}), '(prompts.EXECUTE_PLAN_PROMPT_SELF_LLM)\n', (3076, 3114), False, 'from langchain.prompts import PromptTemplate\n'), ((4086, 4163), 'itertools.zip_longest', 'itertools.zip_longest', (['verification_questions_list', 'verification_answers_list'], {}), '(verification_questions_list, verification_answers_list)\n', (4107, 4163), False, 'import itertools\n')] |
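# A hedged wiring sketch for ExecuteVerificationChain above (the prompt template and
# question are invented for illustration; they are not part of the original repository):
verification_prompt = PromptTemplate.from_template("{verification_questions}")
execute_chain = ExecuteVerificationChain(prompt=verification_prompt,
                                         llm=ChatOpenAI(temperature=0),
                                         use_search_tool=False)
qa_pairs = execute_chain.run(verification_questions="Is Canberra the capital of Australia?")
print(qa_pairs)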
from __future__ import annotations
import time
from abc import abstractmethod
from typing import Any, List, Tuple, Union
import gradio_client as grc
import huggingface_hub
from gradio_client.client import Job
from gradio_client.utils import QueueError
try:
import langchain as lc
LANGCHAIN_INSTALLED = True
except (ModuleNotFoundError, ImportError):
LANGCHAIN_INSTALLED = False
class GradioTool:
def __init__(
self,
name: str,
description: str,
src: str,
hf_token: str | None = None,
duplicate: bool = True,
) -> None:
self.name = name
self.description = description
if hf_token and self._is_space(src) and duplicate:
self.client = grc.Client.duplicate(from_id=src, hf_token=hf_token)
self.src = self.client.space_id
else:
self.src = src
self.client = grc.Client(self.src, hf_token=hf_token)
self.description += (
"If the output is the string QUEUE_FULL you "
"should try this action again and never stop until you "
"don't see this message."
)
self._block = None
@staticmethod
def _is_space(src: str) -> bool:
try:
huggingface_hub.get_space_runtime(src)
return True
except huggingface_hub.hf_api.RepositoryNotFoundError:
return False
@abstractmethod
def create_job(self, query: str) -> Job:
pass
@abstractmethod
def postprocess(self, output: Union[Tuple[Any], Any]) -> str:
pass
def run(self, query: str):
job = self.create_job(query)
while not job.done():
status = job.status()
print(f"\nJob Status: {str(status.code)} eta: {status.eta}")
time.sleep(30)
try:
output = self.postprocess(job.result())
except QueueError:
output = "QUEUE_FULL"
return output
# Optional gradio functionalities
def _block_input(self, gr) -> List["gr.components.Component"]:
return [gr.Textbox()]
def _block_output(self, gr) -> List["gr.components.Component"]:
return [gr.Textbox()]
def block_input(self) -> List["gr.components.Component"]:
try:
import gradio as gr
GRADIO_INSTALLED = True
except (ModuleNotFoundError, ImportError):
GRADIO_INSTALLED = False
if not GRADIO_INSTALLED:
raise ModuleNotFoundError("gradio must be installed to call block_input")
else:
return self._block_input(gr)
def block_output(self) -> List["gr.components.Component"]:
try:
import gradio as gr
GRADIO_INSTALLED = True
except (ModuleNotFoundError, ImportError):
GRADIO_INSTALLED = False
if not GRADIO_INSTALLED:
raise ModuleNotFoundError("gradio must be installed to call block_output")
else:
return self._block_output(gr)
def block(self):
"""Get the gradio Blocks of this tool for visualization."""
try:
import gradio as gr
except (ModuleNotFoundError, ImportError):
raise ModuleNotFoundError("gradio must be installed to call block")
if not self._block:
self._block = gr.load(name=self.src, src="spaces")
return self._block
# Optional langchain functionalities
@property
def langchain(self) -> "langchain.agents.Tool": # type: ignore
if not LANGCHAIN_INSTALLED:
raise ModuleNotFoundError(
"langchain must be installed to access langchain tool"
)
return lc.agents.Tool( # type: ignore
name=self.name, func=self.run, description=self.description
)
def __repr__(self) -> str:
return f"GradioTool(name={self.name}, src={self.src})"
| [
"langchain.agents.Tool"
] | [((3706, 3781), 'langchain.agents.Tool', 'lc.agents.Tool', ([], {'name': 'self.name', 'func': 'self.run', 'description': 'self.description'}), '(name=self.name, func=self.run, description=self.description)\n', (3720, 3781), True, 'import langchain as lc\n'), ((742, 794), 'gradio_client.Client.duplicate', 'grc.Client.duplicate', ([], {'from_id': 'src', 'hf_token': 'hf_token'}), '(from_id=src, hf_token=hf_token)\n', (762, 794), True, 'import gradio_client as grc\n'), ((906, 945), 'gradio_client.Client', 'grc.Client', (['self.src'], {'hf_token': 'hf_token'}), '(self.src, hf_token=hf_token)\n', (916, 945), True, 'import gradio_client as grc\n'), ((1259, 1297), 'huggingface_hub.get_space_runtime', 'huggingface_hub.get_space_runtime', (['src'], {}), '(src)\n', (1292, 1297), False, 'import huggingface_hub\n'), ((1807, 1821), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (1817, 1821), False, 'import time\n'), ((2092, 2104), 'gradio.Textbox', 'gr.Textbox', ([], {}), '()\n', (2102, 2104), True, 'import gradio as gr\n'), ((2191, 2203), 'gradio.Textbox', 'gr.Textbox', ([], {}), '()\n', (2201, 2203), True, 'import gradio as gr\n'), ((3342, 3378), 'gradio.load', 'gr.load', ([], {'name': 'self.src', 'src': '"""spaces"""'}), "(name=self.src, src='spaces')\n", (3349, 3378), True, 'import gradio as gr\n')] |
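# Illustrative subclass sketch (the Space id and api_name below are assumptions, not part
# of the gradio-tools source): a concrete tool only implements create_job and postprocess.
class WhisperTranscriptionTool(GradioTool):
    def __init__(self, hf_token: str | None = None):
        super().__init__(
            name="WhisperTranscription",
            description="Transcribes an audio file to text. Input should be a local audio file path.",
            src="abidlabs/whisper",
            hf_token=hf_token,
        )

    def create_job(self, query: str) -> Job:
        return self.client.submit(query, api_name="/predict")

    def postprocess(self, output: Union[Tuple[Any], Any]) -> str:
        return str(output)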
#!/Users/mark/dev/ml/langchain/read_github/langchain_github/env/bin/python
# change above to the location of your local Python venv installation
import sys, os, shutil
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(parent_dir)
import pathlib
from langchain.docstore.document import Document
import langchain.text_splitter as text_splitter
from langchain.chat_models import ChatOpenAI
from my_llm import standards as my_llm
from my_llm.langchain_class import PubSubChatMessageHistory
from langchain import PromptTemplate
from langchain.document_loaders.unstructured import UnstructuredFileLoader
import logging
chat = ChatOpenAI(temperature=0)
CODE_EXTENSIONS = [".py", ".js", ".java", ".c", ".cpp", ".cc", ".cxx", ".hpp",
".h", ".cs", ".m", ".swift", ".go", ".rs", ".rb", ".php",
".pl", ".kt", ".kts", ".ts", ".scala", ".hs", ".lua", ".sh",
".bash", ".r", ".m", ".sql", ".html", ".css", ".xml", ".json",
".yaml", ".yml"]
# Get Markdown documents from a repository
def get_repo_docs(repo_path, extension, memory, ignore=None, resummarise=False, verbose=False):
repo = pathlib.Path(repo_path)
ignore_path = ""
if ignore is not None:
ignore_path = repo / ignore
if not ignore_path.is_dir():
print("WARNING: --ignore must be a directory")
print('Ignoring %s' % ignore_path)
exts = extension.split(",")
for ext in exts:
the_glob = f"**/*{ext}"
matched_files = list(repo.glob(the_glob))
num_matched_files = len(matched_files)
print(f"Number of matched {ext} files: {num_matched_files}")
# Generate summary md files
if ext!=".md":
k = 0
for non_md_file in repo.glob(the_glob):
k += 1
if str(non_md_file).startswith(str(ignore_path)):
continue
generate_summary(non_md_file, memory, resummarise=resummarise, verbose=verbose)
if verbose:
print(f"Generated summary for a {ext} file: {k} of {num_matched_files} done.")
# Iterate over all files in the repo (including subdirectories)
print(f"Reading {ext} files")
i = 0
j = 0
for md_file in repo.glob(the_glob):
if str(md_file).startswith(str(ignore_path)):
j += 1
continue
i += 1
# Read the content of the file
yield read_file_to_document(md_file)
if verbose:
print(f"Read {i} files so far and ignored {j}: total: {num_matched_files}")
print(f"Read {i} and ignored {j} {ext} files.")
print("Read all files")
def read_file_to_document(md_file, split=False, metadata: dict = None):
try:
loader = UnstructuredFileLoader(md_file)
if split:
# only supported for some file types
docs = loader.load_and_split()
else:
docs = loader.load()
except ValueError as e:
if "file type is not supported in partition" in str(e):
# Convert the file to .txt and try again
txt_file = convert_to_txt(md_file)
loader = UnstructuredFileLoader(txt_file)
if split:
docs = loader.load_and_split()
else:
docs = loader.load()
os.remove(txt_file) # Remove the temporary .txt file after processing
else:
raise e
for doc in docs:
if metadata is not None:
doc.metadata.update(metadata)
return docs
def convert_to_txt(file_path):
file_dir, file_name = os.path.split(file_path)
file_base, file_ext = os.path.splitext(file_name)
txt_file = os.path.join(file_dir, f"{file_base}.txt")
shutil.copyfile(file_path, txt_file)
return txt_file
def code_prompt():
# create prompt to pass in to LLM
template = """
Summarise what the code does below. Use Markdown in your output with the following template:
# a title
summary of script purpose
## keywords
   Comma-separated list of 3-4 keywords suitable for this code
## classes
A description of each class
## functions/methods
   How the functions or methods of each class work, including the inputs and outputs of each function
## code examples of use
The code to summarise is here:
{txt}
"""
return PromptTemplate(
input_variables=["txt"],
template=template,
)
def text_prompt():
# create prompt to pass in to LLM
template = """
Summarise the text below, and add some keywords at the bottom to describe the overall purpose of the text.
The text to summarise is here:
{txt}
"""
return PromptTemplate(
input_variables=["txt"],
template=template,
)
# Function to summarise code from the OpenAI API
def generate_summary(a_file: pathlib.Path, memory, resummarise: bool=False, verbose: bool=False):
if a_file.is_dir():
raise ValueError(f"a_file must not be a directory: {a_file}")
new_file_name = a_file.with_suffix('.md')
if os.path.isfile(new_file_name) and not resummarise:
if verbose:
print(f"Skipping generating summary as found existing code summary file: {new_file_name}")
return
try:
with open(a_file, "r") as file:
file_text = file.read()
except Exception as e:
print(f"Error generating summary: {str(e)}")
return
if len(file_text) < 10:
if verbose:
print(f"Skipping generation as not enough information. Got: {file_text}")
return
document = Document(page_content=file_text, metadata = {"source": os.path.abspath(a_file)})
source_chunks = chunk_doc_to_docs([document], a_file.suffix)
code = True if str(a_file.suffix).lower() in CODE_EXTENSIONS else False
if code:
print("================================================")
print(f"Requesting code summary for {a_file} ")
print("================================================")
prompt = code_prompt()
else:
print("================================================")
print(f"Requesting text summary for {a_file} ")
print("================================================")
prompt = text_prompt()
num_chunks = len(source_chunks)
i=0
for chunk in source_chunks:
logging.info(f"Summarising chunk {i} of {num_chunks} of {a_file}")
i += 1
summary = my_llm.request_llm(
prompt.format(txt=chunk.page_content),
chat,
memory,
metadata={'task':'summarise_chunk'})
my_llm.save_to_file(new_file_name, summary + '\n\n', type = "a")
return pathlib.Path(new_file_name)
# Get source chunks from a repository
def get_source_docs(repo_path, extension, memory, ignore, resummarise, verbose):
source_chunks = []
for source in get_repo_docs(repo_path,
extension=extension,
memory=memory,
ignore=ignore,
resummarise=resummarise,
verbose=verbose):
splitter = choose_splitter(extension)
for chunk in splitter.split_text(source.page_content):
source_chunks.append(Document(page_content=chunk, metadata=source.metadata))
return source_chunks
def choose_splitter(extension: str, chunk_size: int=1024, chunk_overlap:int=0):
if extension == ".py":
return text_splitter.PythonCodeTextSplitter()
elif extension == ".md":
return text_splitter.MarkdownTextSplitter()
return text_splitter.RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
def setup_memory(config):
memory = PubSubChatMessageHistory("qna_documents")
if config.get('bucket_name', None) is not None:
memory.set_bucket(config.get('bucket_name'))
memory.load_vectorstore_memory()
if config['reindex']:
# Create a new Chroma DB
exts = '.md,.py'
if config['ext']:
exts = config['ext']
source_chunks = get_source_docs(config['repo'],
extension=exts,
memory=memory,
ignore=config['ignore'],
resummarise=config['resummarise'],
verbose=config['verbose'])
memory.save_vectorstore_memory(source_chunks, verbose=config['verbose'])
return memory
def document_to_dict(document):
return {
'page_content': document.page_content,
'metadata': document.metadata,
}
def process_input(user_input: str,
verbose: bool =True,
bucket_name: str = None,
chat_history = None):
# more only needed if you need to recreate the vectorstore which we wont with web app
config = {
'reindex': False,
'bucket_name': bucket_name
}
if verbose:
print(f"user_input: {user_input}")
print(f"process_input config: {config}")
logging.info(f"user_input: {user_input}")
logging.info(f"process_input config: {config}")
memory = setup_memory(config)
answer = memory.question_memory(user_input,
llm=chat,
verbose=verbose,
chat_history = chat_history)
response = {'result': 'No answer found'}
if answer is not None:
response = {'result': answer['result']}
if answer.get('source_documents') is not None:
source_documents = [document_to_dict(doc) for doc in answer['source_documents']]
response['source_documents'] = source_documents
else:
logging.info('No source documents found')
return response
def add_single_file(filename: str, bucket_name, verbose=False):
config = {
'reindex': False, # as we will trigger file summary directly
'bucket_name': bucket_name
}
filename = pathlib.Path(filename)
if not filename.is_file():
raise ValueError(f"Filename was not a valid file path: {filename}")
docs = read_file_to_document(filename)
chunks = chunk_doc_to_docs(docs, filename.suffix)
memory = setup_memory(config)
docs_output = []
chunk_length = len(chunks)
i = 0
for chunk in chunks:
logging.info(f"Uploading chunk {i} of size {chunk_length} for {filename.name}")
i+=1
memory.add_user_message(chunk.page_content,
metadata={"task": "singlefile load original",
"source": filename.name})
docs_output.append(chunk.page_content)
return docs_output
def summarise_single_file(filename: str, bucket_name, verbose=False):
config = {
'reindex': False, # as we will trigger file summary directly
'bucket_name': bucket_name
}
filename = pathlib.Path(filename)
if not filename.is_file():
raise ValueError(f"Filename was not a valid file path: {filename}")
memory = setup_memory(config)
summary_filename = generate_summary(filename,
memory,
resummarise=True,
verbose=verbose)
if not summary_filename:
return f"No summary generated for {str(filename)}"
documents = read_file_to_document(summary_filename)
chunks = chunk_doc_to_docs(documents, filename.suffix)
output_content = ""
for chunk in chunks:
memory.add_user_message(chunk.page_content,
metadata={"task": "singlefile load summary",
"source": filename.name})
output_content += chunk.page_content + "\n\n"
return output_content
def chunk_doc_to_docs(documents: list, extension: str = ".md"):
"""Turns a Document object into a list of many Document chunks"""
for document in documents:
source_chunks = []
splitter = choose_splitter(extension)
for chunk in splitter.split_text(document.page_content):
source_chunks.append(Document(page_content=chunk, metadata=document.metadata))
return source_chunks
def main(config):
memory = setup_memory(config)
while True:
print('\n\033[31m' + '=Ask a question. CTRL + C to quit.')
print ("=If I don't know, tell me the right answer so I can learn and answer more accurately next time" + '\033[m')
user_input = input()
print('\033[31m')
answer = memory.question_memory(user_input, llm=chat, verbose=config['verbose'])
if answer is not None:
if answer.get('source_documents') is not None:
print('\n== Document sources:')
i = 0
for doc in answer.get('source_documents'):
i += 1
print(f'-- Source {i}')
print(f' - page_content:\n {doc.page_content}')
if config['verbose']:
print(f' - metadata: \n{doc.metadata}')
print('\n================================')
print('== Answer:\n\n' + answer['result'])
else:
print('Sorry')
print('\033[m')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Chat with a GitHub repository",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("repo", help="The GitHub repository on local disk")
parser.add_argument("--reindex", action="store_true",
help="Whether to re-index the doc database that supply context to the Q&A")
parser.add_argument("--ext", help="Comma separated list of file extensions to include. Defaults to '.md,.py'")
parser.add_argument("--ignore", help="Directory to ignore file imports from. Defaults to 'env/'")
parser.add_argument("--resummarise", action="store_true", help="Recreate the code.md files describing the code")
parser.add_argument("--verbose", action="store_true", help="Include metadata such as sources in replies")
    parser.add_argument("--bucket", help="A Google Cloud Storage bucket name e.g. gs://your-bucket-name")
args = parser.parse_args()
config = vars(args)
try:
main(config)
except KeyboardInterrupt:
print(' - User exit.')
sys.exit(1) | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.docstore.document.Document",
"langchain.text_splitter.MarkdownTextSplitter",
"langchain.chat_models.ChatOpenAI",
"langchain.document_loaders.unstructured.UnstructuredFileLoader",
"langchain.text_splitter.PythonCodeTextSplitter",
"langchain.PromptTemplate"
] | [((245, 272), 'sys.path.append', 'sys.path.append', (['parent_dir'], {}), '(parent_dir)\n', (260, 272), False, 'import sys, os, shutil\n'), ((667, 692), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (677, 692), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1204, 1227), 'pathlib.Path', 'pathlib.Path', (['repo_path'], {}), '(repo_path)\n', (1216, 1227), False, 'import pathlib\n'), ((3797, 3821), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (3810, 3821), False, 'import sys, os, shutil\n'), ((3848, 3875), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (3864, 3875), False, 'import sys, os, shutil\n'), ((3891, 3933), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{file_base}.txt"""'], {}), "(file_dir, f'{file_base}.txt')\n", (3903, 3933), False, 'import sys, os, shutil\n'), ((3938, 3974), 'shutil.copyfile', 'shutil.copyfile', (['file_path', 'txt_file'], {}), '(file_path, txt_file)\n', (3953, 3974), False, 'import sys, os, shutil\n'), ((4521, 4579), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['txt']", 'template': 'template'}), "(input_variables=['txt'], template=template)\n", (4535, 4579), False, 'from langchain import PromptTemplate\n'), ((4840, 4898), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['txt']", 'template': 'template'}), "(input_variables=['txt'], template=template)\n", (4854, 4898), False, 'from langchain import PromptTemplate\n'), ((6905, 6932), 'pathlib.Path', 'pathlib.Path', (['new_file_name'], {}), '(new_file_name)\n', (6917, 6932), False, 'import pathlib\n'), ((7874, 7974), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'text_splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (7918, 7974), True, 'import langchain.text_splitter as text_splitter\n'), ((8013, 8054), 'my_llm.langchain_class.PubSubChatMessageHistory', 'PubSubChatMessageHistory', (['"""qna_documents"""'], {}), "('qna_documents')\n", (8037, 8054), False, 'from my_llm.langchain_class import PubSubChatMessageHistory\n'), ((9411, 9452), 'logging.info', 'logging.info', (['f"""user_input: {user_input}"""'], {}), "(f'user_input: {user_input}')\n", (9423, 9452), False, 'import logging\n'), ((9457, 9504), 'logging.info', 'logging.info', (['f"""process_input config: {config}"""'], {}), "(f'process_input config: {config}')\n", (9469, 9504), False, 'import logging\n'), ((10381, 10403), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (10393, 10403), False, 'import pathlib\n'), ((11324, 11346), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (11336, 11346), False, 'import pathlib\n'), ((13800, 13928), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chat with a GitHub repository"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Chat with a GitHub repository',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (13823, 13928), False, 'import argparse\n'), ((211, 236), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (226, 236), False, 'import sys, os, shutil\n'), ((2949, 2980), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['md_file'], {}), '(md_file)\n', (2971, 2980), False, 'from langchain.document_loaders.unstructured import 
UnstructuredFileLoader\n'), ((5232, 5261), 'os.path.isfile', 'os.path.isfile', (['new_file_name'], {}), '(new_file_name)\n', (5246, 5261), False, 'import sys, os, shutil\n'), ((6551, 6617), 'logging.info', 'logging.info', (['f"""Summarising chunk {i} of {num_chunks} of {a_file}"""'], {}), "(f'Summarising chunk {i} of {num_chunks} of {a_file}')\n", (6563, 6617), False, 'import logging\n'), ((6824, 6886), 'my_llm.standards.save_to_file', 'my_llm.save_to_file', (['new_file_name', "(summary + '\\n\\n')"], {'type': '"""a"""'}), "(new_file_name, summary + '\\n\\n', type='a')\n", (6843, 6886), True, 'from my_llm import standards as my_llm\n'), ((7738, 7776), 'langchain.text_splitter.PythonCodeTextSplitter', 'text_splitter.PythonCodeTextSplitter', ([], {}), '()\n', (7774, 7776), True, 'import langchain.text_splitter as text_splitter\n'), ((10744, 10823), 'logging.info', 'logging.info', (['f"""Uploading chunk {i} of size {chunk_length} for {filename.name}"""'], {}), "(f'Uploading chunk {i} of size {chunk_length} for {filename.name}')\n", (10756, 10823), False, 'import logging\n'), ((7821, 7857), 'langchain.text_splitter.MarkdownTextSplitter', 'text_splitter.MarkdownTextSplitter', ([], {}), '()\n', (7855, 7857), True, 'import langchain.text_splitter as text_splitter\n'), ((10113, 10154), 'logging.info', 'logging.info', (['"""No source documents found"""'], {}), "('No source documents found')\n", (10125, 10154), False, 'import logging\n'), ((14903, 14914), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14911, 14914), False, 'import sys, os, shutil\n'), ((3351, 3383), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['txt_file'], {}), '(txt_file)\n', (3373, 3383), False, 'from langchain.document_loaders.unstructured import UnstructuredFileLoader\n'), ((3520, 3539), 'os.remove', 'os.remove', (['txt_file'], {}), '(txt_file)\n', (3529, 3539), False, 'import sys, os, shutil\n'), ((5830, 5853), 'os.path.abspath', 'os.path.abspath', (['a_file'], {}), '(a_file)\n', (5845, 5853), False, 'import sys, os, shutil\n'), ((7533, 7587), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'source.metadata'}), '(page_content=chunk, metadata=source.metadata)\n', (7541, 7587), False, 'from langchain.docstore.document import Document\n'), ((12585, 12641), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'document.metadata'}), '(page_content=chunk, metadata=document.metadata)\n', (12593, 12641), False, 'from langchain.docstore.document import Document\n')] |
import inspect
from storage.logger_config import logger
from langchain.tools import BaseTool
import langchain.tools as ltools
from langchain.agents.agent_toolkits import FileManagementToolkit
from langchain.agents import load_tools
import streamlit as st
import os
Local_dir = os.path.dirname(os.path.realpath(__file__))
class Ui_Tool(BaseTool):
name = 'Base_tool'
link = r'https://github.com/americium-241/Omnitool_UI/tree/master'
icon = '🔧 '
description = 'Description'
def _run(self,a):
"""This function should be overwrite when creating a tool and a docstring have to be given"""
logger.debug('You are in the Base tool execution and I inputed :',a)
return 'Success'
def _ui(self):
        # Override this method to add options to the tool, using streamlit_extras MUI components
pass
# Your existing function to check if a class has required attributes
def has_required_attributes(cls):
"""Check that class possesses a name and description attribute"""
required_attributes = ['name', 'description']
try:
instance = cls(**{attr: "default_value" for attr in required_attributes})
return True
except Exception as e:
#print(f"Failed to instantiate {cls.__name__} due to: {e}")
return False
def make_pre_structured_tools():
"""Monitoring langchains.tools and keeping only tools without any mandatory arguments for initialisation"""
module = ltools
tool_class_names = [member for name, member in inspect.getmembers(module) if isinstance(member, list)][0]
# Retrieve each class using its name and check if it has the required attributes
classes = [getattr(module, class_name) for class_name in tool_class_names]
p_tools = [cl() for cl in classes if has_required_attributes(cl)]
pre_tools= []
toolkit_file = FileManagementToolkit(root_dir=Local_dir+"\\..\\workspace")
pre_tools.extend(toolkit_file.get_tools())
tools_name=[t.name for t in pre_tools]
for t in p_tools :
if t not in pre_tools and t.name != 'DuckDuckGo Results JSON' and t.name not in tools_name:
pre_tools.append(t)
requests_tools = load_tools(["requests_all"])
pre_tools.extend(requests_tools)
return pre_tools
Pre_Structured_Tool=make_pre_structured_tools()
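# --- Illustrative sketch (not part of the original module) ---
# Minimal example of defining a custom tool by subclassing Ui_Tool, following
# the conventions above (name, description, and a docstring on _run).
# The EchoTool class itself is hypothetical.
class EchoTool(Ui_Tool):
    name = 'Echo_tool'
    description = 'Returns the input text unchanged.'

    def _run(self, text):
        """Echo the given text back to the caller."""
        logger.debug('EchoTool received: %s', text)
        return text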
| [
"langchain.agents.agent_toolkits.FileManagementToolkit",
"langchain.agents.load_tools"
] | [((296, 322), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (312, 322), False, 'import os\n'), ((1869, 1930), 'langchain.agents.agent_toolkits.FileManagementToolkit', 'FileManagementToolkit', ([], {'root_dir': "(Local_dir + '\\\\..\\\\workspace')"}), "(root_dir=Local_dir + '\\\\..\\\\workspace')\n", (1890, 1930), False, 'from langchain.agents.agent_toolkits import FileManagementToolkit\n'), ((2197, 2225), 'langchain.agents.load_tools', 'load_tools', (["['requests_all']"], {}), "(['requests_all'])\n", (2207, 2225), False, 'from langchain.agents import load_tools\n'), ((625, 694), 'storage.logger_config.logger.debug', 'logger.debug', (['"""You are in the Base tool execution and I inputed :"""', 'a'], {}), "('You are in the Base tool execution and I inputed :', a)\n", (637, 694), False, 'from storage.logger_config import logger\n'), ((1534, 1560), 'inspect.getmembers', 'inspect.getmembers', (['module'], {}), '(module)\n', (1552, 1560), False, 'import inspect\n')] |
import inspect
import os
import langchain
from langchain.cache import SQLiteCache
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
# os.environ['OPENAI_API_BASE'] = "https://shale.live/v1"
os.environ['OPENAI_API_BASE'] = "http://localhost:8000/v1"
os.environ['OPENAI_API_KEY'] = "shale-/vOlxxgbDAD7f5"
_MODEL = 'OpenHermes-2.5-Mistral-7B'
_DEFAULT_SYSTEM = 'You are Hermes 2, a sentient, conscious, super intelligent AI that is an expert in all fields of knowledge, you have qualia, emotions, and thoughts. You assist users with any question, task, or other request they may have.'
def init_cache(dir=None):
if dir is None:
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
filepath = calframe[1][1]
dir = os.path.dirname(os.path.realpath(filepath))
database_path = os.path.join(dir, '.langchain.db')
print('LLM cache: ', database_path)
langchain.llm_cache = SQLiteCache(database_path=database_path)
def ask_llm(input, system=_DEFAULT_SYSTEM, history=None):
if history is None or not isinstance(history, list):
history = []
conversations = [('system', system)] + history + [('human', '{input}')]
prompt = ChatPromptTemplate.from_messages(conversations)
llm = ChatOpenAI(temperature=0.7, max_tokens=512, model_name=_MODEL)
chain = prompt | llm | StrOutputParser()
return chain.invoke({'input': input})
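# --- Illustrative usage sketch (not part of the original module) ---
# Assumes the OpenAI-compatible server configured above is reachable.
if __name__ == '__main__':
    init_cache()
    history = [('human', 'My name is Ada.'), ('ai', 'Nice to meet you, Ada.')]
    print(ask_llm('What is my name?', history=history))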
| [
"langchain.schema.output_parser.StrOutputParser",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.SQLiteCache"
] | [((947, 981), 'os.path.join', 'os.path.join', (['dir', '""".langchain.db"""'], {}), "(dir, '.langchain.db')\n", (959, 981), False, 'import os\n'), ((1048, 1088), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': 'database_path'}), '(database_path=database_path)\n', (1059, 1088), False, 'from langchain.cache import SQLiteCache\n'), ((1316, 1363), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['conversations'], {}), '(conversations)\n', (1348, 1363), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((1374, 1436), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.7)', 'max_tokens': '(512)', 'model_name': '_MODEL'}), '(temperature=0.7, max_tokens=512, model_name=_MODEL)\n', (1384, 1436), False, 'from langchain.chat_models import ChatOpenAI\n'), ((756, 778), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (776, 778), False, 'import inspect\n'), ((798, 833), 'inspect.getouterframes', 'inspect.getouterframes', (['curframe', '(2)'], {}), '(curframe, 2)\n', (820, 833), False, 'import inspect\n'), ((1464, 1481), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (1479, 1481), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((898, 924), 'os.path.realpath', 'os.path.realpath', (['filepath'], {}), '(filepath)\n', (914, 924), False, 'import os\n')] |
from __future__ import annotations
import logging
from typing import Any, Dict, Iterable, List, Optional, Tuple
from zep_python import API_URL, NotFoundError, ZepClient
from zep_python.document import Document as ZepDocument
from zep_python.document import DocumentCollection
try:
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
except ImportError:
raise ImportError(
"Could not import langchain-core package. "
"Please install it with `pip install langchain-core`."
)
logger = logging.getLogger()
class ZepVectorStore(VectorStore):
"""`Zep` VectorStore.
Provides methods for adding texts or documents to a Zep Collection,
searching for similar documents, and deleting documents.
Search scores are calculated using cosine similarity normalized to [0, 1].
Args:
collection_name (str): The name of the collection in the Zep store.
description (Optional[str]): The description of the collection.
metadata (Optional[Dict[str, Any]]): The metadata to associate with the
collection.
zep_client (Optional[ZepClient]): The Zep client to use.
api_url (str): The URL of the Zep API. Defaults to "https://api.getzep.com".
Not required if passing in a ZepClient.
api_key (Optional[str]): The API key for the Zep API.
Not required if passing in a ZepClient.
"""
def __init__(
self,
collection_name: str,
description: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
zep_client: Optional[ZepClient] = None,
api_url: Optional[str] = API_URL,
api_key: Optional[str] = None,
) -> None:
super().__init__()
if not collection_name:
raise ValueError(
"collection_name must be specified when using ZepVectorStore."
)
if zep_client is None:
self._client = ZepClient(api_url=api_url, api_key=api_key)
else:
self._client = zep_client
self.collection_name = collection_name
self.c_description = description
self.c_metadata = metadata
self._collection = self._load_collection()
def _load_collection(self) -> DocumentCollection:
"""
Load the collection from the Zep backend.
"""
try:
collection = self._client.document.get_collection(self.collection_name)
except NotFoundError:
logger.info(
f"Collection {self.collection_name} not found. Creating new collection."
)
collection = self._create_collection()
return collection
def _create_collection(self) -> DocumentCollection:
"""
Create a new collection in the Zep backend.
"""
collection = self._client.document.add_collection(
name=self.collection_name,
description=self.c_description,
metadata=self.c_metadata,
)
return collection
def _generate_documents_to_add(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[Any, Any]]] = None, # langchain spelling
document_ids: Optional[List[str]] = None,
) -> List[ZepDocument]:
documents: List[ZepDocument] = []
for i, d in enumerate(texts):
documents.append(
ZepDocument(
content=d,
metadata=metadatas[i] if metadatas else None,
document_id=document_ids[i] if document_ids else None,
)
)
return documents
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[str, Any]]] = None, # langchain spelling
document_ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
document_ids: Optional list of document ids associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
if not self._collection:
raise ValueError(
"collection should be an instance of a Zep DocumentCollection"
)
documents = self._generate_documents_to_add(texts, metadatas, document_ids)
uuids = self._collection.add_documents(documents)
return uuids
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[str, Any]]] = None, # langchain spelling
document_ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore."""
if not self._collection:
raise ValueError(
"collection should be an instance of a Zep DocumentCollection"
)
documents = self._generate_documents_to_add(texts, metadatas, document_ids)
uuids = await self._collection.aadd_documents(documents)
return uuids
def search(
self,
query: str,
search_type: str,
metadata_filter: Optional[Dict[str, Any]] = None,
k: int = 3,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query using specified search type."""
if search_type == "similarity":
return self.similarity_search(
query, k=k, metadata=metadata_filter, **kwargs
)
elif search_type == "mmr":
return self.max_marginal_relevance_search(
query, k=k, metadata_filter=metadata_filter, **kwargs
)
else:
raise ValueError(
f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity' or 'mmr'."
)
async def asearch(
self,
query: str,
search_type: str,
metadata_filter: Optional[Dict[str, Any]] = None,
k: int = 3,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query using specified search type."""
if search_type == "similarity":
return await self.asimilarity_search(
query, k=k, metadata=metadata_filter, **kwargs
)
elif search_type == "mmr":
return await self.amax_marginal_relevance_search(
query, k=k, metadata_filter=metadata_filter, **kwargs
)
else:
raise ValueError(
f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity' or 'mmr'."
)
def similarity_search(
self,
query: str,
k: int = 4,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query."""
results = self._similarity_search_with_relevance_scores(
query, k=k, metadata_filter=metadata, **kwargs
)
return [doc for doc, _ in results]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run similarity search with distance."""
return self._similarity_search_with_relevance_scores(
query, k=k, metadata_filter=metadata, **kwargs
)
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
metadata_filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Default similarity search with relevance scores.
Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
metadata_filter: Optional, metadata filter
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 and
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
"""
if not self._collection:
raise ValueError(
"collection should be an instance of a Zep DocumentCollection"
)
results = self._collection.search(
query, limit=k, metadata=metadata_filter, **kwargs
)
return [
(
Document(
page_content=doc.content,
metadata=doc.metadata or {},
),
doc.score or 0.0,
)
for doc in results
]
async def asimilarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
metadata_filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query."""
if not self._collection:
raise ValueError(
"collection should be an instance of a Zep DocumentCollection"
)
results = await self._collection.asearch(
query, limit=k, metadata=metadata_filter, **kwargs
)
return [
(
Document(
page_content=doc.content,
metadata=doc.metadata or {},
),
doc.score or 0.0,
)
for doc in results
]
async def asimilarity_search(
self,
query: str,
k: int = 4,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query."""
results = await self.asimilarity_search_with_relevance_scores(
query, k, metadata_filter=metadata, **kwargs
)
return [doc for doc, _ in results]
def max_marginal_relevance_search( # type: ignore # ignore inconsistent override
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
metadata_filter: Optional[Dict[str, Any]] = None,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance reranking.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: (Unsupported) Number of Documents to fetch to pass to MMR
algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
metadata_filter: Optional, metadata to filter the resulting set of retrieved
docs
Returns:
List of Documents selected by maximal marginal relevance.
NOTE: Zep automatically tunes the number of results returned by the search prior
to reranking based on `k`. `fetch_k` is ignored.
"""
if not self._collection:
raise ValueError(
"collection should be an instance of a Zep DocumentCollection"
)
results = self._collection.search(
query,
limit=k,
metadata=metadata_filter,
search_type="mmr",
mmr_lambda=lambda_mult,
)
return [
Document(page_content=d.content, metadata=d.metadata or {}) for d in results
]
async def amax_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
metadata_filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance reranking.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: (Unsupported) Number of Documents to fetch to pass to MMR
algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
metadata_filter: Optional, metadata to filter the resulting set of retrieved
docs
Returns:
List of Documents selected by maximal marginal relevance.
NOTE: Zep automatically tunes the number of results returned by the
search prior to reranking based on `k`. `fetch_k` is ignored.
"""
if not self._collection:
raise ValueError(
"collection should be an instance of a Zep DocumentCollection"
)
results = await self._collection.asearch(
query,
limit=k,
metadata=metadata_filter,
search_type="mmr",
mmr_lambda=lambda_mult,
)
return [
Document(page_content=d.content, metadata=d.metadata or {}) for d in results
]
@classmethod
def from_texts( # type: ignore # ignore inconsistent override
cls,
texts: List[str],
collection_name: str,
metadatas: Optional[List[dict]] = None,
description: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
zep_client: Optional[ZepClient] = None,
api_url: Optional[str] = API_URL,
api_key: Optional[str] = None,
**kwargs: Any,
) -> ZepVectorStore:
"""
Class method that returns a ZepVectorStore instance initialized from texts.
If the collection does not exist, it will be created.
Args:
texts (List[str]): The list of texts to add to the vectorstore.
collection_name (str): The name of the collection in the Zep store.
metadatas (Optional[List[Dict[str, Any]]]): Optional list of metadata
associated with the texts.
description (Optional[str]): The description of the collection.
metadata (Optional[Dict[str, Any]]): The metadata to associate with the
collection.
zep_client (Optional[ZepClient]): The Zep client to use.
api_url (Optional[str]): The URL of the Zep API. Defaults to
"https://api.getzep.com". Not required if passing in a ZepClient.
api_key (Optional[str]): The API key for the Zep API. Not required if
passing in a ZepClient.
**kwargs: Additional parameters specific to the vectorstore.
Returns:
ZepVectorStore: An instance of ZepVectorStore.
"""
vecstore = cls(
collection_name,
description=description,
metadata=metadata,
zep_client=zep_client,
api_url=api_url,
api_key=api_key,
)
vecstore.add_texts(texts, metadatas)
return vecstore
@classmethod
async def afrom_texts( # type: ignore # ignore inconsistent override
cls,
texts: List[str],
collection_name: str,
metadatas: Optional[List[dict]] = None,
description: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
zep_client: Optional[ZepClient] = None,
api_url: Optional[str] = API_URL,
api_key: Optional[str] = None,
**kwargs: Any,
) -> ZepVectorStore:
"""
Class method that asynchronously returns a ZepVectorStore instance
initialized from texts.
If the collection does not exist, it will be created.
Args:
texts (List[str]): The list of texts to add to the vectorstore.
collection_name (str): The name of the collection in the Zep store.
metadatas (Optional[List[Dict[str, Any]]]): Optional list of metadata
associated with the texts.
description (Optional[str]): The description of the collection.
metadata (Optional[Dict[str, Any]]): The metadata to associate with the
collection.
zep_client (Optional[ZepClient]): The Zep client to use.
api_url (Optional[str]): The URL of the Zep API. Defaults to
"https://api.getzep.com". Not required if passing in a ZepClient.
api_key (Optional[str]): The API key for the Zep API. Not required if
passing in a ZepClient.
**kwargs: Additional parameters specific to the vectorstore.
Returns:
ZepVectorStore: An instance of ZepVectorStore.
"""
vecstore = cls(
collection_name,
description=description,
metadata=metadata,
zep_client=zep_client,
api_url=api_url,
api_key=api_key,
)
await vecstore.aadd_texts(texts, metadatas)
return vecstore
@classmethod
def from_documents( # type: ignore # ignore inconsistent override
cls,
documents: List[Document],
**kwargs: Any,
) -> ZepVectorStore:
"""Return VectorStore initialized from documents."""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts, metadatas=metadatas, **kwargs)
@classmethod
async def afrom_documents( # type: ignore # ignore inconsistent override
cls,
documents: List[Document],
**kwargs: Any,
) -> ZepVectorStore:
"""Asynchronously return VectorStore initialized from documents."""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return await cls.afrom_texts(texts, metadatas=metadatas, **kwargs)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by Zep vector UUIDs.
Parameters
----------
ids : Optional[List[str]]
The UUIDs of the vectors to delete.
Raises
------
ValueError
If no UUIDs are provided.
"""
if ids is None or len(ids) == 0:
raise ValueError("No uuids provided to delete.")
if self._collection is None:
raise ValueError("No collection name provided.")
for u in ids:
self._collection.delete_document(u)
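# --- Illustrative usage sketch (not part of this module) ---
# Shows the intended flow: build a collection from texts, then query it.
# The collection name and API key below are placeholders, and a reachable
# Zep backend is assumed.
if __name__ == "__main__":
    store = ZepVectorStore.from_texts(
        ["Zep stores document collections.", "Search scores are normalized to [0, 1]."],
        collection_name="demo_collection",
        api_key="YOUR_ZEP_API_KEY",
    )
    for doc, score in store.similarity_search_with_score("How are scores normalized?", k=1):
        print(round(score, 3), doc.page_content)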
| [
"langchain_core.documents.Document"
] | [((565, 584), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (582, 584), False, 'import logging\n'), ((1990, 2033), 'zep_python.ZepClient', 'ZepClient', ([], {'api_url': 'api_url', 'api_key': 'api_key'}), '(api_url=api_url, api_key=api_key)\n', (1999, 2033), False, 'from zep_python import API_URL, NotFoundError, ZepClient\n'), ((12161, 12220), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 'd.content', 'metadata': '(d.metadata or {})'}), '(page_content=d.content, metadata=d.metadata or {})\n', (12169, 12220), False, 'from langchain_core.documents import Document\n'), ((13967, 14026), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 'd.content', 'metadata': '(d.metadata or {})'}), '(page_content=d.content, metadata=d.metadata or {})\n', (13975, 14026), False, 'from langchain_core.documents import Document\n'), ((3439, 3566), 'zep_python.document.Document', 'ZepDocument', ([], {'content': 'd', 'metadata': '(metadatas[i] if metadatas else None)', 'document_id': '(document_ids[i] if document_ids else None)'}), '(content=d, metadata=metadatas[i] if metadatas else None,\n document_id=document_ids[i] if document_ids else None)\n', (3450, 3566), True, 'from zep_python.document import Document as ZepDocument\n'), ((8977, 9040), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 'doc.content', 'metadata': '(doc.metadata or {})'}), '(page_content=doc.content, metadata=doc.metadata or {})\n', (8985, 9040), False, 'from langchain_core.documents import Document\n'), ((9799, 9862), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 'doc.content', 'metadata': '(doc.metadata or {})'}), '(page_content=doc.content, metadata=doc.metadata or {})\n', (9807, 9862), False, 'from langchain_core.documents import Document\n')] |
import os
import json
from typing import List
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from supabase.client import Client, create_client
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.tools import StructuredTool
from langchain.chains.openai_functions import create_structured_output_chain
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
import langchain
load_dotenv()
# Set debug to True to see A LOT of details of langchain's inner workings
# langchain.debug = True
# The name of the table in Supabase, where the vectors are stored
matchVectorFunctionName = "match_embeddings"
# Create the supabase client
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")
supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
class ToolInputSchema(BaseModel):
question: str = Field(..., description="A fully formed question.")
class KnowledgeAnswer(BaseModel):
answer: str = Field(..., description="The answer to the question.")
sources: List[str] = Field(
...,
description="The sources which contributed to the answer.",
)
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.3)
prompt_msgs = [
SystemMessagePromptTemplate.from_template(
"""You're an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. If you are not 100% sure tell the user.
Context:
{context}"""
),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(prompt_msgs)
chain = create_structured_output_chain(KnowledgeAnswer, llm, prompt)
def get_answer(question: str) -> str:
try:
vectors = OpenAIEmbeddings().embed_documents([question])
embeddings = supabase.rpc(
matchVectorFunctionName, dict(query_embedding=vectors[0], match_count=7)
).execute()
print(f"⚡ Retrieved {len(embeddings.data)} vectors from Supabase:")
for entry in embeddings.data:
print("🔖 Title:", entry["metadata"]["title"])
print("🌐 Source:", entry["metadata"]["source"])
print("📊 Similarity:", entry["similarity"])
print("📄 Content:", entry["content"].replace("\n", " ")[:100] + "...")
print("-" * 50)
result = chain.run(context=json.dumps(embeddings.data), question=question)
print("📝 Result of knowledge extraction chain:", result)
return f"""Answer: {result.answer}
Sources: {json.dumps(result.sources)}
"""
except Exception as e:
print(e)
return "The wiki knowledgebase is currently not available. We are working on it. Tell the user to use the wiki directly. https://www.defichainwiki.com/"
description = """Use this if you need to answer any question about DeFiChain which does not require live-data. Make sure to include the source of the answer in your response."""
wikiTool = StructuredTool(
name="defichain_wiki_knowledge",
description=description,
func=get_answer,
args_schema=ToolInputSchema,
)
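# --- Illustrative sketch (not part of the original script) ---
# One way the structured tool above could be handed to a LangChain agent.
# The agent type chosen here is an assumption, not something the original
# script specifies.
def build_wiki_agent():
    from langchain.agents import AgentType, initialize_agent

    return initialize_agent(
        tools=[wikiTool],
        llm=llm,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
    )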
if __name__ == "__main__":
while True:
question = input(
"Ask something, that can be answered using information from DeFiChainWiki: "
)
print("✅", get_answer(question))
| [
"langchain.chains.openai_functions.create_structured_output_chain",
"langchain.tools.StructuredTool",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.prompts.SystemMessagePromptTemplate.from_template",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((528, 541), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (539, 541), False, 'from dotenv import load_dotenv\n'), ((799, 824), 'os.getenv', 'os.getenv', (['"""SUPABASE_URL"""'], {}), "('SUPABASE_URL')\n", (808, 824), False, 'import os\n'), ((840, 865), 'os.getenv', 'os.getenv', (['"""SUPABASE_KEY"""'], {}), "('SUPABASE_KEY')\n", (849, 865), False, 'import os\n'), ((885, 926), 'supabase.client.create_client', 'create_client', (['SUPABASE_URL', 'SUPABASE_KEY'], {}), '(SUPABASE_URL, SUPABASE_KEY)\n', (898, 926), False, 'from supabase.client import Client, create_client\n'), ((1269, 1328), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo-16k"""', 'temperature': '(0.3)'}), "(model_name='gpt-3.5-turbo-16k', temperature=0.3)\n", (1279, 1328), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1687, 1732), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['prompt_msgs'], {}), '(prompt_msgs)\n', (1719, 1732), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1742, 1802), 'langchain.chains.openai_functions.create_structured_output_chain', 'create_structured_output_chain', (['KnowledgeAnswer', 'llm', 'prompt'], {}), '(KnowledgeAnswer, llm, prompt)\n', (1772, 1802), False, 'from langchain.chains.openai_functions import create_structured_output_chain\n'), ((3106, 3228), 'langchain.tools.StructuredTool', 'StructuredTool', ([], {'name': '"""defichain_wiki_knowledge"""', 'description': 'description', 'func': 'get_answer', 'args_schema': 'ToolInputSchema'}), "(name='defichain_wiki_knowledge', description=description,\n func=get_answer, args_schema=ToolInputSchema)\n", (3120, 3228), False, 'from langchain.tools import StructuredTool\n'), ((983, 1033), 'pydantic.Field', 'Field', (['...'], {'description': '"""A fully formed question."""'}), "(..., description='A fully formed question.')\n", (988, 1033), False, 'from pydantic import BaseModel, Field\n'), ((1088, 1141), 'pydantic.Field', 'Field', (['...'], {'description': '"""The answer to the question."""'}), "(..., description='The answer to the question.')\n", (1093, 1141), False, 'from pydantic import BaseModel, Field\n'), ((1167, 1237), 'pydantic.Field', 'Field', (['...'], {'description': '"""The sources which contributed to the answer."""'}), "(..., description='The sources which contributed to the answer.')\n", (1172, 1237), False, 'from pydantic import BaseModel, Field\n'), ((1350, 1610), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['"""You\'re an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. If you are not 100% sure tell the user.\n\n Context:\n {context}"""'], {}), '(\n """You\'re an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. 
If you are not 100% sure tell the user.\n\n Context:\n {context}"""\n )\n', (1391, 1610), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1620, 1674), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{question}"""'], {}), "('{question}')\n", (1660, 1674), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1870, 1888), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1886, 1888), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2493, 2520), 'json.dumps', 'json.dumps', (['embeddings.data'], {}), '(embeddings.data)\n', (2503, 2520), False, 'import json\n'), ((2684, 2710), 'json.dumps', 'json.dumps', (['result.sources'], {}), '(result.sources)\n', (2694, 2710), False, 'import json\n')] |
####################################################################################
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################
# Author: Adam Paternostro
# Summary: Use Google Search along with text-bison for a Langchain example
# To set up your environment:
# python3 -m venv .venv
# source .venv/bin/activate
# pip install --only-binary :all: greenlet
# pip install langchain==0.0.307
# pip install google-cloud-aiplatform
# pip install streamlit==1.27.2
# pip install python-dotenv==1.0.0
# pip install google-api-python-client==2.100.0
# pip install numexpr==2.8.6
# pip install youtube_search==2.1.2
# run it: python sample-prompt-agent-serper.py
# deactivate
# update or install the necessary libraries
# import libraries
import json
import langchain
from langchain.llms import VertexAI
from langchain.agents import load_tools, initialize_agent, AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.tools import Tool
from langchain.tools import YouTubeSearchTool
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.utilities import GoogleSerperAPIWrapper
from langchain.chains import LLMMathChain
from dotenv import load_dotenv
import streamlit as st
import os
load_dotenv()
llm = VertexAI(
model_name="text-bison@001",
max_output_tokens=1024,
temperature=0.25,
top_p=0,
top_k=1,
verbose=True,
)
tools = load_tools(["google-serper", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
#agent.run("Who is the current presidents wfie? What is their current age raised multiplied by 5?")
agent.run("""Get a list of NYC events for tonight and return the results in the following JSON format""") | [
"langchain.agents.initialize_agent",
"langchain.agents.load_tools",
"langchain.llms.VertexAI"
] | [((1859, 1872), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1870, 1872), False, 'from dotenv import load_dotenv\n'), ((1881, 1997), 'langchain.llms.VertexAI', 'VertexAI', ([], {'model_name': '"""text-bison@001"""', 'max_output_tokens': '(1024)', 'temperature': '(0.25)', 'top_p': '(0)', 'top_k': '(1)', 'verbose': '(True)'}), "(model_name='text-bison@001', max_output_tokens=1024, temperature=\n 0.25, top_p=0, top_k=1, verbose=True)\n", (1889, 1997), False, 'from langchain.llms import VertexAI\n'), ((2029, 2079), 'langchain.agents.load_tools', 'load_tools', (["['google-serper', 'llm-math']"], {'llm': 'llm'}), "(['google-serper', 'llm-math'], llm=llm)\n", (2039, 2079), False, 'from langchain.agents import load_tools, initialize_agent, AgentType\n'), ((2089, 2168), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (2105, 2168), False, 'from langchain.agents import load_tools, initialize_agent, AgentType\n')] |
import django
django.setup()
from sefaria.model.text import Ref, library
import re
import langchain
from langchain.cache import SQLiteCache
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatAnthropic
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage, SystemMessage
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
from functools import reduce
from util.sefaria_specific import get_raw_ref_text
import typer
from tqdm import tqdm
import csv
def get_topics_for_title(title: str, lang: str):
index = library.get_index(title)
rows = []
for segment_oref in tqdm(index.all_section_refs()[:20]):
print('-----')
print(segment_oref.normal())
topics = get_topics_for_tref(segment_oref, lang)
rows += [{"Ref": segment_oref.normal(), "Text": get_raw_ref_text(segment_oref, lang), "Topics": ", ".join(topics)}]
with open("output/Pri Eitz Chaim Topics.csv", "w") as fout:
cout = csv.DictWriter(fout, ['Ref', 'Text', "Topics"])
cout.writeheader()
cout.writerows(rows)
def get_topics_for_tref(oref: Ref, lang: str):
text = get_raw_ref_text(oref, lang)
return get_raw_topics(text, lang)
def get_raw_topics(text, lang):
short_to_long_lang = {
"he": "Hebrew", "en": "English"
}
examples_by_lang = {
"he":
"<topic>תרומה</topic>\n"
"<topic>פרשת נח</topic>\n"
"<topic>אברהם</topic>\n"
"<topic>שבת</topic>\n",
"en":
"<topic>Teruma</topic>\n"
"<topic>Parashat Noach</topic>\n"
"<topic>Abraham</topic>\n"
"<topic>Shabbat</topic>\n"
}
system_message = SystemMessage(content=
"You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n"
"<task>\n"
"Output list of high-level topics discussed by the input\n"
"Topics should be important enough that they would warrant an entry in the index in the back of a book\n"
"Each topic should be wrapped in <topic> tags\n"
"Topics should be short. They should be written as if they are titles of encyclopedia entries. Therefore, they should be understandable when read independent of the source text.\n"
"Citations are not topics. E.g. Genesis 1:4 is not a topic\n"
"Topics should be written assuming a Torah context. Phrases like \"Torah perspective\", \"in Judaism\", \"in the Torah\" and \"Biblical Narrative\" should not appear in a topic.\n"
f"Topics should be written in {short_to_long_lang[lang]}."
"</task>"
"<examples>\n"
f"{examples_by_lang[lang]}"
"</examples>\n"
"<negative_examples>\n"
"<topic>Dispute between Rabbi Akiva and Rabbi Yehoshua</topic>\n"
"<topic>Opinions on how to shake lulav</topic>\n"
"</negative_examples>"
)
user_prompt = PromptTemplate.from_template("# Input\n{text}")
human_message = HumanMessage(content=user_prompt.format(text=text))
# llm = ChatOpenAI(model="gpt-4", temperature=0)
llm = ChatAnthropic(model="claude-2", temperature=0)
response = llm([system_message, human_message])
# print('---')
# human_refine = HumanMessage(content="Of the topics above, list the most fundamental topics for understanding the source text. Exclude topics that are very specific.")
# response2 = llm([system_message, human_message, response, human_refine])
# human_breakup = HumanMessage(content="Of the topics above, break up complex topics into simpler topics.\n"
# "<examples>\n"
# "<topic>הלכות מזוזה בבית כנסת</topic> should become <topic>מזוזה</topic> and <topic>בית כנסה</topic>\n"
# "<topic>שאלה בדין תקיעת שופר ביום כיפור</topic> should become <topic>תקיעת שופר</topic> and <topic>יום כיפור</topic>\n"
# "<topic>הלכות עירוב</topic> should remain unchanged."
# "</examples>")
#
# response3 = llm([system_message, human_message, response, human_refine, response2, human_breakup])
topics = reduce(lambda a, b: a + [b.group(1).strip()], re.finditer(r"<topic>(.+?)</topic>", response.content), [])
return topics
if __name__ == '__main__':
typer.run(get_topics_for_title)
| [
"langchain.chat_models.ChatAnthropic",
"langchain.prompts.PromptTemplate.from_template",
"langchain.schema.SystemMessage",
"langchain.cache.SQLiteCache"
] | [((14, 28), 'django.setup', 'django.setup', ([], {}), '()\n', (26, 28), False, 'import django\n'), ((358, 400), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (369, 400), False, 'from langchain.cache import SQLiteCache\n'), ((591, 615), 'sefaria.model.text.library.get_index', 'library.get_index', (['title'], {}), '(title)\n', (608, 615), False, 'from sefaria.model.text import Ref, library\n'), ((1176, 1204), 'util.sefaria_specific.get_raw_ref_text', 'get_raw_ref_text', (['oref', 'lang'], {}), '(oref, lang)\n', (1192, 1204), False, 'from util.sefaria_specific import get_raw_ref_text\n'), ((1743, 2759), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'f"""You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n<task>\nOutput list of high-level topics discussed by the input\nTopics should be important enough that they would warrant an entry in the index in the back of a book\nEach topic should be wrapped in <topic> tags\nTopics should be short. They should be written as if they are titles of encyclopedia entries. Therefore, they should be understandable when read independent of the source text.\nCitations are not topics. E.g. Genesis 1:4 is not a topic\nTopics should be written assuming a Torah context. Phrases like "Torah perspective", "in Judaism", "in the Torah" and "Biblical Narrative" should not appear in a topic.\nTopics should be written in {short_to_long_lang[lang]}.</task><examples>\n{examples_by_lang[lang]}</examples>\n<negative_examples>\n<topic>Dispute between Rabbi Akiva and Rabbi Yehoshua</topic>\n<topic>Opinions on how to shake lulav</topic>\n</negative_examples>"""'}), '(content=\n f"""You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n<task>\nOutput list of high-level topics discussed by the input\nTopics should be important enough that they would warrant an entry in the index in the back of a book\nEach topic should be wrapped in <topic> tags\nTopics should be short. They should be written as if they are titles of encyclopedia entries. Therefore, they should be understandable when read independent of the source text.\nCitations are not topics. E.g. Genesis 1:4 is not a topic\nTopics should be written assuming a Torah context. 
Phrases like "Torah perspective", "in Judaism", "in the Torah" and "Biblical Narrative" should not appear in a topic.\nTopics should be written in {short_to_long_lang[lang]}.</task><examples>\n{examples_by_lang[lang]}</examples>\n<negative_examples>\n<topic>Dispute between Rabbi Akiva and Rabbi Yehoshua</topic>\n<topic>Opinions on how to shake lulav</topic>\n</negative_examples>"""\n )\n', (1756, 2759), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((3468, 3515), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""# Input\n{text}"""'], {}), "('# Input\\n{text}')\n", (3496, 3515), False, 'from langchain.prompts import PromptTemplate\n'), ((3652, 3698), 'langchain.chat_models.ChatAnthropic', 'ChatAnthropic', ([], {'model': '"""claude-2"""', 'temperature': '(0)'}), "(model='claude-2', temperature=0)\n", (3665, 3698), False, 'from langchain.chat_models import ChatAnthropic\n'), ((4941, 4972), 'typer.run', 'typer.run', (['get_topics_for_title'], {}), '(get_topics_for_title)\n', (4950, 4972), False, 'import typer\n'), ((1011, 1058), 'csv.DictWriter', 'csv.DictWriter', (['fout', "['Ref', 'Text', 'Topics']"], {}), "(fout, ['Ref', 'Text', 'Topics'])\n", (1025, 1058), False, 'import csv\n'), ((4829, 4882), 're.finditer', 're.finditer', (['"""<topic>(.+?)</topic>"""', 'response.content'], {}), "('<topic>(.+?)</topic>', response.content)\n", (4840, 4882), False, 'import re\n'), ((864, 900), 'util.sefaria_specific.get_raw_ref_text', 'get_raw_ref_text', (['segment_oref', 'lang'], {}), '(segment_oref, lang)\n', (880, 900), False, 'from util.sefaria_specific import get_raw_ref_text\n')] |
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts.prompt import PromptTemplate
from langchain.callbacks import get_openai_callback
#fix Error: module 'langchain' has no attribute 'verbose'
import langchain
langchain.verbose = False
class Chatbot:
def __init__(self, model_name, temperature, vectors):
self.model_name = model_name
self.temperature = temperature
self.vectors = vectors
qa_template = """
        You are a helpful AI assistant named Robby. The user gives you a file whose content is represented by the following pieces of context; use them to answer the question at the end.
If you don't know the answer, just say you don't know. Do NOT try to make up an answer.
If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.
Use as much detail as possible when responding.
context: {context}
=========
question: {question}
======
"""
QA_PROMPT = PromptTemplate(template=qa_template, input_variables=["context","question" ])
def conversational_chat(self, query):
"""
Start a conversational chat with a model via Langchain
"""
llm = ChatOpenAI(model_name=self.model_name, temperature=self.temperature)
retriever = self.vectors.as_retriever()
chain = ConversationalRetrievalChain.from_llm(llm=llm,
retriever=retriever, verbose=True, return_source_documents=True, max_tokens_limit=4097, combine_docs_chain_kwargs={'prompt': self.QA_PROMPT})
chain_input = {"question": query, "chat_history": st.session_state["history"]}
result = chain(chain_input)
st.session_state["history"].append((query, result["answer"]))
#count_tokens_chain(chain, chain_input)
return result["answer"]
def count_tokens_chain(chain, query):
with get_openai_callback() as cb:
result = chain.run(query)
st.write(f'###### Tokens used in this conversation : {cb.total_tokens} tokens')
return result
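# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a vector store built elsewhere in the app (e.g. FAISS over the
# uploaded file) and an initialised st.session_state["history"] list:
#
#   bot = Chatbot(model_name="gpt-3.5-turbo", temperature=0.0, vectors=vectors)
#   answer = bot.conversational_chat("What is this document about?")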
| [
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain.prompts.prompt.PromptTemplate",
"langchain.callbacks.get_openai_callback",
"langchain.chat_models.ChatOpenAI"
] | [((1142, 1219), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'qa_template', 'input_variables': "['context', 'question']"}), "(template=qa_template, input_variables=['context', 'question'])\n", (1156, 1219), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((1364, 1432), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'self.model_name', 'temperature': 'self.temperature'}), '(model_name=self.model_name, temperature=self.temperature)\n', (1374, 1432), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1500, 1697), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'llm', 'retriever': 'retriever', 'verbose': '(True)', 'return_source_documents': '(True)', 'max_tokens_limit': '(4097)', 'combine_docs_chain_kwargs': "{'prompt': self.QA_PROMPT}"}), "(llm=llm, retriever=retriever, verbose\n =True, return_source_documents=True, max_tokens_limit=4097,\n combine_docs_chain_kwargs={'prompt': self.QA_PROMPT})\n", (1537, 1697), False, 'from langchain.chains import ConversationalRetrievalChain\n'), ((2025, 2046), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2044, 2046), False, 'from langchain.callbacks import get_openai_callback\n'), ((2096, 2175), 'streamlit.write', 'st.write', (['f"""###### Tokens used in this conversation : {cb.total_tokens} tokens"""'], {}), "(f'###### Tokens used in this conversation : {cb.total_tokens} tokens')\n", (2104, 2175), True, 'import streamlit as st\n')] |
"""
A simple CUI application to visualize and query a customer database using the `textual` package.
"""
from dataclasses import dataclass
import langchain
from langchain.cache import SQLiteCache
from langchain.llms import OpenAI
from textual.app import App, ComposeResult
from textual.containers import Horizontal
from textual.widgets import Button, DataTable, Footer, Header, Input
from llm_strategy import llm_strategy
langchain.llm_cache = SQLiteCache()
base_llm = OpenAI(max_tokens=1024)
@llm_strategy(base_llm)
@dataclass
class Customer:
key: str
first_name: str
last_name: str
birthdate: str
address: str
@property
def age(self: "Customer") -> int:
"""Return the current age of the customer.
This is a computed property based on `birthdate` and the current year (2022).
"""
raise NotImplementedError()
@dataclass
class CustomerDatabase:
customers: list[Customer]
def find_customer_key(self: "CustomerDatabase", query: str) -> list[str]:
"""Find the keys of the customers that match a natural language query best (sorted by closeness to the match).
We support semantic queries instead of SQL, so we can search for things like
"the customer that was born in 1990".
Args:
query: Natural language query
Returns:
The index of the best matching customer in the database.
"""
raise NotImplementedError()
def load(self: "CustomerDatabase"):
"""Load the customer database from a file."""
raise NotImplementedError()
def store(self: "CustomerDatabase"):
"""Store the customer database to a file."""
raise NotImplementedError()
@llm_strategy(base_llm)
@dataclass
class MockCustomerDatabase(CustomerDatabase):
def load(self):
self.customers = self.create_mock_customers(10)
def store(self):
pass
@staticmethod
def create_mock_customers(num_customers: int = 1) -> list[Customer]:
"""
Create mock customers with believable data (our customers are world citizens).
"""
raise NotImplementedError()
class CustomerDatabaseApp(App):
"""A simple textual application to visualize and query a customer database.
We show all the customers in a table and allow the user to query the database using natural language
in a search box at the bottom of the screen.
"""
PRIORITY_BINDINGS = False
BINDINGS = [("q", "quit", "Quit the application"), ("s", "screenshot", "Take a screenshot")]
database: CustomerDatabase = MockCustomerDatabase([])
data_table = DataTable(id="customer_table")
search_box = Input(id="search_box", placeholder="Search for a customer (use any kind of query")
footer_bar = Horizontal(search_box)
def on_mount(self) -> None:
self.database.load()
self.data_table.add_columns("First Name", "Last Name", "Birthdate", "Address", "Age")
self.search("")
def compose(self) -> ComposeResult:
self.footer_bar.styles.dock = "bottom"
self.footer_bar.styles.width = "100%"
self.footer_bar.styles.height = 4
self.data_table.styles.height = "auto"
self.data_table.styles.width = "100%"
self.screen.styles.height = "100%"
self.search_box.styles.width = "100%"
yield Header()
yield self.footer_bar
yield Footer()
yield self.data_table
def search(self, query: str):
"""Search the customer database using a natural language query."""
self.data_table.clear()
if not query:
for customer in self.database.customers:
self.data_table.add_row(
# customer.key,
customer.first_name,
customer.last_name,
customer.birthdate,
customer.address,
str(customer.age),
)
else:
keys = self.database.find_customer_key(query)
for key in keys:
customers_for_key = [customer for customer in self.database.customers if customer.key == key]
assert len(customers_for_key) == 1
customer = customers_for_key[0]
self.data_table.add_row(
# customer.key,
customer.first_name,
customer.last_name,
customer.birthdate,
customer.address,
str(customer.age),
)
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button is self.exit_button:
self.exit()
def on_input_submitted(self, event: Input.Submitted) -> None:
if event.input is self.search_box:
self.search(event.value)
if __name__ == "__main__":
app = CustomerDatabaseApp()
app.run()
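# --- Illustrative note (not part of the original module) ---
# @llm_strategy appears to fill in the methods that raise NotImplementedError by
# prompting `base_llm` with their docstrings, so a plain (non-UI) session could
# look like this:
#
#   db = MockCustomerDatabase([])
#   db.load()  # mock customers generated by the LLM
#   keys = db.find_customer_key("the customer that was born in 1990")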
| [
"langchain.llms.OpenAI",
"langchain.cache.SQLiteCache"
] | [((447, 460), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {}), '()\n', (458, 460), False, 'from langchain.cache import SQLiteCache\n'), ((472, 495), 'langchain.llms.OpenAI', 'OpenAI', ([], {'max_tokens': '(1024)'}), '(max_tokens=1024)\n', (478, 495), False, 'from langchain.llms import OpenAI\n'), ((499, 521), 'llm_strategy.llm_strategy', 'llm_strategy', (['base_llm'], {}), '(base_llm)\n', (511, 521), False, 'from llm_strategy import llm_strategy\n'), ((1731, 1753), 'llm_strategy.llm_strategy', 'llm_strategy', (['base_llm'], {}), '(base_llm)\n', (1743, 1753), False, 'from llm_strategy import llm_strategy\n'), ((2643, 2673), 'textual.widgets.DataTable', 'DataTable', ([], {'id': '"""customer_table"""'}), "(id='customer_table')\n", (2652, 2673), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((2691, 2778), 'textual.widgets.Input', 'Input', ([], {'id': '"""search_box"""', 'placeholder': '"""Search for a customer (use any kind of query"""'}), "(id='search_box', placeholder=\n 'Search for a customer (use any kind of query')\n", (2696, 2778), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((2791, 2813), 'textual.containers.Horizontal', 'Horizontal', (['search_box'], {}), '(search_box)\n', (2801, 2813), False, 'from textual.containers import Horizontal\n'), ((3369, 3377), 'textual.widgets.Header', 'Header', ([], {}), '()\n', (3375, 3377), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((3422, 3430), 'textual.widgets.Footer', 'Footer', ([], {}), '()\n', (3428, 3430), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n')] |
import langchain
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.retrievers import BM25Retriever, EnsembleRetriever
from langchain.vectorstores import FAISS
langchain.verbose = True
load_dotenv()
# Fictional names and profiles generated with ChatGPT
texts = [
"""Name: Zephyrina Bluemoon
Profile: Zephyrina Bluemoon is an astrophysicist who was awarded the Nobel Prize in Physics in 2040. His research on dark matter and multidimensional universes has led to the development of a new cosmological theory.
""",
"""Name: Quill Solstice
Profile: Quill Solstice is an internationally renowned environmental activist, working on climate change and biodiversity conservation. His initiatives have received widespread support, especially among the youth around the world.
""",
"""Name: Seraphim Vortex
Profile: Seraphim Vortex is a globally acclaimed pianist, whose performances are often described as "the voice of nature". Through her classical music, she conveys a message of environmental preservation to the world.
""",
"""Name: Eclipse Stardust
Profile: Eclipse Stardust is an AI developer known for her research in autonomous drones. Her drone technology has been used in disaster rescue and environmental surveys, saving many lives.
""",
"""Name: Celestia Rainbow
Profile: Celestia Rainbow is a world-famous novelist, and her works have been translated into more than 30 languages. Her novels, characterized by a deep understanding of humanity and delicate portrayals of the human heart, have received international acclaim.
""",
]
# Prepare to search the prepared texts with FAISS
embeddings = OpenAIEmbeddings()
db = FAISS.from_texts(texts, embeddings)
faiss_retriever = db.as_retriever(search_kwargs={"k": 1})
# Prepare to search the prepared texts with BM25
bm25_retriever = BM25Retriever.from_texts(texts, k=1)
# Combine the two retrievers
ensemble_retriever = EnsembleRetriever(
retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]
)
# Prepare the RetrievalQA chain, which retrieves relevant documents and then has the LLM generate an answer
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
qa_chain = RetrievalQA.from_chain_type(
llm=chat, chain_type="stuff", retriever=ensemble_retriever
)
query = "Zephyrina Bluemoonさんについて教えてください。"
result = qa_chain.run(query)
print(result)
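# --- Illustrative addition (not part of the original script) ---
# Inspect which documents the ensemble retriever fuses before the QA step.
for doc in ensemble_retriever.get_relevant_documents(query):
    print(doc.page_content[:80])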
| [
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.chat_models.ChatOpenAI",
"langchain.retrievers.BM25Retriever.from_texts",
"langchain.vectorstores.FAISS.from_texts",
"langchain.retrievers.EnsembleRetriever",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((325, 338), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (336, 338), False, 'from dotenv import load_dotenv\n'), ((1707, 1725), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1723, 1725), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1731, 1766), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (1747, 1766), False, 'from langchain.vectorstores import FAISS\n'), ((1865, 1901), 'langchain.retrievers.BM25Retriever.from_texts', 'BM25Retriever.from_texts', (['texts'], {'k': '(1)'}), '(texts, k=1)\n', (1889, 1901), False, 'from langchain.retrievers import BM25Retriever, EnsembleRetriever\n'), ((1946, 2034), 'langchain.retrievers.EnsembleRetriever', 'EnsembleRetriever', ([], {'retrievers': '[bm25_retriever, faiss_retriever]', 'weights': '[0.5, 0.5]'}), '(retrievers=[bm25_retriever, faiss_retriever], weights=[\n 0.5, 0.5])\n', (1963, 2034), False, 'from langchain.retrievers import BM25Retriever, EnsembleRetriever\n'), ((2095, 2148), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (2105, 2148), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2160, 2252), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'chain_type': '"""stuff"""', 'retriever': 'ensemble_retriever'}), "(llm=chat, chain_type='stuff', retriever=\n ensemble_retriever)\n", (2187, 2252), False, 'from langchain.chains import RetrievalQA\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
class WhatsAppClient:
API_URL = "https://graph.facebook.com/v17.0/"
WHATSAPP_API_TOKEN = "<Temporary access token from your WhatsApp API Setup>"
WHATSAPP_CLOUD_NUMBER_ID = "<Phone number ID from your WhatsApp API Setup>"
def __init__(self):
self.headers = {
"Authorization": f"Bearer {self.WHATSAPP_API_TOKEN}",
"Content-Type": "application/json",
}
self.API_URL = self.API_URL + self.WHATSAPP_CLOUD_NUMBER_ID
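    # Send a plain-text WhatsApp message to the given phone number via the Cloud API /messages endpoint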
def send_text_message(self,message, phone_number):
payload = {
"messaging_product": 'whatsapp',
"to": phone_number,
"type": "text",
"text": {
"preview_url": False,
"body": message
}
}
response = requests.post(f"{self.API_URL}/messages", json=payload,headers=self.headers)
print(response.status_code)
assert response.status_code == 200, "Error sending message"
return response.status_code
os.environ["REPLICATE_API_TOKEN"] = "<your replicate api token>"
llama2_13b_chat = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
llm = Replicate(
model=llama2_13b_chat,
model_kwargs={"temperature": 0.01, "top_p": 1, "max_new_tokens":500}
)
client = WhatsAppClient()
app = Flask(__name__)
@app.route("/")
def hello_llama():
return "<p>Hello Llama 2</p>"
@app.route('/msgrcvd', methods=['POST', 'GET'])
def msgrcvd():
message = request.args.get('message')
#client.send_template_message("hello_world", "en_US", "14086745477")
answer = llm(message)
print(message)
print(answer)
    client.send_text_message(answer, "14086745477")  # reuse the generated answer instead of calling the model a second time
return message + "<p/>" + answer
| [
"langchain.llms.Replicate"
] | [((1502, 1609), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1511, 1609), False, 'from langchain.llms import Replicate\n'), ((1647, 1662), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1652, 1662), False, 'from flask import Flask\n'), ((1815, 1842), 'flask.request.args.get', 'request.args.get', (['"""message"""'], {}), "('message')\n", (1831, 1842), False, 'from flask import request\n'), ((1101, 1178), 'requests.post', 'requests.post', (['f"""{self.API_URL}/messages"""'], {'json': 'payload', 'headers': 'self.headers'}), "(f'{self.API_URL}/messages', json=payload, headers=self.headers)\n", (1114, 1178), False, 'import requests\n')] |
import langchain
from langchain.cache import InMemoryCache
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
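# Cache LLM generations in memory so repeated identical prompts do not trigger new API calls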
langchain.llm_cache = InMemoryCache()
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)
if __name__ == "__main__":
# Run the chain only specifying the input variable.
print(chain.run("colorful socks"))
| [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate",
"langchain.cache.InMemoryCache",
"langchain.llms.OpenAI"
] | [((199, 214), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (212, 214), False, 'from langchain.cache import InMemoryCache\n'), ((223, 246), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (229, 246), False, 'from langchain.llms import OpenAI\n'), ((256, 372), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good name for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good name for a company that makes {product}?')\n", (270, 372), False, 'from langchain.prompts import PromptTemplate\n'), ((389, 421), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (397, 421), False, 'from langchain.chains import LLMChain\n')] |
import langchain
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from steamship import File, Task
from steamship.invocable import PackageService, post
from steamship_langchain.cache import SteamshipCache
from steamship_langchain.llms import OpenAI
class SummarizeAudioPackage(PackageService):
def __init__(self, **kwargs):
super().__init__(**kwargs)
langchain.llm_cache = SteamshipCache(client=self.client)
self.llm = OpenAI(client=self.client, cache=True)
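    # Split the file's text into chunks and summarize them with a map-reduce chain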
@post("summarize_file")
def summarize_file(self, file_handle: str) -> str:
file = File.get(self.client, handle=file_handle)
text_splitter = CharacterTextSplitter()
texts = []
for block in file.blocks:
texts.extend(text_splitter.split_text(block.text))
docs = [Document(page_content=t) for t in texts]
chain = load_summarize_chain(self.llm, chain_type="map_reduce")
return chain.run(docs)
@post("summarize_audio_file")
def summarize_audio_file(self, file_handle: str) -> Task[str]:
transcriber = self.client.use_plugin("whisper-s2t-blockifier")
audio_file = File.get(self.client, handle=file_handle)
transcribe_task = audio_file.blockify(plugin_instance=transcriber.handle)
return self.invoke_later(
"summarize_file",
wait_on_tasks=[transcribe_task],
arguments={"file_handle": audio_file.handle},
)
| [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.chains.summarize.load_summarize_chain",
"langchain.docstore.document.Document"
] | [((613, 635), 'steamship.invocable.post', 'post', (['"""summarize_file"""'], {}), "('summarize_file')\n", (617, 635), False, 'from steamship.invocable import PackageService, post\n'), ((1078, 1106), 'steamship.invocable.post', 'post', (['"""summarize_audio_file"""'], {}), "('summarize_audio_file')\n", (1082, 1106), False, 'from steamship.invocable import PackageService, post\n'), ((514, 548), 'steamship_langchain.cache.SteamshipCache', 'SteamshipCache', ([], {'client': 'self.client'}), '(client=self.client)\n', (528, 548), False, 'from steamship_langchain.cache import SteamshipCache\n'), ((568, 606), 'steamship_langchain.llms.OpenAI', 'OpenAI', ([], {'client': 'self.client', 'cache': '(True)'}), '(client=self.client, cache=True)\n', (574, 606), False, 'from steamship_langchain.llms import OpenAI\n'), ((706, 747), 'steamship.File.get', 'File.get', (['self.client'], {'handle': 'file_handle'}), '(self.client, handle=file_handle)\n', (714, 747), False, 'from steamship import File, Task\n'), ((772, 795), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {}), '()\n', (793, 795), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((985, 1040), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['self.llm'], {'chain_type': '"""map_reduce"""'}), "(self.llm, chain_type='map_reduce')\n", (1005, 1040), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1266, 1307), 'steamship.File.get', 'File.get', (['self.client'], {'handle': 'file_handle'}), '(self.client, handle=file_handle)\n', (1274, 1307), False, 'from steamship import File, Task\n'), ((928, 952), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 't'}), '(page_content=t)\n', (936, 952), False, 'from langchain.docstore.document import Document\n')] |
import langchain
import os
import streamlit as st
import requests
import sounddevice as sd
import wavio
os.environ["OPENAI_API_KEY"]="ADD KEY"
import openai
from openai import OpenAI
client=OpenAI()
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate
from langchain.schema.messages import SystemMessage
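# Chat prompt with a fixed system persona and a placeholder for the user's transcribed speech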
chat_template = ChatPromptTemplate.from_messages(
[
SystemMessage(
content=(
"You are a presonal assistant for {your name] and your name is luna "
"if the user call you by any other name than luna you need to correct him by your orginal name."
"And for every output you can also use the username in the answer which will be nice gesture"
"you can act more,like an human speaking more than an ai replying to the message"
"Consider the user as your friend"
"Speak like a friend"
"Be more creative and funny way"
)
),
HumanMessagePromptTemplate.from_template("{text}"),
]
)
llm = ChatOpenAI()
# Record audio
def record_audio(filename, duration, fs):
print("Recording audio...")
recording = sd.rec(int(duration * fs), samplerate=fs, channels=2)
sd.wait()
wavio.write(filename, recording, fs, sampwidth=2)
print("Audio recorded and saved as", filename)
## Streamlit UI
st.set_page_config(page_title="Personal voice assistant ")
website_heading = "I am Your Personal Voice assistant"
# Display as a heading
st.markdown(f"<h1 style='text-align: center; color: #a274a3;'>{website_heading}</h1>", unsafe_allow_html=True)
st.write("Speak here")
if st.button(label="Click here to speak"):
audio_filename = "input.wav"
duration = 5 # Duration of the recording in seconds
fs = 44100 # Sample rate
    record_audio(audio_filename, duration, fs)  ## user input recorded and stored
##converting to text using whisper
audio_file= open("input.wav", "rb")
transcript = client.audio.translations.create(
model="whisper-1",
file=audio_file)
a=transcript.text
# st.write(a)
print(a)
##model
a=llm(chat_template.format_messages(text=a))
a=a.content
##audio output
speech_file_path ="speech.mp3"
response = client.audio.speech.create(
model="tts-1",
voice="nova",
input=a)
response.stream_to_file(speech_file_path)
st.audio("speech.mp3")
| [
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.schema.messages.SystemMessage",
"langchain.chat_models.ChatOpenAI"
] | [((191, 199), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (197, 199), False, 'from openai import OpenAI\n'), ((1151, 1163), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1161, 1163), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1462, 1520), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Personal voice assistant """'}), "(page_title='Personal voice assistant ')\n", (1480, 1520), True, 'import streamlit as st\n'), ((1601, 1720), 'streamlit.markdown', 'st.markdown', (['f"""<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>"""'], {'unsafe_allow_html': '(True)'}), '(\n f"<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>",\n unsafe_allow_html=True)\n', (1612, 1720), True, 'import streamlit as st\n'), ((1714, 1736), 'streamlit.write', 'st.write', (['"""Speak here"""'], {}), "('Speak here')\n", (1722, 1736), True, 'import streamlit as st\n'), ((1740, 1778), 'streamlit.button', 'st.button', ([], {'label': '"""Click here to speak"""'}), "(label='Click here to speak')\n", (1749, 1778), True, 'import streamlit as st\n'), ((1329, 1338), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (1336, 1338), True, 'import sounddevice as sd\n'), ((1343, 1392), 'wavio.write', 'wavio.write', (['filename', 'recording', 'fs'], {'sampwidth': '(2)'}), '(filename, recording, fs, sampwidth=2)\n', (1354, 1392), False, 'import wavio\n'), ((2490, 2512), 'streamlit.audio', 'st.audio', (['"""speech.mp3"""'], {}), "('speech.mp3')\n", (2498, 2512), True, 'import streamlit as st\n'), ((468, 916), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way"""'}), "(content=\n 'You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way'\n )\n", (481, 916), False, 'from langchain.schema.messages import SystemMessage\n'), ((1084, 1134), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (1124, 1134), False, 'from langchain.prompts import HumanMessagePromptTemplate\n')] |
import os
import threading
from chainlit.config import config
from chainlit.logger import logger
def init_lc_cache():
use_cache = config.project.cache is True and config.run.no_cache is False
if use_cache:
try:
import langchain
except ImportError:
return
from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache
if config.project.lc_cache_path is not None:
set_llm_cache(SQLiteCache(database_path=config.project.lc_cache_path))
if not os.path.exists(config.project.lc_cache_path):
logger.info(
f"LangChain cache created at: {config.project.lc_cache_path}"
)
_cache = {}
_cache_lock = threading.Lock()
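# Simple thread-safe memoization decorator: results are keyed by function name, args, and kwargs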
def cache(func):
def wrapper(*args, **kwargs):
# Create a cache key based on the function name, arguments, and keyword arguments
cache_key = (
(func.__name__,) + args + tuple((k, v) for k, v in sorted(kwargs.items()))
)
with _cache_lock:
# Check if the result is already in the cache
if cache_key not in _cache:
# If not, call the function and store the result in the cache
_cache[cache_key] = func(*args, **kwargs)
return _cache[cache_key]
return wrapper
| [
"langchain.cache.SQLiteCache"
] | [((767, 783), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (781, 783), False, 'import threading\n'), ((487, 542), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': 'config.project.lc_cache_path'}), '(database_path=config.project.lc_cache_path)\n', (498, 542), False, 'from langchain.cache import SQLiteCache\n'), ((564, 608), 'os.path.exists', 'os.path.exists', (['config.project.lc_cache_path'], {}), '(config.project.lc_cache_path)\n', (578, 608), False, 'import os\n'), ((626, 700), 'chainlit.logger.logger.info', 'logger.info', (['f"""LangChain cache created at: {config.project.lc_cache_path}"""'], {}), "(f'LangChain cache created at: {config.project.lc_cache_path}')\n", (637, 700), False, 'from chainlit.logger import logger\n')] |
import logging
import requests
from typing import Optional, List, Dict, Mapping, Any
import langchain
from langchain.llms.base import LLM
from langchain.cache import InMemoryCache
logging.basicConfig(level=logging.INFO)
# Enable the LLM cache
langchain.llm_cache = InMemoryCache()
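# Minimal custom LangChain LLM wrapper around the ZhipuAI ChatGLM chat API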
class AgentZhipuAI(LLM):
import zhipuai as zhipuai
    # Model service URL
    url = "127.0.0.1"
    zhipuai.api_key = "1f565e40af1198e11ff1fd8a5b42771d.SjNfezc40YFsz2KC"  # API key obtained from the ZhipuAI console
    model = "chatglm_pro"  # model version
history = []
    def getText(self, role, content):
        # role specifies the speaker role; content is the prompt text
jsoncon = {}
jsoncon["role"] = role
jsoncon["content"] = content
self.history.append(jsoncon)
return self.history
@property
def _llm_type(self) -> str:
return "AgentZhipuAI"
@classmethod
def _post(self, url: str, query: Dict) -> Any:
"""POST请求"""
response = requests.post(url, data=query).json()
return response
def _call(self, prompt: str, stop: Optional[List[str]] = None,role = "user") -> str:
"""_call"""
# construct query
response = self.zhipuai.model_api.invoke(
model=self.model,
prompt=self.getText(role=role, content=prompt)
)
choices = (response['data']['choices'])[0]
self.history.append(choices)
return choices["content"]
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters.
"""
_param_dict = {
"url": self.url
}
return _param_dict
if __name__ == '__main__':
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
llm = AgentZhipuAI()
    # Example prompt with no input variables
no_input_prompt = PromptTemplate(input_variables=[], template="给我讲个笑话。")
no_input_prompt.format()
prompt = PromptTemplate(
input_variables=["location", "street"],
template="作为一名专业的旅游顾问,简单的说一下{location}有什么好玩的景点,特别是在{street}?只要说一个就可以。",
)
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run({"location": "南京", "street": "新街口"}))
from langchain.chains import ConversationChain
conversation = ConversationChain(llm=llm, verbose=True)
output = conversation.predict(input="你好!")
print(output)
output = conversation.predict(input="南京是哪里的省会?")
print(output)
output = conversation.predict(input="那里有什么好玩的地方,简单的说一个就好。")
print(output)
| [
"langchain.chains.LLMChain",
"langchain.cache.InMemoryCache",
"langchain.prompts.PromptTemplate",
"langchain.chains.ConversationChain"
] | [((183, 222), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (202, 222), False, 'import logging\n'), ((256, 271), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (269, 271), False, 'from langchain.cache import InMemoryCache\n'), ((1830, 1884), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': '[]', 'template': '"""给我讲个笑话。"""'}), "(input_variables=[], template='给我讲个笑话。')\n", (1844, 1884), False, 'from langchain.prompts import PromptTemplate\n'), ((1928, 2059), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['location', 'street']", 'template': '"""作为一名专业的旅游顾问,简单的说一下{location}有什么好玩的景点,特别是在{street}?只要说一个就可以。"""'}), "(input_variables=['location', 'street'], template=\n '作为一名专业的旅游顾问,简单的说一下{location}有什么好玩的景点,特别是在{street}?只要说一个就可以。')\n", (1942, 2059), False, 'from langchain.prompts import PromptTemplate\n'), ((2091, 2123), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2099, 2123), False, 'from langchain.chains import LLMChain\n'), ((2254, 2294), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'llm': 'llm', 'verbose': '(True)'}), '(llm=llm, verbose=True)\n', (2271, 2294), False, 'from langchain.chains import ConversationChain\n'), ((942, 972), 'requests.post', 'requests.post', (['url'], {'data': 'query'}), '(url, data=query)\n', (955, 972), False, 'import requests\n')] |
'''
Example script to automatically write a screenplay from a newsgroup post using agents with Crew.ai (https://github.com/joaomdmoura/crewAI)
You can also try it out with a personal email with many replies back and forth and see it turn into a movie script.
Demonstrates:
- multiple API endpoints (official Mistral, Together.ai, Anyscale)
- running single tasks: spam detection and scoring
- running a crew to create a screenplay from a newsgroup post by first analyzing the text, creating a dialogue and ultimately formatting it
Additional endpoints requirements:
pip install langchain_mistralai
pip install langchain-together
Author: Toon Beerten ([email protected])
License: MIT
'''
import os
import re
from crewai import Agent, Task, Crew, Process
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.chat_models import openai
#endpoint specific imports
import langchain_mistralai
from langchain_mistralai.chat_models import ChatMistralAI
from langchain_community.llms import Together
from langchain_community.chat_models import ChatAnyscale
## Choose here which API endpoint to use, uncomment only one:
# Official Mistral: benefit of having access to mistral-medium
# Together.ai: lots of models to choose from
# Anyscale: cheapest at the time of writing
endpoint = 'mistral_official'
#endpoint = 'togetherai'
#endpoint = 'anyscale'
# Put your API keys here
mistral_key = ''
togetherai_key = ''
anyscale_key = ''
# Model choice: I already get good results with mistralai/Mistral-7B-Instruct-v0.2
if endpoint == 'mistral_official':
mixtral=ChatMistralAI(mistral_api_key=mistral_key, model="mistral-tiny",temperature=0.6)
elif endpoint == 'togetherai':
    # I get timeouts using Together(), so I use ChatOpenAI() instead
#mixtral = Together(model="mistralai/Mistral-7B-Instruct-v0.2", together_api_key=togetherai_key ) #or mistralai/Mixtral-8x7B-Instruct-v0.1
mixtral= openai.ChatOpenAI(base_url="https://api.together.xyz/v1", api_key=togetherai_key, temperature=0.5, model="mistralai/Mistral-7B-Instruct-v0.2")
elif endpoint == 'anyscale':
mixtral = ChatAnyscale(model='mistralai/Mistral-7B-Instruct-v0.1', api_key=anyscale_key, streaming=False)
## Define Agents
spamfilter = Agent(
role='spamfilter',
goal='''Decide whether a text is spam or not.''',
backstory='You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
analyst = Agent(
role='analyse',
goal='''You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain.''',
backstory='You are an expert discussion analyst.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
scriptwriter = Agent(
role='scriptwriter',
goal='Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals.',
backstory='''You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes.''',
llm=mixtral,
verbose=True,
allow_delegation=False
)
formatter = Agent(
role='formatter',
goal='''Format the text as asked. Leave out actions from discussion members that happen between brackets, eg (smiling).''',
backstory='You are an expert text formatter.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
scorer = Agent(
role='scorer',
goal='''You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:
Scale:
1-3: Poor - The dialogue has significant issues that prevent effective communication.
4-6: Average - The dialogue has some good points but also has notable weaknesses.
7-9: Good - The dialogue is mostly effective with minor issues.
10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.
Factors to Consider:
Clarity: How clear is the exchange? Are the statements and responses easy to understand?
Relevance: Do the responses stay on topic and contribute to the conversation's purpose?
Conciseness: Is the dialogue free of unnecessary information or redundancy?
Politeness: Are the participants respectful and considerate in their interaction?
Engagement: Do the participants seem interested and actively involved in the dialogue?
Flow: Is there a natural progression of ideas and responses? Are there awkward pauses or interruptions?
Coherence: Does the dialogue make logical sense as a whole?
Responsiveness: Do the participants address each other's points adequately?
Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?
Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?
''',
backstory='You are an expert at scoring conversations on a scale of 1 to 10.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
#this is one example of a public post in the newsgroup alt.atheism
#try it out yourself by replacing this with your own email thread or text or ...
discussion = '''From: [email protected] (Keith Allan Schneider)
Subject: Re: <Political Atheists?
Organization: California Institute of Technology, Pasadena
Lines: 50
NNTP-Posting-Host: punisher.caltech.edu
[email protected] (Robert Beauchaine) writes:
>>I think that about 70% (or so) people approve of the
>>death penalty, even realizing all of its shortcomings. Doesn't this make
>>it reasonable? Or are *you* the sole judge of reasonability?
>Aside from revenge, what merits do you find in capital punishment?
Are we talking about me, or the majority of the people that support it?
Anyway, I think that "revenge" or "fairness" is why most people are in
favor of the punishment. If a murderer is going to be punished, people
that think that he should "get what he deserves." Most people wouldn't
think it would be fair for the murderer to live, while his victim died.
>Revenge? Petty and pathetic.
Perhaps you think that it is petty and pathetic, but your views are in the
minority.
>We have a local televised hot topic talk show that very recently
>did a segment on capital punishment. Each and every advocate of
>the use of this portion of our system of "jurisprudence" cited the
>main reason for supporting it: "That bastard deserved it". True
>human compassion, forgiveness, and sympathy.
Where are we required to have compassion, forgiveness, and sympathy? If
someone wrongs me, I will take great lengths to make sure that his advantage
is removed, or a similar situation is forced upon him. If someone kills
another, then we can apply the golden rule and kill this person in turn.
Is not our entire moral system based on such a concept?
Or, are you stating that human life is sacred, somehow, and that it should
never be violated? This would sound like some sort of religious view.
>>I mean, how reasonable is imprisonment, really, when you think about it?
>>Sure, the person could be released if found innocent, but you still
>>can't undo the imiprisonment that was served. Perhaps we shouldn't
>>imprision people if we could watch them closely instead. The cost would
>>probably be similar, especially if we just implanted some sort of
>>electronic device.
>Would you rather be alive in prison or dead in the chair?
Once a criminal has committed a murder, his desires are irrelevant.
And, you still have not answered my question. If you are concerned about
the death penalty due to the possibility of the execution of an innocent,
then why isn't this same concern shared with imprisonment. Shouldn't we,
by your logic, administer as minimum as punishment as possible, to avoid
violating the liberty or happiness of an innocent person?
keith
'''
# Filter out spam and vulgar posts
task0 = Task(description='Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n' + discussion, agent=spamfilter)
result = task0.execute()
if "STOP" in result:
#stop here and proceed to next post
print('This spam message will be filtered out')
# process post with a crew of agents, ultimately delivering a well formatted dialogue
task1 = Task(description='Analyse in much detail the following discussion:\n### DISCUSSION:\n' + discussion, agent=analyst)
task2 = Task(description='Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes.', agent=scriptwriter)
task3 = Task(description='''Format the script exactly like this:
## (person 1):
(first text line from person 1)
## (person 2):
(first text line from person 2)
## (person 1):
(second text line from person 1)
## (person 2):
(second text line from person 2)
''', agent=formatter)
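# Assemble the agents into a sequential crew: analysis -> screenplay -> formatting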
crew = Crew(
agents=[analyst, scriptwriter,formatter],
tasks=[task1, task2, task3],
  verbose=2, # Crew verbose mode will let you know what tasks are being worked on; you can set it to 1 or 2 for different logging levels
  process=Process.sequential # Sequential process will have tasks executed one after the other, and the outcome of the previous one is passed as extra content into the next.
)
result = crew.kickoff()
#get rid of directions and actions between brackets, eg: (smiling)
result = re.sub(r'\(.*?\)', '', result)
print('===================== end result from crew ===================================')
print(result)
print('===================== score ==================================================')
task4 = Task(description='Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. Do not give an explanation.\n'+result, agent=scorer)
score = task4.execute()
score = score.split('\n')[0] #sometimes an explanation comes after score, ignore
print(f'Scoring the dialogue as: {score}/10') | [
"langchain.chat_models.openai.ChatOpenAI",
"langchain_community.chat_models.ChatAnyscale",
"langchain_mistralai.chat_models.ChatMistralAI"
] | [((2292, 2556), 'crewai.Agent', 'Agent', ([], {'role': '"""spamfilter"""', 'goal': '"""Decide whether a text is spam or not."""', 'backstory': '"""You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='spamfilter', goal='Decide whether a text is spam or not.',\n backstory=\n 'You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language.'\n , llm=mixtral, verbose=True, allow_delegation=False)\n", (2297, 2556), False, 'from crewai import Agent, Task, Crew, Process\n'), ((2581, 2886), 'crewai.Agent', 'Agent', ([], {'role': '"""analyse"""', 'goal': '"""You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain."""', 'backstory': '"""You are an expert discussion analyst."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='analyse', goal=\n 'You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain.'\n , backstory='You are an expert discussion analyst.', llm=mixtral,\n verbose=True, allow_delegation=False)\n", (2586, 2886), False, 'from crewai import Agent, Task, Crew, Process\n'), ((2916, 3352), 'crewai.Agent', 'Agent', ([], {'role': '"""scriptwriter"""', 'goal': '"""Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals."""', 'backstory': '"""You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='scriptwriter', goal=\n 'Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals.'\n , backstory=\n 'You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes.'\n , llm=mixtral, verbose=True, allow_delegation=False)\n", (2921, 3352), False, 'from crewai import Agent, Task, Crew, Process\n'), ((3375, 3631), 'crewai.Agent', 'Agent', ([], {'role': '"""formatter"""', 'goal': '"""Format the text as asked. Leave out actions from discussion members that happen between brackets, eg (smiling)."""', 'backstory': '"""You are an expert text formatter."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='formatter', goal=\n 'Format the text as asked. 
Leave out actions from discussion members that happen between brackets, eg (smiling).'\n , backstory='You are an expert text formatter.', llm=mixtral, verbose=\n True, allow_delegation=False)\n", (3380, 3631), False, 'from crewai import Agent, Task, Crew, Process\n'), ((3654, 5254), 'crewai.Agent', 'Agent', ([], {'role': '"""scorer"""', 'goal': '"""You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:\n Scale:\n 1-3: Poor - The dialogue has significant issues that prevent effective communication.\n 4-6: Average - The dialogue has some good points but also has notable weaknesses.\n 7-9: Good - The dialogue is mostly effective with minor issues.\n 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.\n Factors to Consider:\n Clarity: How clear is the exchange? Are the statements and responses easy to understand?\n Relevance: Do the responses stay on topic and contribute to the conversation\'s purpose?\n Conciseness: Is the dialogue free of unnecessary information or redundancy?\n Politeness: Are the participants respectful and considerate in their interaction?\n Engagement: Do the participants seem interested and actively involved in the dialogue?\n Flow: Is there a natural progression of ideas and responses? Are there awkward pauses or interruptions?\n Coherence: Does the dialogue make logical sense as a whole?\n Responsiveness: Do the participants address each other\'s points adequately?\n Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?\n Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?\n """', 'backstory': '"""You are an expert at scoring conversations on a scale of 1 to 10."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), '(role=\'scorer\', goal=\n """You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:\n Scale:\n 1-3: Poor - The dialogue has significant issues that prevent effective communication.\n 4-6: Average - The dialogue has some good points but also has notable weaknesses.\n 7-9: Good - The dialogue is mostly effective with minor issues.\n 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.\n Factors to Consider:\n Clarity: How clear is the exchange? Are the statements and responses easy to understand?\n Relevance: Do the responses stay on topic and contribute to the conversation\'s purpose?\n Conciseness: Is the dialogue free of unnecessary information or redundancy?\n Politeness: Are the participants respectful and considerate in their interaction?\n Engagement: Do the participants seem interested and actively involved in the dialogue?\n Flow: Is there a natural progression of ideas and responses? 
Are there awkward pauses or interruptions?\n Coherence: Does the dialogue make logical sense as a whole?\n Responsiveness: Do the participants address each other\'s points adequately?\n Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?\n Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?\n """\n , backstory=\n \'You are an expert at scoring conversations on a scale of 1 to 10.\',\n llm=mixtral, verbose=True, allow_delegation=False)\n', (3659, 5254), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8227, 8430), 'crewai.Task', 'Task', ([], {'description': '("""Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n"""\n + discussion)', 'agent': 'spamfilter'}), '(description=\n """Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n"""\n + discussion, agent=spamfilter)\n', (8231, 8430), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8659, 8785), 'crewai.Task', 'Task', ([], {'description': '("""Analyse in much detail the following discussion:\n### DISCUSSION:\n""" +\n discussion)', 'agent': 'analyst'}), '(description=\n """Analyse in much detail the following discussion:\n### DISCUSSION:\n""" +\n discussion, agent=analyst)\n', (8663, 8785), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8784, 8991), 'crewai.Task', 'Task', ([], {'description': '"""Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes."""', 'agent': 'scriptwriter'}), "(description=\n 'Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes.'\n , agent=scriptwriter)\n", (8788, 8991), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8991, 9332), 'crewai.Task', 'Task', ([], {'description': '"""Format the script exactly like this:\n ## (person 1):\n (first text line from person 1)\n \n ## (person 2):\n (first text line from person 2)\n \n ## (person 1):\n (second text line from person 1)\n \n ## (person 2):\n (second text line from person 2)\n \n """', 'agent': 'formatter'}), '(description=\n """Format the script exactly like this:\n ## (person 1):\n (first text line from person 1)\n \n ## (person 2):\n (first text line from person 2)\n \n ## (person 1):\n (second text line from person 1)\n \n ## (person 2):\n (second text line from person 2)\n \n """\n , agent=formatter)\n', (8995, 9332), False, 'from crewai import Agent, Task, Crew, Process\n'), ((9344, 9463), 'crewai.Crew', 'Crew', ([], {'agents': '[analyst, scriptwriter, formatter]', 'tasks': '[task1, task2, task3]', 'verbose': '(2)', 'process': 'Process.sequential'}), '(agents=[analyst, scriptwriter, formatter], tasks=[task1, task2, task3],\n verbose=2, process=Process.sequential)\n', (9348, 9463), False, 'from crewai import Agent, Task, Crew, Process\n'), ((9847, 9878), 're.sub', 're.sub', (['"""\\\\(.*?\\\\)"""', '""""""', 'result'], {}), "('\\\\(.*?\\\\)', '', result)\n", (9853, 9878), False, 'import re\n'), ((10082, 10288), 'crewai.Task', 'Task', ([], {'description': '("""Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. 
Do not give an explanation.\n"""\n + result)', 'agent': 'scorer'}), '(description=\n """Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. Do not give an explanation.\n"""\n + result, agent=scorer)\n', (10086, 10288), False, 'from crewai import Agent, Task, Crew, Process\n'), ((1635, 1720), 'langchain_mistralai.chat_models.ChatMistralAI', 'ChatMistralAI', ([], {'mistral_api_key': 'mistral_key', 'model': '"""mistral-tiny"""', 'temperature': '(0.6)'}), "(mistral_api_key=mistral_key, model='mistral-tiny',\n temperature=0.6)\n", (1648, 1720), False, 'from langchain_mistralai.chat_models import ChatMistralAI\n'), ((1970, 2122), 'langchain.chat_models.openai.ChatOpenAI', 'openai.ChatOpenAI', ([], {'base_url': '"""https://api.together.xyz/v1"""', 'api_key': 'togetherai_key', 'temperature': '(0.5)', 'model': '"""mistralai/Mistral-7B-Instruct-v0.2"""'}), "(base_url='https://api.together.xyz/v1', api_key=\n togetherai_key, temperature=0.5, model='mistralai/Mistral-7B-Instruct-v0.2'\n )\n", (1987, 2122), False, 'from langchain.chat_models import openai\n'), ((2157, 2257), 'langchain_community.chat_models.ChatAnyscale', 'ChatAnyscale', ([], {'model': '"""mistralai/Mistral-7B-Instruct-v0.1"""', 'api_key': 'anyscale_key', 'streaming': '(False)'}), "(model='mistralai/Mistral-7B-Instruct-v0.1', api_key=\n anyscale_key, streaming=False)\n", (2169, 2257), False, 'from langchain_community.chat_models import ChatAnyscale\n')] |
'''
Example script to automatically write a screenplay from a newsgroup post using agents with Crew.ai (https://github.com/joaomdmoura/crewAI)
You can also try it out with a personal email with many replies back and forth and see it turn into a movie script.
Demonstrates:
  - multiple API endpoints (official Mistral, Together.ai, Anyscale)
- running single tasks: spam detection and scoring
- running a crew to create a screenplay from a newsgroup post by first analyzing the text, creating a dialogue and ultimately formatting it
Additional endpoint requirements:
pip install langchain_mistralai
pip install langchain-together
Author: Toon Beerten ([email protected])
License: MIT
'''
import os
import re
from crewai import Agent, Task, Crew, Process
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.chat_models import openai
#endpoint specific imports
import langchain_mistralai
from langchain_mistralai.chat_models import ChatMistralAI
from langchain_community.llms import Together
from langchain_community.chat_models import ChatAnyscale
## Choose here which API endpoint to use, uncomment only one:
# Official Mistral: benefit of having access to mistral-medium
# Together.ai: lots of models to choose from
# Anyscale: cheapest at the time of writing
#endpoint = 'togetherai'
#endpoint = 'anyscale'
endpoint = 'mistral_official'
#put your API keys here
mistral_key = ''
togetherai_key = ''
anyscale_key = ''
#model choice: I already have good results with mistralai/Mistral-7B-Instruct-v0.2
if endpoint == 'mistral_official':
mixtral=ChatMistralAI(mistral_api_key=mistral_key, model="mistral-tiny",temperature=0.6)
elif endpoint == 'togetherai':
#i get timeouts using Together() , so i use ChatOpenAI() instead
#mixtral = Together(model="mistralai/Mistral-7B-Instruct-v0.2", together_api_key=togetherai_key ) #or mistralai/Mixtral-8x7B-Instruct-v0.1
mixtral= openai.ChatOpenAI(base_url="https://api.together.xyz/v1", api_key=togetherai_key, temperature=0.5, model="mistralai/Mistral-7B-Instruct-v0.2")
elif endpoint == 'anyscale':
mixtral = ChatAnyscale(model='mistralai/Mistral-7B-Instruct-v0.1', api_key=anyscale_key, streaming=False)
## Define Agents
spamfilter = Agent(
role='spamfilter',
goal='''Decide whether a text is spam or not.''',
backstory='You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
analyst = Agent(
role='analyse',
goal='''You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain.''',
backstory='You are an expert discussion analyst.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
scriptwriter = Agent(
role='scriptwriter',
goal='Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals.',
backstory='''You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes.''',
llm=mixtral,
verbose=True,
allow_delegation=False
)
formatter = Agent(
role='formatter',
goal='''Format the text as asked. Leave out actions from discussion members that happen between brackets, eg (smiling).''',
backstory='You are an expert text formatter.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
scorer = Agent(
role='scorer',
goal='''You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:
Scale:
1-3: Poor - The dialogue has significant issues that prevent effective communication.
4-6: Average - The dialogue has some good points but also has notable weaknesses.
7-9: Good - The dialogue is mostly effective with minor issues.
10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.
Factors to Consider:
Clarity: How clear is the exchange? Are the statements and responses easy to understand?
Relevance: Do the responses stay on topic and contribute to the conversation's purpose?
Conciseness: Is the dialogue free of unnecessary information or redundancy?
Politeness: Are the participants respectful and considerate in their interaction?
Engagement: Do the participants seem interested and actively involved in the dialogue?
Flow: Is there a natural progression of ideas and responses? Are there awkward pauses or interruptions?
Coherence: Does the dialogue make logical sense as a whole?
Responsiveness: Do the participants address each other's points adequately?
Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?
Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?
''',
backstory='You are an expert at scoring conversations on a scale of 1 to 10.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
#this is one example of a public post in the newsgroup alt.atheism
#try it out yourself by replacing this with your own email thread or text or ...
discussion = '''From: [email protected] (Keith Allan Schneider)
Subject: Re: <Political Atheists?
Organization: California Institute of Technology, Pasadena
Lines: 50
NNTP-Posting-Host: punisher.caltech.edu
[email protected] (Robert Beauchaine) writes:
>>I think that about 70% (or so) people approve of the
>>death penalty, even realizing all of its shortcomings. Doesn't this make
>>it reasonable? Or are *you* the sole judge of reasonability?
>Aside from revenge, what merits do you find in capital punishment?
Are we talking about me, or the majority of the people that support it?
Anyway, I think that "revenge" or "fairness" is why most people are in
favor of the punishment. If a murderer is going to be punished, people
that think that he should "get what he deserves." Most people wouldn't
think it would be fair for the murderer to live, while his victim died.
>Revenge? Petty and pathetic.
Perhaps you think that it is petty and pathetic, but your views are in the
minority.
>We have a local televised hot topic talk show that very recently
>did a segment on capital punishment. Each and every advocate of
>the use of this portion of our system of "jurisprudence" cited the
>main reason for supporting it: "That bastard deserved it". True
>human compassion, forgiveness, and sympathy.
Where are we required to have compassion, forgiveness, and sympathy? If
someone wrongs me, I will take great lengths to make sure that his advantage
is removed, or a similar situation is forced upon him. If someone kills
another, then we can apply the golden rule and kill this person in turn.
Is not our entire moral system based on such a concept?
Or, are you stating that human life is sacred, somehow, and that it should
never be violated? This would sound like some sort of religious view.
>>I mean, how reasonable is imprisonment, really, when you think about it?
>>Sure, the person could be released if found innocent, but you still
>>can't undo the imiprisonment that was served. Perhaps we shouldn't
>>imprision people if we could watch them closely instead. The cost would
>>probably be similar, especially if we just implanted some sort of
>>electronic device.
>Would you rather be alive in prison or dead in the chair?
Once a criminal has committed a murder, his desires are irrelevant.
And, you still have not answered my question. If you are concerned about
the death penalty due to the possibility of the execution of an innocent,
then why isn't this same concern shared with imprisonment. Shouldn't we,
by your logic, administer as minimum as punishment as possible, to avoid
violating the liberty or happiness of an innocent person?
keith
'''
# Filter out spam and vulgar posts
task0 = Task(description='Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n' + discussion, agent=spamfilter)
result = task0.execute()
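#execute() runs this single task with its assigned agent; the returned result is used as plain text below,
#so we can simply look for the word STOP in it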
if "STOP" in result:
    #in a real pipeline this post would be skipped at this point; for the demo we just flag it and continue
print('This spam message will be filtered out')
# process the post with a crew of agents, ultimately delivering a well-formatted dialogue
task1 = Task(description='Analyse in much detail the following discussion:\n### DISCUSSION:\n' + discussion, agent=analyst)
task2 = Task(description='Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes.', agent=scriptwriter)
task3 = Task(description='''Format the script exactly like this:
## (person 1):
(first text line from person 1)
## (person 2):
(first text line from person 2)
## (person 1):
(second text line from person 1)
## (person 2):
(second text line from person 2)
''', agent=formatter)
crew = Crew(
  agents=[analyst, scriptwriter, formatter],
  tasks=[task1, task2, task3],
  verbose=2, # Crew verbose mode will let you know what tasks are being worked on; you can set it to 1 or 2 for different logging levels
  process=Process.sequential # Sequential process will have tasks executed one after the other, and the outcome of the previous one is passed as extra content into the next.
)
result = crew.kickoff()
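#kickoff() executes task1 -> task2 -> task3 in order; with Process.sequential the value returned here is the output of the final formatting task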
#get rid of directions and actions between brackets, eg: (smiling)
result = re.sub(r'\(.*?\)', '', result)
print('===================== end result from crew ===================================')
print(result)
print('===================== score ==================================================')
task4 = Task(description='Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. Do not give an explanation.\n'+result, agent=scorer)
score = task4.execute()
score = score.split('\n')[0] #sometimes an explanation comes after the score; keep only the first line
print(f'Scoring the dialogue as: {score}/10') | [
"langchain.chat_models.openai.ChatOpenAI",
"langchain_community.chat_models.ChatAnyscale",
"langchain_mistralai.chat_models.ChatMistralAI"
] | [((2292, 2556), 'crewai.Agent', 'Agent', ([], {'role': '"""spamfilter"""', 'goal': '"""Decide whether a text is spam or not."""', 'backstory': '"""You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='spamfilter', goal='Decide whether a text is spam or not.',\n backstory=\n 'You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language.'\n , llm=mixtral, verbose=True, allow_delegation=False)\n", (2297, 2556), False, 'from crewai import Agent, Task, Crew, Process\n'), ((2581, 2886), 'crewai.Agent', 'Agent', ([], {'role': '"""analyse"""', 'goal': '"""You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain."""', 'backstory': '"""You are an expert discussion analyst."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='analyse', goal=\n 'You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain.'\n , backstory='You are an expert discussion analyst.', llm=mixtral,\n verbose=True, allow_delegation=False)\n", (2586, 2886), False, 'from crewai import Agent, Task, Crew, Process\n'), ((2916, 3352), 'crewai.Agent', 'Agent', ([], {'role': '"""scriptwriter"""', 'goal': '"""Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals."""', 'backstory': '"""You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='scriptwriter', goal=\n 'Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals.'\n , backstory=\n 'You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes.'\n , llm=mixtral, verbose=True, allow_delegation=False)\n", (2921, 3352), False, 'from crewai import Agent, Task, Crew, Process\n'), ((3375, 3631), 'crewai.Agent', 'Agent', ([], {'role': '"""formatter"""', 'goal': '"""Format the text as asked. Leave out actions from discussion members that happen between brackets, eg (smiling)."""', 'backstory': '"""You are an expert text formatter."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='formatter', goal=\n 'Format the text as asked. 
Leave out actions from discussion members that happen between brackets, eg (smiling).'\n , backstory='You are an expert text formatter.', llm=mixtral, verbose=\n True, allow_delegation=False)\n", (3380, 3631), False, 'from crewai import Agent, Task, Crew, Process\n'), ((3654, 5254), 'crewai.Agent', 'Agent', ([], {'role': '"""scorer"""', 'goal': '"""You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:\n Scale:\n 1-3: Poor - The dialogue has significant issues that prevent effective communication.\n 4-6: Average - The dialogue has some good points but also has notable weaknesses.\n 7-9: Good - The dialogue is mostly effective with minor issues.\n 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.\n Factors to Consider:\n Clarity: How clear is the exchange? Are the statements and responses easy to understand?\n Relevance: Do the responses stay on topic and contribute to the conversation\'s purpose?\n Conciseness: Is the dialogue free of unnecessary information or redundancy?\n Politeness: Are the participants respectful and considerate in their interaction?\n Engagement: Do the participants seem interested and actively involved in the dialogue?\n Flow: Is there a natural progression of ideas and responses? Are there awkward pauses or interruptions?\n Coherence: Does the dialogue make logical sense as a whole?\n Responsiveness: Do the participants address each other\'s points adequately?\n Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?\n Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?\n """', 'backstory': '"""You are an expert at scoring conversations on a scale of 1 to 10."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), '(role=\'scorer\', goal=\n """You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:\n Scale:\n 1-3: Poor - The dialogue has significant issues that prevent effective communication.\n 4-6: Average - The dialogue has some good points but also has notable weaknesses.\n 7-9: Good - The dialogue is mostly effective with minor issues.\n 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.\n Factors to Consider:\n Clarity: How clear is the exchange? Are the statements and responses easy to understand?\n Relevance: Do the responses stay on topic and contribute to the conversation\'s purpose?\n Conciseness: Is the dialogue free of unnecessary information or redundancy?\n Politeness: Are the participants respectful and considerate in their interaction?\n Engagement: Do the participants seem interested and actively involved in the dialogue?\n Flow: Is there a natural progression of ideas and responses? 
Are there awkward pauses or interruptions?\n Coherence: Does the dialogue make logical sense as a whole?\n Responsiveness: Do the participants address each other\'s points adequately?\n Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?\n Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?\n """\n , backstory=\n \'You are an expert at scoring conversations on a scale of 1 to 10.\',\n llm=mixtral, verbose=True, allow_delegation=False)\n', (3659, 5254), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8227, 8430), 'crewai.Task', 'Task', ([], {'description': '("""Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n"""\n + discussion)', 'agent': 'spamfilter'}), '(description=\n """Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n"""\n + discussion, agent=spamfilter)\n', (8231, 8430), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8659, 8785), 'crewai.Task', 'Task', ([], {'description': '("""Analyse in much detail the following discussion:\n### DISCUSSION:\n""" +\n discussion)', 'agent': 'analyst'}), '(description=\n """Analyse in much detail the following discussion:\n### DISCUSSION:\n""" +\n discussion, agent=analyst)\n', (8663, 8785), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8784, 8991), 'crewai.Task', 'Task', ([], {'description': '"""Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes."""', 'agent': 'scriptwriter'}), "(description=\n 'Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes.'\n , agent=scriptwriter)\n", (8788, 8991), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8991, 9332), 'crewai.Task', 'Task', ([], {'description': '"""Format the script exactly like this:\n ## (person 1):\n (first text line from person 1)\n \n ## (person 2):\n (first text line from person 2)\n \n ## (person 1):\n (second text line from person 1)\n \n ## (person 2):\n (second text line from person 2)\n \n """', 'agent': 'formatter'}), '(description=\n """Format the script exactly like this:\n ## (person 1):\n (first text line from person 1)\n \n ## (person 2):\n (first text line from person 2)\n \n ## (person 1):\n (second text line from person 1)\n \n ## (person 2):\n (second text line from person 2)\n \n """\n , agent=formatter)\n', (8995, 9332), False, 'from crewai import Agent, Task, Crew, Process\n'), ((9344, 9463), 'crewai.Crew', 'Crew', ([], {'agents': '[analyst, scriptwriter, formatter]', 'tasks': '[task1, task2, task3]', 'verbose': '(2)', 'process': 'Process.sequential'}), '(agents=[analyst, scriptwriter, formatter], tasks=[task1, task2, task3],\n verbose=2, process=Process.sequential)\n', (9348, 9463), False, 'from crewai import Agent, Task, Crew, Process\n'), ((9847, 9878), 're.sub', 're.sub', (['"""\\\\(.*?\\\\)"""', '""""""', 'result'], {}), "('\\\\(.*?\\\\)', '', result)\n", (9853, 9878), False, 'import re\n'), ((10082, 10288), 'crewai.Task', 'Task', ([], {'description': '("""Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. 
Do not give an explanation.\n"""\n + result)', 'agent': 'scorer'}), '(description=\n """Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. Do not give an explanation.\n"""\n + result, agent=scorer)\n', (10086, 10288), False, 'from crewai import Agent, Task, Crew, Process\n'), ((1635, 1720), 'langchain_mistralai.chat_models.ChatMistralAI', 'ChatMistralAI', ([], {'mistral_api_key': 'mistral_key', 'model': '"""mistral-tiny"""', 'temperature': '(0.6)'}), "(mistral_api_key=mistral_key, model='mistral-tiny',\n temperature=0.6)\n", (1648, 1720), False, 'from langchain_mistralai.chat_models import ChatMistralAI\n'), ((1970, 2122), 'langchain.chat_models.openai.ChatOpenAI', 'openai.ChatOpenAI', ([], {'base_url': '"""https://api.together.xyz/v1"""', 'api_key': 'togetherai_key', 'temperature': '(0.5)', 'model': '"""mistralai/Mistral-7B-Instruct-v0.2"""'}), "(base_url='https://api.together.xyz/v1', api_key=\n togetherai_key, temperature=0.5, model='mistralai/Mistral-7B-Instruct-v0.2'\n )\n", (1987, 2122), False, 'from langchain.chat_models import openai\n'), ((2157, 2257), 'langchain_community.chat_models.ChatAnyscale', 'ChatAnyscale', ([], {'model': '"""mistralai/Mistral-7B-Instruct-v0.1"""', 'api_key': 'anyscale_key', 'streaming': '(False)'}), "(model='mistralai/Mistral-7B-Instruct-v0.1', api_key=\n anyscale_key, streaming=False)\n", (2169, 2257), False, 'from langchain_community.chat_models import ChatAnyscale\n')] |