bee5fe1281a2-0
Source code for langchain.chains.retrieval_qa.base

"""Chain for question-answering against a vector database."""
from __future__ import annotations

import warnings
from abc import abstractmethod
from typing import Any, Dict, List, Optional

from pydantic import Extra, Field, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
from langchain.prompts import PromptTemplate
from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore


class BaseRetrievalQA(Chain):
    combine_documents_chain: BaseCombineDocumentsChain
    """Chain to use to combine the documents."""
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    return_source_documents: bool = False
    """Return the source documents."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True
        allow_population_by_field_name = True

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys.

        :meta private:
        """
        return [self.input_key]
https://python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
bee5fe1281a2-1
    @property
    def output_keys(self) -> List[str]:
        """Return the output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        if self.return_source_documents:
            _output_keys = _output_keys + ["source_documents"]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[PromptTemplate] = None,
        **kwargs: Any,
    ) -> BaseRetrievalQA:
        """Initialize from LLM."""
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        llm_chain = LLMChain(llm=llm, prompt=_prompt)
        document_prompt = PromptTemplate(
            input_variables=["page_content"], template="Context:\n{page_content}"
        )
        combine_documents_chain = StuffDocumentsChain(
            llm_chain=llm_chain,
            document_variable_name="context",
            document_prompt=document_prompt,
        )
        return cls(combine_documents_chain=combine_documents_chain, **kwargs)

    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        chain_type_kwargs: Optional[dict] = None,
        **kwargs: Any,
    ) -> BaseRetrievalQA:
        """Load chain from chain type."""
        _chain_type_kwargs = chain_type_kwargs or {}
        combine_documents_chain = load_qa_chain(
            llm, chain_type=chain_type, **_chain_type_kwargs
        )
        return cls(combine_documents_chain=combine_documents_chain, **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
bee5fe1281a2-2
    @abstractmethod
    def _get_docs(self, question: str) -> List[Document]:
        """Get documents to do question answering over."""

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        """Run get_relevant_text and llm on input query.

        If chain has 'return_source_documents' as 'True', returns
        the retrieved documents as well under the key 'source_documents'.

        Example:
        .. code-block:: python

            res = indexqa({'query': 'This is my query'})
            answer, docs = res['result'], res['source_documents']
        """
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        question = inputs[self.input_key]

        docs = self._get_docs(question)
        answer = self.combine_documents_chain.run(
            input_documents=docs, question=question, callbacks=_run_manager.get_child()
        )

        if self.return_source_documents:
            return {self.output_key: answer, "source_documents": docs}
        else:
            return {self.output_key: answer}

    @abstractmethod
    async def _aget_docs(self, question: str) -> List[Document]:
        """Get documents to do question answering over."""

    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        """Run get_relevant_text and llm on input query.

        If chain has 'return_source_documents' as 'True', returns
        the retrieved documents as well under the key 'source_documents'.
https://python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
bee5fe1281a2-3
        Example:
        .. code-block:: python

            res = indexqa({'query': 'This is my query'})
            answer, docs = res['result'], res['source_documents']
        """
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        question = inputs[self.input_key]

        docs = await self._aget_docs(question)
        answer = await self.combine_documents_chain.arun(
            input_documents=docs, question=question, callbacks=_run_manager.get_child()
        )

        if self.return_source_documents:
            return {self.output_key: answer, "source_documents": docs}
        else:
            return {self.output_key: answer}


class RetrievalQA(BaseRetrievalQA):
    """Chain for question-answering against an index.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            from langchain.chains import RetrievalQA
            from langchain.vectorstores import FAISS
            from langchain.vectorstores.base import VectorStoreRetriever

            retriever = VectorStoreRetriever(vectorstore=FAISS(...))
            retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=retriever)
    """

    retriever: BaseRetriever = Field(exclude=True)

    def _get_docs(self, question: str) -> List[Document]:
        return self.retriever.get_relevant_documents(question)

    async def _aget_docs(self, question: str) -> List[Document]:
        return await self.retriever.aget_relevant_documents(question)
https://python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
bee5fe1281a2-4
class VectorDBQA(BaseRetrievalQA):
    """Chain for question-answering against a vector database."""

    vectorstore: VectorStore = Field(exclude=True, alias="vectorstore")
    """Vector Database to connect to."""
    k: int = 4
    """Number of documents to query for."""
    search_type: str = "similarity"
    """Search type to use over vectorstore. `similarity` or `mmr`."""
    search_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Extra search args."""

    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        warnings.warn(
            "`VectorDBQA` is deprecated - "
            "please use `from langchain.chains import RetrievalQA`"
        )
        return values

    @root_validator()
    def validate_search_type(cls, values: Dict) -> Dict:
        """Validate search type."""
        if "search_type" in values:
            search_type = values["search_type"]
            if search_type not in ("similarity", "mmr"):
                raise ValueError(f"search_type of {search_type} not allowed.")
        return values

    def _get_docs(self, question: str) -> List[Document]:
        if self.search_type == "similarity":
            docs = self.vectorstore.similarity_search(
                question, k=self.k, **self.search_kwargs
            )
        elif self.search_type == "mmr":
            docs = self.vectorstore.max_marginal_relevance_search(
                question, k=self.k, **self.search_kwargs
            )
        else:
            raise ValueError(f"search_type of {self.search_type} not allowed.")
        return docs
https://python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
bee5fe1281a2-5
    async def _aget_docs(self, question: str) -> List[Document]:
        raise NotImplementedError("VectorDBQA does not support async")

    @property
    def _chain_type(self) -> str:
        """Return the chain type."""
        return "vector_db_qa"
https://python.langchain.com/en/latest/_modules/langchain/chains/retrieval_qa/base.html
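For orientation, here is a minimal usage sketch of the RetrievalQA interface shown above; the toy corpus, embedding model, and query are placeholder assumptions, not part of the source:

from langchain.chains import RetrievalQA
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

# Assumed toy corpus; any VectorStore retriever works here.
vectorstore = FAISS.from_texts(
    ["LangChain composes LLM calls into chains."], OpenAIEmbeddings()
)
qa = RetrievalQA.from_chain_type(
    llm=OpenAI(),
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),
    return_source_documents=True,
)
res = qa({"query": "What does LangChain do?"})
answer, docs = res["result"], res["source_documents"]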
39b0ebe15cc7-0
Source code for langchain.chains.pal.base

"""Implements Program-Aided Language Models.

As in https://arxiv.org/pdf/2211.10435.pdf.
"""
from __future__ import annotations

import warnings
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain.chains.pal.math_prompt import MATH_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.utilities import PythonREPL


class PALChain(Chain):
    """Implements Program-Aided Language Models."""

    llm_chain: LLMChain
    llm: Optional[BaseLanguageModel] = None
    """[Deprecated]"""
    prompt: BasePromptTemplate = MATH_PROMPT
    """[Deprecated]"""
    stop: str = "\n\n"
    get_answer_expr: str = "print(solution())"
    python_globals: Optional[Dict[str, Any]] = None
    python_locals: Optional[Dict[str, Any]] = None
    output_key: str = "result"  #: :meta private:
    return_intermediate_steps: bool = False

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def raise_deprecation(cls, values: Dict) -> Dict:
        if "llm" in values:
            warnings.warn(
https://python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html
39b0ebe15cc7-1
"Directly instantiating an PALChain with an llm is deprecated. " "Please instantiate with llm_chain argument or using the one of " "the class method constructors from_math_prompt, " "from_colored_object_prompt." ) if "llm_chain" not in values and values["llm"] is not None: values["llm_chain"] = LLMChain(llm=values["llm"], prompt=MATH_PROMPT) return values @property def input_keys(self) -> List[str]: """Return the singular input key. :meta private: """ return self.prompt.input_variables @property def output_keys(self) -> List[str]: """Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, "intermediate_steps"] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() code = self.llm_chain.predict( stop=[self.stop], callbacks=_run_manager.get_child(), **inputs ) _run_manager.on_text(code, color="green", end="\n", verbose=self.verbose) repl = PythonREPL(_globals=self.python_globals, _locals=self.python_locals) res = repl.run(code + f"\n{self.get_answer_expr}") output = {self.output_key: res.strip()} if self.return_intermediate_steps: output["intermediate_steps"] = code
https://python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html
39b0ebe15cc7-2
        if self.return_intermediate_steps:
            output["intermediate_steps"] = code
        return output

    @classmethod
    def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) -> PALChain:
        """Load PAL from math prompt."""
        llm_chain = LLMChain(llm=llm, prompt=MATH_PROMPT)
        return cls(
            llm_chain=llm_chain,
            stop="\n\n",
            get_answer_expr="print(solution())",
            **kwargs,
        )

    @classmethod
    def from_colored_object_prompt(
        cls, llm: BaseLanguageModel, **kwargs: Any
    ) -> PALChain:
        """Load PAL from colored object prompt."""
        llm_chain = LLMChain(llm=llm, prompt=COLORED_OBJECT_PROMPT)
        return cls(
            llm_chain=llm_chain,
            stop="\n\n\n",
            get_answer_expr="print(answer)",
            **kwargs,
        )

    @property
    def _chain_type(self) -> str:
        return "pal_chain"
https://python.langchain.com/en/latest/_modules/langchain/chains/pal/base.html
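A minimal sketch of the math constructor above; the model choice and the word problem are assumptions:

from langchain.chains import PALChain
from langchain.llms import OpenAI

# The LLM writes a Python solution() function; PALChain runs it in a REPL
# and captures the printed result as the answer.
pal_chain = PALChain.from_math_prompt(OpenAI(temperature=0))
answer = pal_chain.run(
    "Jan has three times the number of pets as Marcia. Marcia has two more "
    "pets than Cindy. If Cindy has four pets, how many pets do the three have?"
)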
05d16a780e7e-0
Source code for langchain.chains.conversation.base

"""Chain that carries on a conversation and calls an LLM."""
from typing import Dict, List

from pydantic import Extra, Field, root_validator

from langchain.chains.conversation.prompt import PROMPT
from langchain.chains.llm import LLMChain
from langchain.memory.buffer import ConversationBufferMemory
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseMemory


class ConversationChain(LLMChain):
    """Chain to have a conversation and load context from memory.

    Example:
        .. code-block:: python

            from langchain import ConversationChain, OpenAI

            conversation = ConversationChain(llm=OpenAI())
    """

    memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
    """Default memory store."""
    prompt: BasePromptTemplate = PROMPT
    """Default conversation prompt to use."""

    input_key: str = "input"  #: :meta private:
    output_key: str = "response"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Use this since some prompt vars come from history."""
        return [self.input_key]

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        memory_keys = values["memory"].memory_variables
        input_key = values["input_key"]
        if input_key in memory_keys:
            raise ValueError(
https://python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html
05d16a780e7e-1
f"The input key {input_key} was also found in the memory keys " f"({memory_keys}) - please provide keys that don't overlap." ) prompt_variables = values["prompt"].input_variables expected_keys = memory_keys + [input_key] if set(expected_keys) != set(prompt_variables): raise ValueError( "Got unexpected prompt input variables. The prompt expects " f"{prompt_variables}, but got {memory_keys} as inputs from " f"memory, and {input_key} as the normal input key." ) return values By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 07, 2023.
https://python.langchain.com/en/latest/_modules/langchain/chains/conversation/base.html
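A minimal two-turn sketch, assuming an OpenAI API key is configured; it shows the default ConversationBufferMemory carrying the first turn into the second:

from langchain import ConversationChain, OpenAI

conversation = ConversationChain(llm=OpenAI(temperature=0))
conversation.predict(input="Hi, my name is Ada.")
# The buffered history is injected into the prompt, so the model can answer
# from the stored first turn.
reply = conversation.predict(input="What is my name?")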
2cbd0cc9ee5d-0
Source code for langchain.retrievers.wikipedia

from typing import List

from langchain.schema import BaseRetriever, Document
from langchain.utilities.wikipedia import WikipediaAPIWrapper


class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper):
    """
    It is effectively a wrapper for WikipediaAPIWrapper.
    It wraps load() to get_relevant_documents().
    It uses all WikipediaAPIWrapper arguments without any change.
    """

    def get_relevant_documents(self, query: str) -> List[Document]:
        return self.load(query=query)

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/wikipedia.html
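A minimal usage sketch (assumes the wikipedia package is installed); the query is a placeholder:

from langchain.retrievers import WikipediaRetriever

retriever = WikipediaRetriever()
docs = retriever.get_relevant_documents("Alan Turing")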
0ead0d283996-0
Source code for langchain.retrievers.pinecone_hybrid_search

"""Taken from: https://docs.pinecone.io/docs/hybrid-search"""
import hashlib
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document


def hash_text(text: str) -> str:
    return str(hashlib.sha256(text.encode("utf-8")).hexdigest())


def create_index(
    contexts: List[str],
    index: Any,
    embeddings: Embeddings,
    sparse_encoder: Any,
    ids: Optional[List[str]] = None,
    metadatas: Optional[List[dict]] = None,
) -> None:
    batch_size = 32
    _iterator = range(0, len(contexts), batch_size)
    try:
        from tqdm.auto import tqdm

        _iterator = tqdm(_iterator)
    except ImportError:
        pass

    if ids is None:
        # create unique ids using hash of the text
        ids = [hash_text(context) for context in contexts]

    for i in _iterator:
        # find end of batch
        i_end = min(i + batch_size, len(contexts))
        # extract batch
        context_batch = contexts[i:i_end]
        batch_ids = ids[i:i_end]
        metadata_batch = (
            metadatas[i:i_end] if metadatas else [{} for _ in context_batch]
        )
        # add context passages as metadata
        meta = [
            {"context": context, **metadata}
            for context, metadata in zip(context_batch, metadata_batch)
        ]
https://python.langchain.com/en/latest/_modules/langchain/retrievers/pinecone_hybrid_search.html
0ead0d283996-1
        # create dense vectors
        dense_embeds = embeddings.embed_documents(context_batch)
        # create sparse vectors
        sparse_embeds = sparse_encoder.encode_documents(context_batch)
        for s in sparse_embeds:
            s["values"] = [float(s1) for s1 in s["values"]]
        vectors = []
        # loop through the data and create dictionaries for upserts
        for doc_id, sparse, dense, metadata in zip(
            batch_ids, sparse_embeds, dense_embeds, meta
        ):
            vectors.append(
                {
                    "id": doc_id,
                    "sparse_values": sparse,
                    "values": dense,
                    "metadata": metadata,
                }
            )
        # upload the documents to the new hybrid index
        index.upsert(vectors)


class PineconeHybridSearchRetriever(BaseRetriever, BaseModel):
    embeddings: Embeddings
    sparse_encoder: Any
    index: Any
    top_k: int = 4
    alpha: float = 0.5

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def add_texts(
        self,
        texts: List[str],
        ids: Optional[List[str]] = None,
        metadatas: Optional[List[dict]] = None,
    ) -> None:
        create_index(
            texts,
            self.index,
            self.embeddings,
            self.sparse_encoder,
            ids=ids,
            metadatas=metadatas,
        )

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
https://python.langchain.com/en/latest/_modules/langchain/retrievers/pinecone_hybrid_search.html
0ead0d283996-2
"""Validate that api key and python package exists in environment.""" try: from pinecone_text.hybrid import hybrid_convex_scale # noqa:F401 from pinecone_text.sparse.base_sparse_encoder import ( BaseSparseEncoder, # noqa:F401 ) except ImportError: raise ValueError( "Could not import pinecone_text python package. " "Please install it with `pip install pinecone_text`." ) return values [docs] def get_relevant_documents(self, query: str) -> List[Document]: from pinecone_text.hybrid import hybrid_convex_scale sparse_vec = self.sparse_encoder.encode_queries(query) # convert the question into a dense vector dense_vec = self.embeddings.embed_query(query) # scale alpha with hybrid_scale dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha) sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]] # query pinecone with the query parameters result = self.index.query( vector=dense_vec, sparse_vector=sparse_vec, top_k=self.top_k, include_metadata=True, ) final_result = [] for res in result["matches"]: context = res["metadata"].pop("context") final_result.append( Document(page_content=context, metadata=res["metadata"]) ) # return search results as json return final_result [docs] async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 07, 2023.
https://python.langchain.com/en/latest/_modules/langchain/retrievers/pinecone_hybrid_search.html
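A minimal sketch under assumed setup: an existing Pinecone index created with the dotproduct metric, and the pinecone-text BM25 encoder as the sparse encoder. The index name and credentials are placeholders:

import pinecone
from pinecone_text.sparse import BM25Encoder

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import PineconeHybridSearchRetriever

pinecone.init(api_key="...", environment="...")
index = pinecone.Index("hybrid-demo")  # assumed dotproduct-metric index

retriever = PineconeHybridSearchRetriever(
    embeddings=OpenAIEmbeddings(),
    sparse_encoder=BM25Encoder().default(),  # pretrained BM25 weights
    index=index,
)
retriever.add_texts(["foo", "bar", "world hello"])
docs = retriever.get_relevant_documents("foo")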
c5ae040622a8-0
Source code for langchain.retrievers.knn

"""KNN Retriever.

Largely based on https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations

import concurrent.futures
from typing import Any, List, Optional

import numpy as np
from pydantic import BaseModel

from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document


def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return np.array(list(executor.map(embeddings.embed_query, contexts)))


class KNNRetriever(BaseRetriever, BaseModel):
    embeddings: Embeddings
    index: Any
    texts: List[str]
    k: int = 4
    relevancy_threshold: Optional[float] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @classmethod
    def from_texts(
        cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
    ) -> KNNRetriever:
        index = create_index(texts, embeddings)
        return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)

    def get_relevant_documents(self, query: str) -> List[Document]:
        query_embeds = np.array(self.embeddings.embed_query(query))
        # calc L2 norm
        index_embeds = self.index / np.sqrt((self.index**2).sum(1, keepdims=True))
        query_embeds = query_embeds / np.sqrt((query_embeds**2).sum())
https://python.langchain.com/en/latest/_modules/langchain/retrievers/knn.html
c5ae040622a8-1
        similarities = index_embeds.dot(query_embeds)
        sorted_ix = np.argsort(-similarities)

        denominator = np.max(similarities) - np.min(similarities) + 1e-6
        normalized_similarities = (similarities - np.min(similarities)) / denominator

        top_k_results = [
            Document(page_content=self.texts[row])
            for row in sorted_ix[0 : self.k]
            if (
                self.relevancy_threshold is None
                or normalized_similarities[row] >= self.relevancy_threshold
            )
        ]
        return top_k_results

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/knn.html
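A minimal usage sketch; the toy corpus and embedding model are assumptions:

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import KNNRetriever

retriever = KNNRetriever.from_texts(
    ["foo", "bar", "world hello foo"], OpenAIEmbeddings()
)
docs = retriever.get_relevant_documents("foo")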
643b4d068e77-0
Source code for langchain.retrievers.arxiv

from typing import List

from langchain.schema import BaseRetriever, Document
from langchain.utilities.arxiv import ArxivAPIWrapper


class ArxivRetriever(BaseRetriever, ArxivAPIWrapper):
    """
    It is effectively a wrapper for ArxivAPIWrapper.
    It wraps load() to get_relevant_documents().
    It uses all ArxivAPIWrapper arguments without any change.
    """

    def get_relevant_documents(self, query: str) -> List[Document]:
        return self.load(query=query)

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/arxiv.html
47fe7d0cf27a-0
Source code for langchain.retrievers.databerry

from typing import List, Optional

import aiohttp
import requests

from langchain.schema import BaseRetriever, Document


class DataberryRetriever(BaseRetriever):
    datastore_url: str
    top_k: Optional[int]
    api_key: Optional[str]

    def __init__(
        self,
        datastore_url: str,
        top_k: Optional[int] = None,
        api_key: Optional[str] = None,
    ):
        self.datastore_url = datastore_url
        self.api_key = api_key
        self.top_k = top_k

    def get_relevant_documents(self, query: str) -> List[Document]:
        response = requests.post(
            self.datastore_url,
            json={
                "query": query,
                **({"topK": self.top_k} if self.top_k is not None else {}),
            },
            headers={
                "Content-Type": "application/json",
                **(
                    {"Authorization": f"Bearer {self.api_key}"}
                    if self.api_key is not None
                    else {}
                ),
            },
        )
        data = response.json()
        return [
            Document(
                page_content=r["text"],
                metadata={"source": r["source"], "score": r["score"]},
            )
            for r in data["results"]
        ]

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        async with aiohttp.ClientSession() as session:
            async with session.request(
                "POST",
https://python.langchain.com/en/latest/_modules/langchain/retrievers/databerry.html
47fe7d0cf27a-1
                self.datastore_url,
                json={
                    "query": query,
                    **({"topK": self.top_k} if self.top_k is not None else {}),
                },
                headers={
                    "Content-Type": "application/json",
                    **(
                        {"Authorization": f"Bearer {self.api_key}"}
                        if self.api_key is not None
                        else {}
                    ),
                },
            ) as response:
                data = await response.json()
        return [
            Document(
                page_content=r["text"],
                metadata={"source": r["source"], "score": r["score"]},
            )
            for r in data["results"]
        ]
https://python.langchain.com/en/latest/_modules/langchain/retrievers/databerry.html
7901cb345878-0
Source code for langchain.retrievers.svm

"""SVM Retriever.

Largely based on https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations

import concurrent.futures
from typing import Any, List, Optional

import numpy as np
from pydantic import BaseModel

from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document


def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return np.array(list(executor.map(embeddings.embed_query, contexts)))


class SVMRetriever(BaseRetriever, BaseModel):
    embeddings: Embeddings
    index: Any
    texts: List[str]
    k: int = 4
    relevancy_threshold: Optional[float] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @classmethod
    def from_texts(
        cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
    ) -> SVMRetriever:
        index = create_index(texts, embeddings)
        return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)

    def get_relevant_documents(self, query: str) -> List[Document]:
        from sklearn import svm

        query_embeds = np.array(self.embeddings.embed_query(query))
        x = np.concatenate([query_embeds[None, ...], self.index])
        y = np.zeros(x.shape[0])
https://python.langchain.com/en/latest/_modules/langchain/retrievers/svm.html
7901cb345878-1
        y[0] = 1
        clf = svm.LinearSVC(
            class_weight="balanced", verbose=False, max_iter=10000, tol=1e-6, C=0.1
        )
        clf.fit(x, y)

        similarities = clf.decision_function(x)
        sorted_ix = np.argsort(-similarities)

        # svm.LinearSVC in scikit-learn is non-deterministic.
        # if a text is the same as a query, there is no guarantee
        # the query will be in the first index.
        # this performs a simple swap, this works because anything
        # left of the 0 should be equivalent.
        zero_index = np.where(sorted_ix == 0)[0][0]
        if zero_index != 0:
            sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[0]

        denominator = np.max(similarities) - np.min(similarities) + 1e-6
        normalized_similarities = (similarities - np.min(similarities)) / denominator

        top_k_results = []
        for row in sorted_ix[1 : self.k + 1]:
            if (
                self.relevancy_threshold is None
                or normalized_similarities[row] >= self.relevancy_threshold
            ):
                top_k_results.append(Document(page_content=self.texts[row - 1]))
        return top_k_results

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/svm.html
5da1350477b8-0
Source code for langchain.retrievers.contextual_compression

"""Retriever that wraps a base retriever and filters the results."""
from typing import List

from pydantic import BaseModel, Extra

from langchain.retrievers.document_compressors.base import (
    BaseDocumentCompressor,
)
from langchain.schema import BaseRetriever, Document


class ContextualCompressionRetriever(BaseRetriever, BaseModel):
    """Retriever that wraps a base retriever and compresses the results."""

    base_compressor: BaseDocumentCompressor
    """Compressor for compressing retrieved documents."""

    base_retriever: BaseRetriever
    """Base Retriever to use for getting relevant documents."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def get_relevant_documents(self, query: str) -> List[Document]:
        """Get documents relevant for a query.

        Args:
            query: string to find relevant documents for

        Returns:
            Sequence of relevant documents
        """
        docs = self.base_retriever.get_relevant_documents(query)
        compressed_docs = self.base_compressor.compress_documents(docs, query)
        return list(compressed_docs)

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        """Get documents relevant for a query.

        Args:
            query: string to find relevant documents for

        Returns:
            List of relevant documents
        """
        docs = await self.base_retriever.aget_relevant_documents(query)
        compressed_docs = await self.base_compressor.acompress_documents(docs, query)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/contextual_compression.html
5da1350477b8-1
        return list(compressed_docs)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/contextual_compression.html
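A minimal sketch wiring an LLM-based extractor over a FAISS retriever; the corpus and models are assumptions:

from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.vectorstores import FAISS

base_retriever = FAISS.from_texts(
    ["foo bar baz", "hello world"], OpenAIEmbeddings()
).as_retriever()
compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=base_retriever
)
# Each retrieved document is reduced to the parts relevant to the query.
docs = retriever.get_relevant_documents("foo")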
0f2d60d38862-0
Source code for langchain.retrievers.chatgpt_plugin_retriever

from __future__ import annotations

from typing import List, Optional

import aiohttp
import requests
from pydantic import BaseModel

from langchain.schema import BaseRetriever, Document


class ChatGPTPluginRetriever(BaseRetriever, BaseModel):
    url: str
    bearer_token: str
    top_k: int = 3
    filter: Optional[dict] = None
    aiosession: Optional[aiohttp.ClientSession] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def get_relevant_documents(self, query: str) -> List[Document]:
        url, json, headers = self._create_request(query)
        response = requests.post(url, json=json, headers=headers)
        results = response.json()["results"][0]["results"]
        docs = []
        for d in results:
            content = d.pop("text")
            docs.append(Document(page_content=content, metadata=d))
        return docs

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        url, json, headers = self._create_request(query)

        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.post(url, headers=headers, json=json) as response:
                    res = await response.json()
        else:
            async with self.aiosession.post(
                url, headers=headers, json=json
            ) as response:
                res = await response.json()

        results = res["results"][0]["results"]
        docs = []
https://python.langchain.com/en/latest/_modules/langchain/retrievers/chatgpt_plugin_retriever.html
0f2d60d38862-1
        for d in results:
            content = d.pop("text")
            docs.append(Document(page_content=content, metadata=d))
        return docs

    def _create_request(self, query: str) -> tuple[str, dict, dict]:
        url = f"{self.url}/query"
        json = {
            "queries": [
                {
                    "query": query,
                    "filter": self.filter,
                    "top_k": self.top_k,
                }
            ]
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.bearer_token}",
        }
        return url, json, headers
https://python.langchain.com/en/latest/_modules/langchain/retrievers/chatgpt_plugin_retriever.html
a97a5fc7bc72-0
Source code for langchain.retrievers.tfidf

"""TF-IDF Retriever.

Largely based on https://github.com/asvskartheek/Text-Retrieval/blob/master/TF-IDF%20Search%20Engine%20(SKLEARN).ipynb"""
from __future__ import annotations

from typing import Any, Dict, Iterable, List, Optional

from pydantic import BaseModel

from langchain.schema import BaseRetriever, Document


class TFIDFRetriever(BaseRetriever, BaseModel):
    vectorizer: Any
    docs: List[Document]
    tfidf_array: Any
    k: int = 4

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @classmethod
    def from_texts(
        cls,
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict]] = None,
        tfidf_params: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> TFIDFRetriever:
        try:
            from sklearn.feature_extraction.text import TfidfVectorizer
        except ImportError:
            raise ImportError(
                "Could not import scikit-learn, please install with `pip install "
                "scikit-learn`."
            )

        tfidf_params = tfidf_params or {}
        vectorizer = TfidfVectorizer(**tfidf_params)
        tfidf_array = vectorizer.fit_transform(texts)
        metadatas = metadatas or ({} for _ in texts)
        docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
https://python.langchain.com/en/latest/_modules/langchain/retrievers/tfidf.html
a97a5fc7bc72-1
        return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array, **kwargs)

    @classmethod
    def from_documents(
        cls,
        documents: Iterable[Document],
        *,
        tfidf_params: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> TFIDFRetriever:
        texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
        return cls.from_texts(
            texts=texts, tfidf_params=tfidf_params, metadatas=metadatas, **kwargs
        )

    def get_relevant_documents(self, query: str) -> List[Document]:
        from sklearn.metrics.pairwise import cosine_similarity

        query_vec = self.vectorizer.transform(
            [query]
        )  # Ip -- (n_docs, x), Op -- (n_docs, n_feats)
        results = cosine_similarity(self.tfidf_array, query_vec).reshape(
            (-1,)
        )  # Op -- (n_docs, 1) -- cosine similarity with each doc
        return_docs = [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
        return return_docs

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/tfidf.html
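A minimal usage sketch (requires scikit-learn); the corpus is a placeholder:

from langchain.retrievers import TFIDFRetriever

retriever = TFIDFRetriever.from_texts(["foo", "bar", "world hello foo"])
docs = retriever.get_relevant_documents("foo")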
3947d9bac228-0
Source code for langchain.retrievers.weaviate_hybrid_search

"""Wrapper around weaviate vector database."""
from __future__ import annotations

from typing import Any, Dict, List, Optional
from uuid import uuid4

from pydantic import Extra

from langchain.docstore.document import Document
from langchain.schema import BaseRetriever


class WeaviateHybridSearchRetriever(BaseRetriever):
    def __init__(
        self,
        client: Any,
        index_name: str,
        text_key: str,
        alpha: float = 0.5,
        k: int = 4,
        attributes: Optional[List[str]] = None,
        create_schema_if_missing: bool = True,
    ):
        try:
            import weaviate
        except ImportError:
            raise ImportError(
                "Could not import weaviate python package. "
                "Please install it with `pip install weaviate-client`."
            )
        if not isinstance(client, weaviate.Client):
            raise ValueError(
                f"client should be an instance of weaviate.Client, got {type(client)}"
            )
        self._client = client
        self.k = k
        self.alpha = alpha
        self._index_name = index_name
        self._text_key = text_key
        self._query_attrs = [self._text_key]
        if attributes is not None:
            self._query_attrs.extend(attributes)
        if create_schema_if_missing:
            self._create_schema_if_missing()

    def _create_schema_if_missing(self) -> None:
        class_obj = {
            "class": self._index_name,
https://python.langchain.com/en/latest/_modules/langchain/retrievers/weaviate_hybrid_search.html
3947d9bac228-1
"properties": [{"name": self._text_key, "dataType": ["text"]}], "vectorizer": "text2vec-openai", } if not self._client.schema.exists(self._index_name): self._client.schema.create_class(class_obj) [docs] class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True # added text_key [docs] def add_documents(self, docs: List[Document], **kwargs: Any) -> List[str]: """Upload documents to Weaviate.""" from weaviate.util import get_valid_uuid with self._client.batch as batch: ids = [] for i, doc in enumerate(docs): metadata = doc.metadata or {} data_properties = {self._text_key: doc.page_content, **metadata} # If the UUID of one of the objects already exists # then the existing objectwill be replaced by the new object. if "uuids" in kwargs: _id = kwargs["uuids"][i] else: _id = get_valid_uuid(uuid4()) batch.add_data_object(data_properties, self._index_name, _id) ids.append(_id) return ids [docs] def get_relevant_documents( self, query: str, where_filter: Optional[Dict[str, object]] = None ) -> List[Document]: """Look up similar documents in Weaviate.""" query_obj = self._client.query.get(self._index_name, self._query_attrs) if where_filter: query_obj = query_obj.with_where(where_filter)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/weaviate_hybrid_search.html
3947d9bac228-2
        if where_filter:
            query_obj = query_obj.with_where(where_filter)
        result = query_obj.with_hybrid(query, alpha=self.alpha).with_limit(self.k).do()
        if "errors" in result:
            raise ValueError(f"Error during query: {result['errors']}")

        docs = []
        for res in result["data"]["Get"][self._index_name]:
            text = res.pop(self._text_key)
            docs.append(Document(page_content=text, metadata=res))
        return docs

    async def aget_relevant_documents(
        self, query: str, where_filter: Optional[Dict[str, object]] = None
    ) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/weaviate_hybrid_search.html
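A minimal sketch, assuming a reachable Weaviate instance; the URL, index name, and documents are placeholders:

import weaviate

from langchain.retrievers import WeaviateHybridSearchRetriever
from langchain.schema import Document

client = weaviate.Client(url="http://localhost:8080")  # assumed local instance
retriever = WeaviateHybridSearchRetriever(
    client=client, index_name="LangChain", text_key="text"
)
retriever.add_documents([Document(page_content="foo", metadata={"topic": "demo"})])
docs = retriever.get_relevant_documents("foo")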
739acf7fbb7b-0
Source code for langchain.retrievers.azure_cognitive_search

"""Retriever wrapper for Azure Cognitive Search."""
from __future__ import annotations

import json
from typing import Dict, List, Optional

import aiohttp
import requests
from pydantic import BaseModel, Extra, root_validator

from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env


class AzureCognitiveSearchRetriever(BaseRetriever, BaseModel):
    """Wrapper around Azure Cognitive Search."""

    service_name: str = ""
    """Name of Azure Cognitive Search service"""
    index_name: str = ""
    """Name of Index inside Azure Cognitive Search service"""
    api_key: str = ""
    """API Key. Both Admin and Query keys work, but for reading data it's
    recommended to use a Query key."""
    api_version: str = "2020-06-30"
    """API version"""
    aiosession: Optional[aiohttp.ClientSession] = None
    """ClientSession, in case we want to reuse connection for better performance."""
    content_key: str = "content"
    """Key in a retrieved result to set as the Document page_content."""

    class Config:
        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that service name, index name and api key exist in environment."""
        values["service_name"] = get_from_dict_or_env(
            values, "service_name", "AZURE_COGNITIVE_SEARCH_SERVICE_NAME"
        )
        values["index_name"] = get_from_dict_or_env(
            values, "index_name", "AZURE_COGNITIVE_SEARCH_INDEX_NAME"
        )
https://python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html
739acf7fbb7b-1
) values["api_key"] = get_from_dict_or_env( values, "api_key", "AZURE_COGNITIVE_SEARCH_API_KEY" ) return values def _build_search_url(self, query: str) -> str: base_url = f"https://{self.service_name}.search.windows.net/" endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}" return base_url + endpoint_path + f"&search={query}" @property def _headers(self) -> Dict[str, str]: return { "Content-Type": "application/json", "api-key": self.api_key, } def _search(self, query: str) -> List[dict]: search_url = self._build_search_url(query) response = requests.get(search_url, headers=self._headers) if response.status_code != 200: raise Exception(f"Error in search request: {response}") return json.loads(response.text)["value"] async def _asearch(self, query: str) -> List[dict]: search_url = self._build_search_url(query) if not self.aiosession: async with aiohttp.ClientSession() as session: async with session.get(search_url, headers=self._headers) as response: response_json = await response.json() else: async with self.aiosession.get( search_url, headers=self._headers ) as response: response_json = await response.json() return response_json["value"] [docs] def get_relevant_documents(self, query: str) -> List[Document]: search_results = self._search(query) return [
https://python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html
739acf7fbb7b-2
        search_results = self._search(query)

        return [
            Document(page_content=result.pop(self.content_key), metadata=result)
            for result in search_results
        ]

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        search_results = await self._asearch(query)

        return [
            Document(page_content=result.pop(self.content_key), metadata=result)
            for result in search_results
        ]
https://python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html
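A minimal sketch; the service name, index name, and key are placeholder environment values for an assumed existing search index:

import os

from langchain.retrievers import AzureCognitiveSearchRetriever

os.environ["AZURE_COGNITIVE_SEARCH_SERVICE_NAME"] = "my-search-service"
os.environ["AZURE_COGNITIVE_SEARCH_INDEX_NAME"] = "my-index"
os.environ["AZURE_COGNITIVE_SEARCH_API_KEY"] = "..."

retriever = AzureCognitiveSearchRetriever(content_key="content")
docs = retriever.get_relevant_documents("what is langchain?")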
525d42f2caab-0
Source code for langchain.retrievers.remote_retriever

from typing import List, Optional

import aiohttp
import requests
from pydantic import BaseModel

from langchain.schema import BaseRetriever, Document


class RemoteLangChainRetriever(BaseRetriever, BaseModel):
    url: str
    headers: Optional[dict] = None
    input_key: str = "message"
    response_key: str = "response"
    page_content_key: str = "page_content"
    metadata_key: str = "metadata"

    def get_relevant_documents(self, query: str) -> List[Document]:
        response = requests.post(
            self.url, json={self.input_key: query}, headers=self.headers
        )
        result = response.json()
        return [
            Document(
                page_content=r[self.page_content_key], metadata=r[self.metadata_key]
            )
            for r in result[self.response_key]
        ]

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        async with aiohttp.ClientSession() as session:
            async with session.request(
                "POST", self.url, headers=self.headers, json={self.input_key: query}
            ) as response:
                result = await response.json()
        return [
            Document(
                page_content=r[self.page_content_key], metadata=r[self.metadata_key]
            )
            for r in result[self.response_key]
        ]
https://python.langchain.com/en/latest/_modules/langchain/retrievers/remote_retriever.html
2692c0a20b6f-0
Source code for langchain.retrievers.pupmed

from typing import List

from langchain.schema import BaseRetriever, Document
from langchain.utilities.pupmed import PubMedAPIWrapper


class PubMedRetriever(BaseRetriever, PubMedAPIWrapper):
    """
    It is effectively a wrapper for PubMedAPIWrapper.
    It wraps load() to get_relevant_documents().
    It uses all PubMedAPIWrapper arguments without any change.
    """

    def get_relevant_documents(self, query: str) -> List[Document]:
        return self.load_docs(query=query)

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/pupmed.html
7bffede86950-0
Source code for langchain.retrievers.metal

from typing import Any, List, Optional

from langchain.schema import BaseRetriever, Document


class MetalRetriever(BaseRetriever):
    def __init__(self, client: Any, params: Optional[dict] = None):
        from metal_sdk.metal import Metal

        if not isinstance(client, Metal):
            raise ValueError(
                "Got unexpected client, should be of type metal_sdk.metal.Metal. "
                f"Instead, got {type(client)}"
            )
        self.client: Metal = client
        self.params = params or {}

    def get_relevant_documents(self, query: str) -> List[Document]:
        results = self.client.search({"text": query}, **self.params)
        final_results = []
        for r in results["data"]:
            metadata = {k: v for k, v in r.items() if k != "text"}
            final_results.append(Document(page_content=r["text"], metadata=metadata))
        return final_results

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/metal.html
13ce0c825ad7-0
Source code for langchain.retrievers.vespa_retriever

"""Wrapper for retrieving documents from Vespa."""
from __future__ import annotations

import json
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Sequence, Union

from langchain.schema import BaseRetriever, Document

if TYPE_CHECKING:
    from vespa.application import Vespa


class VespaRetriever(BaseRetriever):
    def __init__(
        self,
        app: Vespa,
        body: Dict,
        content_field: str,
        metadata_fields: Optional[Sequence[str]] = None,
    ):
        self._application = app
        self._query_body = body
        self._content_field = content_field
        self._metadata_fields = metadata_fields or ()

    def _query(self, body: Dict) -> List[Document]:
        response = self._application.query(body)
        if not str(response.status_code).startswith("2"):
            raise RuntimeError(
                "Could not retrieve data from Vespa. Error code: {}".format(
                    response.status_code
                )
            )
        root = response.json["root"]
        if "errors" in root:
            raise RuntimeError(json.dumps(root["errors"]))
        docs = []
        for child in response.hits:
            page_content = child["fields"].pop(self._content_field, "")
            if self._metadata_fields == "*":
                metadata = child["fields"]
            else:
                metadata = {mf: child["fields"].get(mf) for mf in self._metadata_fields}
            metadata["id"] = child["id"]
https://python.langchain.com/en/latest/_modules/langchain/retrievers/vespa_retriever.html
13ce0c825ad7-1
            docs.append(Document(page_content=page_content, metadata=metadata))
        return docs

    def get_relevant_documents(self, query: str) -> List[Document]:
        body = self._query_body.copy()
        body["query"] = query
        return self._query(body)

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError

    def get_relevant_documents_with_filter(
        self, query: str, *, _filter: Optional[str] = None
    ) -> List[Document]:
        body = self._query_body.copy()
        _filter = f" and {_filter}" if _filter else ""
        body["yql"] = body["yql"] + _filter
        body["query"] = query
        return self._query(body)

    @classmethod
    def from_params(
        cls,
        url: str,
        content_field: str,
        *,
        k: Optional[int] = None,
        metadata_fields: Union[Sequence[str], Literal["*"]] = (),
        sources: Union[Sequence[str], Literal["*"], None] = None,
        _filter: Optional[str] = None,
        yql: Optional[str] = None,
        **kwargs: Any,
    ) -> VespaRetriever:
        """Instantiate retriever from params.

        Args:
            url (str): Vespa app URL.
            content_field (str): Field in results to return as Document page_content.
            k (Optional[int]): Number of Documents to return. Defaults to None.
            metadata_fields (Sequence[str] or "*"): Fields in results to include in
https://python.langchain.com/en/latest/_modules/langchain/retrievers/vespa_retriever.html
13ce0c825ad7-2
                document metadata. Defaults to empty tuple ().
            sources (Sequence[str] or "*" or None): Sources to retrieve
                from. Defaults to None.
            _filter (Optional[str]): Document filter condition expressed in YQL.
                Defaults to None.
            yql (Optional[str]): Full YQL query to be used. Should not be specified
                if _filter or sources are specified. Defaults to None.
            kwargs (Any): Keyword arguments added to query body.
        """
        try:
            from vespa.application import Vespa
        except ImportError:
            raise ImportError(
                "pyvespa is not installed, please install with `pip install pyvespa`"
            )
        app = Vespa(url)
        body = kwargs.copy()
        if yql and (sources or _filter):
            raise ValueError(
                "yql should only be specified if both sources and _filter are not "
                "specified."
            )
        else:
            if metadata_fields == "*":
                _fields = "*"
                body["summary"] = "short"
            else:
                _fields = ", ".join([content_field] + list(metadata_fields or []))
            _sources = ", ".join(sources) if isinstance(sources, Sequence) else "*"
            _filter = f" and {_filter}" if _filter else ""
            yql = f"select {_fields} from sources {_sources} where userQuery(){_filter}"
        body["yql"] = yql
        if k:
            body["hits"] = k
        return cls(app, body, content_field, metadata_fields=metadata_fields)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/vespa_retriever.html
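A minimal sketch against an assumed public Vespa endpoint; the URL and content field are placeholders:

from langchain.retrievers import VespaRetriever

retriever = VespaRetriever.from_params(
    "https://doc-search.vespa.oath.cloud",  # assumed endpoint
    "content",
    k=4,
)
docs = retriever.get_relevant_documents("what is vespa?")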
57aba3fdad27-0
Source code for langchain.retrievers.zep

from __future__ import annotations

from typing import TYPE_CHECKING, Dict, List, Optional

from langchain.schema import BaseRetriever, Document

if TYPE_CHECKING:
    from zep_python import MemorySearchResult


class ZepRetriever(BaseRetriever):
    """A Retriever implementation for the Zep long-term memory store. Search your
    user's long-term chat history with Zep.

    Note: You will need to provide the user's `session_id` to use this retriever.

    More on Zep:
    Zep provides long-term conversation storage for LLM apps. The server stores,
    summarizes, embeds, indexes, and enriches conversational AI chat histories,
    and exposes them via simple, low-latency APIs.

    For server installation instructions, see:
    https://getzep.github.io/deployment/quickstart/
    """

    def __init__(
        self,
        session_id: str,
        url: str,
        top_k: Optional[int] = None,
    ):
        try:
            from zep_python import ZepClient
        except ImportError:
            raise ValueError(
                "Could not import zep-python package. "
                "Please install it with `pip install zep-python`."
            )

        self.zep_client = ZepClient(base_url=url)
        self.session_id = session_id
        self.top_k = top_k

    def _search_result_to_doc(
        self, results: List[MemorySearchResult]
    ) -> List[Document]:
        return [
            Document(
                page_content=r.message.pop("content"),
                metadata={"score": r.dist, **r.message},
https://python.langchain.com/en/latest/_modules/langchain/retrievers/zep.html
57aba3fdad27-1
            )
            for r in results
            if r.message
        ]

    def get_relevant_documents(
        self, query: str, metadata: Optional[Dict] = None
    ) -> List[Document]:
        from zep_python import MemorySearchPayload

        payload: MemorySearchPayload = MemorySearchPayload(
            text=query, metadata=metadata
        )
        results: List[MemorySearchResult] = self.zep_client.search_memory(
            self.session_id, payload, limit=self.top_k
        )
        return self._search_result_to_doc(results)

    async def aget_relevant_documents(
        self, query: str, metadata: Optional[Dict] = None
    ) -> List[Document]:
        from zep_python import MemorySearchPayload

        payload: MemorySearchPayload = MemorySearchPayload(
            text=query, metadata=metadata
        )
        results: List[MemorySearchResult] = await self.zep_client.asearch_memory(
            self.session_id, payload, limit=self.top_k
        )
        return self._search_result_to_doc(results)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/zep.html
0d2476affc40-0
Source code for langchain.retrievers.time_weighted_retriever

"""Retriever that combines embedding similarity with recency in retrieving values."""
import datetime
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple

from pydantic import BaseModel, Field

from langchain.schema import BaseRetriever, Document
from langchain.vectorstores.base import VectorStore


def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
    """Get the hours passed between two datetime objects."""
    return (time - ref_time).total_seconds() / 3600


class TimeWeightedVectorStoreRetriever(BaseRetriever, BaseModel):
    """Retriever combining embedding similarity with recency."""

    vectorstore: VectorStore
    """The vectorstore to store documents and determine salience."""

    search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
    """Keyword arguments to pass to the vectorstore similarity search."""

    # TODO: abstract as a queue
    memory_stream: List[Document] = Field(default_factory=list)
    """The memory_stream of documents to search through."""

    decay_rate: float = Field(default=0.01)
    """The exponential decay factor used as (1.0-decay_rate)**(hrs_passed)."""

    k: int = 4
    """The maximum number of documents to retrieve in a given call."""

    other_score_keys: List[str] = []
    """Other keys in the metadata to factor into the score, e.g. 'importance'."""

    default_salience: Optional[float] = None
    """The salience to assign memories not retrieved from the vector store.

    None assigns no salience to documents not fetched from the vector store.
    """
https://python.langchain.com/en/latest/_modules/langchain/retrievers/time_weighted_retriever.html
0d2476affc40-1
""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def _get_combined_score( self, document: Document, vector_relevance: Optional[float], current_time: datetime.datetime, ) -> float: """Return the combined score for a document.""" hours_passed = _get_hours_passed( current_time, document.metadata["last_accessed_at"], ) score = (1.0 - self.decay_rate) ** hours_passed for key in self.other_score_keys: if key in document.metadata: score += document.metadata[key] if vector_relevance is not None: score += vector_relevance return score [docs] def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]: """Return documents that are salient to the query.""" docs_and_scores: List[Tuple[Document, float]] docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores( query, **self.search_kwargs ) results = {} for fetched_doc, relevance in docs_and_scores: if "buffer_idx" in fetched_doc.metadata: buffer_idx = fetched_doc.metadata["buffer_idx"] doc = self.memory_stream[buffer_idx] results[buffer_idx] = (doc, relevance) return results [docs] def get_relevant_documents(self, query: str) -> List[Document]: """Return documents that are relevant to the query.""" current_time = datetime.datetime.now() docs_and_scores = { doc.metadata["buffer_idx"]: (doc, self.default_salience) for doc in self.memory_stream[-self.k :]
https://python.langchain.com/en/latest/_modules/langchain/retrievers/time_weighted_retriever.html
0d2476affc40-2
            for doc in self.memory_stream[-self.k :]
        }
        # If a doc is considered salient, update the salience score
        docs_and_scores.update(self.get_salient_docs(query))
        rescored_docs = [
            (doc, self._get_combined_score(doc, relevance, current_time))
            for doc, relevance in docs_and_scores.values()
        ]
        rescored_docs.sort(key=lambda x: x[1], reverse=True)
        result = []
        # Ensure frequently accessed memories aren't forgotten
        for doc, _ in rescored_docs[: self.k]:
            # TODO: Update vector store doc once `update` method is exposed.
            buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
            buffered_doc.metadata["last_accessed_at"] = current_time
            result.append(buffered_doc)
        return result

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        """Return documents that are relevant to the query."""
        raise NotImplementedError

    def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
        """Add documents to vectorstore."""
        current_time = kwargs.get("current_time")
        if current_time is None:
            current_time = datetime.datetime.now()
        # Avoid mutating input documents
        dup_docs = [deepcopy(d) for d in documents]
        for i, doc in enumerate(dup_docs):
            if "last_accessed_at" not in doc.metadata:
                doc.metadata["last_accessed_at"] = current_time
            if "created_at" not in doc.metadata:
                doc.metadata["created_at"] = current_time
https://python.langchain.com/en/latest/_modules/langchain/retrievers/time_weighted_retriever.html
0d2476affc40-3
doc.metadata["buffer_idx"] = len(self.memory_stream) + i self.memory_stream.extend(dup_docs) return self.vectorstore.add_documents(dup_docs, **kwargs) [docs] async def aadd_documents( self, documents: List[Document], **kwargs: Any ) -> List[str]: """Add documents to vectorstore.""" current_time = kwargs.get("current_time") if current_time is None: current_time = datetime.datetime.now() # Avoid mutating input documents dup_docs = [deepcopy(d) for d in documents] for i, doc in enumerate(dup_docs): if "last_accessed_at" not in doc.metadata: doc.metadata["last_accessed_at"] = current_time if "created_at" not in doc.metadata: doc.metadata["created_at"] = current_time doc.metadata["buffer_idx"] = len(self.memory_stream) + i self.memory_stream.extend(dup_docs) return await self.vectorstore.aadd_documents(dup_docs, **kwargs) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 07, 2023.
https://python.langchain.com/en/latest/_modules/langchain/retrievers/time_weighted_retriever.html
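A minimal sketch using a FAISS store; the embedding dimensionality and the documents are assumptions:

import faiss

from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import Document
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
embedding_size = 1536  # assumed dimensionality of the embedding model
vectorstore = FAISS(
    embeddings.embed_query,
    faiss.IndexFlatL2(embedding_size),
    InMemoryDocstore({}),
    {},
)
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, decay_rate=0.01, k=1
)
retriever.add_documents([Document(page_content="hello world")])
# Recently added (and recently accessed) documents score higher.
docs = retriever.get_relevant_documents("hello world")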
a9e8edd150b9-0
Source code for langchain.retrievers.elastic_search_bm25

"""Wrapper around Elasticsearch vector database."""
from __future__ import annotations

import uuid
from typing import Any, Iterable, List

from langchain.docstore.document import Document
from langchain.schema import BaseRetriever


class ElasticSearchBM25Retriever(BaseRetriever):
    """Wrapper around Elasticsearch using BM25 as a retrieval method.

    To connect to an Elasticsearch instance that requires login credentials,
    including Elastic Cloud, use the Elasticsearch URL format
    https://username:password@es_host:9243. For example, to connect to Elastic
    Cloud, create the Elasticsearch URL with the required authentication details and
    pass it to the ElasticVectorSearch constructor as the named parameter
    elasticsearch_url.

    You can obtain your Elastic Cloud URL and login credentials by logging in to the
    Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
    navigating to the "Deployments" page.

    To obtain your Elastic Cloud password for the default "elastic" user:

    1. Log in to the Elastic Cloud console at https://cloud.elastic.co
    2. Go to "Security" > "Users"
    3. Locate the "elastic" user and click "Edit"
    4. Click "Reset password"
    5. Follow the prompts to reset the password

    The format for Elastic Cloud URLs is
    https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
    """

    def __init__(self, client: Any, index_name: str):
        self.client = client
        self.index_name = index_name
cls, elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75
) -> ElasticSearchBM25Retriever:
from elasticsearch import Elasticsearch
# Create an Elasticsearch client instance
es = Elasticsearch(elasticsearch_url)
# Define the index settings and mappings
settings = {
"analysis": {"analyzer": {"default": {"type": "standard"}}},
"similarity": {
"custom_bm25": {
"type": "BM25",
"k1": k1,
"b": b,
}
},
}
mappings = {
"properties": {
"content": {
"type": "text",
"similarity": "custom_bm25",  # Use the custom BM25 similarity
}
}
}
# Create the index with the specified settings and mappings
es.indices.create(index=index_name, mappings=mappings, settings=settings)
return cls(es, index_name)
[docs] def add_texts(
self,
texts: Iterable[str],
refresh_indices: bool = True,
) -> List[str]:
"""Run more texts through the embeddings and add to the retriever.
Args:
texts: Iterable of strings to add to the retriever.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the retriever.
"""
try:
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = []
for i, text in enumerate(texts):
_id = str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
"content": text,
"_id": _id,
}
ids.append(_id)
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
query_dict = {"query": {"match": {"content": query}}}
res = self.client.search(index=self.index_name, body=query_dict)
docs = []
for r in res["hits"]["hits"]:
docs.append(Document(page_content=r["_source"]["content"]))
return docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
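Example usage (illustrative; the URL and index name are placeholders and assume a reachable Elasticsearch instance):

    from langchain.retrievers import ElasticSearchBM25Retriever

    retriever = ElasticSearchBM25Retriever.create(
        elasticsearch_url="http://localhost:9200", index_name="langchain-index"
    )
    retriever.add_texts(["foo", "bar", "world hello"])
    docs = retriever.get_relevant_documents("foo")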
Source code for langchain.retrievers.self_query.base """Retriever that generates and executes structured queries over its own data source.""" from typing import Any, Dict, List, Optional, Type, cast from pydantic import BaseModel, Field, root_validator from langchain import LLMChain from langchain.base_language import BaseLanguageModel from langchain.chains.query_constructor.base import load_query_constructor_chain from langchain.chains.query_constructor.ir import StructuredQuery, Visitor from langchain.chains.query_constructor.schema import AttributeInfo from langchain.retrievers.self_query.chroma import ChromaTranslator from langchain.retrievers.self_query.pinecone import PineconeTranslator from langchain.retrievers.self_query.qdrant import QdrantTranslator from langchain.retrievers.self_query.weaviate import WeaviateTranslator from langchain.schema import BaseRetriever, Document from langchain.vectorstores import Chroma, Pinecone, Qdrant, VectorStore, Weaviate def _get_builtin_translator(vectorstore: VectorStore) -> Visitor: """Get the translator class corresponding to the vector store class.""" vectorstore_cls = vectorstore.__class__ BUILTIN_TRANSLATORS: Dict[Type[VectorStore], Type[Visitor]] = { Pinecone: PineconeTranslator, Chroma: ChromaTranslator, Weaviate: WeaviateTranslator, Qdrant: QdrantTranslator, } if vectorstore_cls not in BUILTIN_TRANSLATORS: raise ValueError( f"Self query retriever with Vector Store type {vectorstore_cls}" f" not supported." ) if isinstance(vectorstore, Qdrant): return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key)
return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key) return BUILTIN_TRANSLATORS[vectorstore_cls]() [docs]class SelfQueryRetriever(BaseRetriever, BaseModel): """Retriever that wraps around a vector store and uses an LLM to generate the vector store queries.""" vectorstore: VectorStore """The underlying vector store from which documents will be retrieved.""" llm_chain: LLMChain """The LLMChain for generating the vector store queries.""" search_type: str = "similarity" """The search type to perform on the vector store.""" search_kwargs: dict = Field(default_factory=dict) """Keyword arguments to pass in to the vector store search.""" structured_query_translator: Visitor """Translator for turning internal query language into vectorstore search params.""" verbose: bool = False class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator(pre=True) def validate_translator(cls, values: Dict) -> Dict: """Validate translator.""" if "structured_query_translator" not in values: values["structured_query_translator"] = _get_builtin_translator( values["vectorstore"] ) return values [docs] def get_relevant_documents(self, query: str) -> List[Document]: """Get documents relevant for a query. Args: query: string to find relevant documents for Returns: List of relevant documents """ inputs = self.llm_chain.prep_inputs({"query": query}) structured_query = cast( StructuredQuery, self.llm_chain.predict_and_parse(callbacks=None, **inputs) ) if self.verbose:
) if self.verbose: print(structured_query) new_query, new_kwargs = self.structured_query_translator.visit_structured_query( structured_query ) if structured_query.limit is not None: new_kwargs["k"] = structured_query.limit search_kwargs = {**self.search_kwargs, **new_kwargs} docs = self.vectorstore.search(new_query, self.search_type, **search_kwargs) return docs [docs] async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, vectorstore: VectorStore, document_contents: str, metadata_field_info: List[AttributeInfo], structured_query_translator: Optional[Visitor] = None, chain_kwargs: Optional[Dict] = None, enable_limit: bool = False, **kwargs: Any, ) -> "SelfQueryRetriever": if structured_query_translator is None: structured_query_translator = _get_builtin_translator(vectorstore) chain_kwargs = chain_kwargs or {} if "allowed_comparators" not in chain_kwargs: chain_kwargs[ "allowed_comparators" ] = structured_query_translator.allowed_comparators if "allowed_operators" not in chain_kwargs: chain_kwargs[ "allowed_operators" ] = structured_query_translator.allowed_operators llm_chain = load_query_constructor_chain( llm, document_contents, metadata_field_info, enable_limit=enable_limit, **chain_kwargs, ) return cls(
llm_chain=llm_chain,
vectorstore=vectorstore,
structured_query_translator=structured_query_translator,
**kwargs,
)
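Example usage (a sketch; the documents, metadata fields, and OpenAI models are illustrative, and the query-constructor chain additionally requires the `lark` package):

    from langchain.chains.query_constructor.schema import AttributeInfo
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.llms import OpenAI
    from langchain.retrievers import SelfQueryRetriever
    from langchain.schema import Document
    from langchain.vectorstores import Chroma

    docs = [
        Document(page_content="A bunch of scientists bring back dinosaurs", metadata={"year": 1993, "rating": 7.7}),
        Document(page_content="Toys come alive and have a ball doing so", metadata={"year": 1995, "rating": 8.3}),
    ]
    vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())
    metadata_field_info = [
        AttributeInfo(name="year", description="The year the movie was released", type="integer"),
        AttributeInfo(name="rating", description="A 1-10 rating for the movie", type="float"),
    ]
    retriever = SelfQueryRetriever.from_llm(
        llm=OpenAI(temperature=0),
        vectorstore=vectorstore,
        document_contents="Brief summary of a movie",
        metadata_field_info=metadata_field_info,
        enable_limit=True,
    )
    # The LLM turns this into a structured query with a rating comparison filter.
    retriever.get_relevant_documents("a movie rated higher than 8")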
Source code for langchain.retrievers.document_compressors.cohere_rerank
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Sequence
from pydantic import Extra, root_validator
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.schema import Document
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from cohere import Client
else:
# We do this to avoid pydantic annotation issues when actually instantiating
# while keeping this import optional
try:
from cohere import Client
except ImportError:
pass
[docs]class CohereRerank(BaseDocumentCompressor):
client: Client
top_n: int = 3
model: str = "rerank-english-v2.0"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exist in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
if len(documents) == 0:  # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.client.rerank(
model=self.model, query=query, documents=_docs, top_n=self.top_n
)
final_results = []
for r in results:
doc = doc_list[r.index]
doc.metadata["relevance_score"] = r.relevance_score
final_results.append(doc)
return final_results
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
raise NotImplementedError
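Example usage (a sketch; requires COHERE_API_KEY in the environment, and the sample documents are made up):

    from langchain.retrievers.document_compressors import CohereRerank
    from langchain.schema import Document

    compressor = CohereRerank(top_n=2)  # client is built from COHERE_API_KEY by the validator
    docs = [
        Document(page_content="Carson City is the capital city of Nevada."),
        Document(page_content="Washington, D.C. is the capital of the United States."),
        Document(page_content="Capital punishment has existed in the United States."),
    ]
    reranked = compressor.compress_documents(docs, query="What is the capital of the United States?")
    # Each returned document carries a `relevance_score` in its metadata.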
Source code for langchain.retrievers.document_compressors.base """Interface for retrieved document compressors.""" from abc import ABC, abstractmethod from typing import List, Sequence, Union from pydantic import BaseModel from langchain.schema import BaseDocumentTransformer, Document class BaseDocumentCompressor(BaseModel, ABC): """Base abstraction interface for document compression.""" @abstractmethod def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress retrieved documents given the query context.""" @abstractmethod async def acompress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress retrieved documents given the query context.""" [docs]class DocumentCompressorPipeline(BaseDocumentCompressor): """Document compressor that uses a pipeline of transformers.""" transformers: List[Union[BaseDocumentTransformer, BaseDocumentCompressor]] """List of document filters that are chained together and run in sequence.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Transform a list of documents.""" for _transformer in self.transformers: if isinstance(_transformer, BaseDocumentCompressor): documents = _transformer.compress_documents(documents, query) elif isinstance(_transformer, BaseDocumentTransformer): documents = _transformer.transform_documents(documents) else: raise ValueError(f"Got unexpected transformer type: {_transformer}") return documents [docs] async def acompress_documents( self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
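Example usage (a sketch of composing a pipeline; the splitter settings and threshold are illustrative):

    from langchain.document_transformers import EmbeddingsRedundantFilter
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers.document_compressors import (
        DocumentCompressorPipeline,
        EmbeddingsFilter,
    )
    from langchain.text_splitter import CharacterTextSplitter

    embeddings = OpenAIEmbeddings()
    # Split documents first, then drop near-duplicate chunks, then keep only
    # chunks relevant to the query; compressors receive the query, transformers do not.
    splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ")
    redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
    relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)
    pipeline = DocumentCompressorPipeline(
        transformers=[splitter, redundant_filter, relevant_filter]
    )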
Source code for langchain.retrievers.document_compressors.chain_extract """DocumentFilter that uses an LLM chain to extract the relevant parts of documents.""" from __future__ import annotations import asyncio from typing import Any, Callable, Dict, Optional, Sequence from langchain import LLMChain, PromptTemplate from langchain.base_language import BaseLanguageModel from langchain.retrievers.document_compressors.base import BaseDocumentCompressor from langchain.retrievers.document_compressors.chain_extract_prompt import ( prompt_template, ) from langchain.schema import BaseOutputParser, Document def default_get_input(query: str, doc: Document) -> Dict[str, Any]: """Return the compression chain input.""" return {"question": query, "context": doc.page_content} class NoOutputParser(BaseOutputParser[str]): """Parse outputs that could return a null string of some sort.""" no_output_str: str = "NO_OUTPUT" def parse(self, text: str) -> str: cleaned_text = text.strip() if cleaned_text == self.no_output_str: return "" return cleaned_text def _get_default_chain_prompt() -> PromptTemplate: output_parser = NoOutputParser() template = prompt_template.format(no_output_str=output_parser.no_output_str) return PromptTemplate( template=template, input_variables=["question", "context"], output_parser=output_parser, ) [docs]class LLMChainExtractor(BaseDocumentCompressor): llm_chain: LLMChain """LLM wrapper to use for compressing documents.""" get_input: Callable[[str, Document], dict] = default_get_input """Callable for constructing the chain input from the query and a Document.""" [docs] def compress_documents(
[docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress page content of raw documents.""" compressed_docs = [] for doc in documents: _input = self.get_input(query, doc) output = self.llm_chain.predict_and_parse(**_input) if len(output) == 0: continue compressed_docs.append(Document(page_content=output, metadata=doc.metadata)) return compressed_docs [docs] async def acompress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress page content of raw documents asynchronously.""" outputs = await asyncio.gather( *[ self.llm_chain.apredict_and_parse(**self.get_input(query, doc)) for doc in documents ] ) compressed_docs = [] for i, doc in enumerate(documents): if len(outputs[i]) == 0: continue compressed_docs.append( Document(page_content=outputs[i], metadata=doc.metadata) ) return compressed_docs [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, get_input: Optional[Callable[[str, Document], str]] = None, llm_chain_kwargs: Optional[dict] = None, ) -> LLMChainExtractor: """Initialize from LLM.""" _prompt = prompt if prompt is not None else _get_default_chain_prompt() _get_input = get_input if get_input is not None else default_get_input
llm_chain = LLMChain(llm=llm, prompt=_prompt, **(llm_chain_kwargs or {}))
return cls(llm_chain=llm_chain, get_input=_get_input)
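Example usage (illustrative; the sample text and model are assumptions):

    from langchain.llms import OpenAI
    from langchain.retrievers.document_compressors import LLMChainExtractor
    from langchain.schema import Document

    compressor = LLMChainExtractor.from_llm(OpenAI(temperature=0))
    docs = [Document(page_content="The sky is blue. Paris is the capital of France. Cats purr.")]
    compressed = compressor.compress_documents(docs, query="What is the capital of France?")
    # Documents whose extraction comes back as NO_OUTPUT are dropped entirely.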
Source code for langchain.retrievers.document_compressors.embeddings_filter
"""Document compressor that uses embeddings to drop documents unrelated to the query."""
from typing import Callable, Dict, Optional, Sequence
import numpy as np
from pydantic import root_validator
from langchain.document_transformers import (
_get_embeddings_from_stateful_docs,
get_stateful_documents,
)
from langchain.embeddings.base import Embeddings
from langchain.math_utils import cosine_similarity
from langchain.retrievers.document_compressors.base import (
BaseDocumentCompressor,
)
from langchain.schema import Document
[docs]class EmbeddingsFilter(BaseDocumentCompressor):
embeddings: Embeddings
"""Embeddings to use for embedding document contents and queries."""
similarity_fn: Callable = cosine_similarity
"""Similarity function for comparing documents. Function expected to take as input
two matrices (List[List[float]]) and return a matrix of scores where higher values
indicate greater similarity."""
k: Optional[int] = 20
"""The number of relevant documents to return. Can be set to None, in which case
`similarity_threshold` must be specified. Defaults to 20."""
similarity_threshold: Optional[float]
"""Threshold for determining when a document is similar enough to the query to be
returned. Defaults to None, and must be specified if `k` is set to None."""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_params(cls, values: Dict) -> Dict:
"""Validate similarity parameters."""
if values["k"] is None and values["similarity_threshold"] is None:
raise ValueError("Must specify one of `k` or `similarity_threshold`.")
return values
[docs] def compress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter documents based on similarity of their embeddings to the query."""
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(
self.embeddings, stateful_documents
)
embedded_query = self.embeddings.embed_query(query)
similarity = self.similarity_fn([embedded_query], embedded_documents)[0]
included_idxs = np.arange(len(embedded_documents))
if self.k is not None:
included_idxs = np.argsort(similarity)[::-1][: self.k]
if self.similarity_threshold is not None:
similar_enough = np.where(
similarity[included_idxs] > self.similarity_threshold
)
included_idxs = included_idxs[similar_enough]
return [stateful_documents[i] for i in included_idxs]
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter down documents."""
raise NotImplementedError
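Example usage (illustrative; the threshold is a typical starting point, not a default):

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers.document_compressors import EmbeddingsFilter
    from langchain.schema import Document

    embeddings_filter = EmbeddingsFilter(
        embeddings=OpenAIEmbeddings(), k=None, similarity_threshold=0.76
    )
    docs = [
        Document(page_content="The 2023 budget includes new rail funding."),
        Document(page_content="A recipe for sourdough bread."),
    ]
    relevant = embeddings_filter.compress_documents(docs, query="transit infrastructure spending")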
Source code for langchain.retrievers.document_compressors.chain_filter """Filter that uses an LLM to drop documents that aren't relevant to the query.""" from typing import Any, Callable, Dict, Optional, Sequence from langchain import BasePromptTemplate, LLMChain, PromptTemplate from langchain.base_language import BaseLanguageModel from langchain.output_parsers.boolean import BooleanOutputParser from langchain.retrievers.document_compressors.base import BaseDocumentCompressor from langchain.retrievers.document_compressors.chain_filter_prompt import ( prompt_template, ) from langchain.schema import Document def _get_default_chain_prompt() -> PromptTemplate: return PromptTemplate( template=prompt_template, input_variables=["question", "context"], output_parser=BooleanOutputParser(), ) def default_get_input(query: str, doc: Document) -> Dict[str, Any]: """Return the compression chain input.""" return {"question": query, "context": doc.page_content} [docs]class LLMChainFilter(BaseDocumentCompressor): """Filter that drops documents that aren't relevant to the query.""" llm_chain: LLMChain """LLM wrapper to use for filtering documents. The chain prompt is expected to have a BooleanOutputParser.""" get_input: Callable[[str, Document], dict] = default_get_input """Callable for constructing the chain input from the query and a Document.""" [docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Filter down documents based on their relevance to the query.""" filtered_docs = [] for doc in documents: _input = self.get_input(query, doc) include_doc = self.llm_chain.predict_and_parse(**_input)
if include_doc:
filtered_docs.append(doc)
return filtered_docs
[docs] async def acompress_documents(
self, documents: Sequence[Document], query: str
) -> Sequence[Document]:
"""Filter down documents."""
raise NotImplementedError
[docs] @classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any
) -> "LLMChainFilter":
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
llm_chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=llm_chain, **kwargs)
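Example usage (illustrative):

    from langchain.llms import OpenAI
    from langchain.retrievers.document_compressors import LLMChainFilter
    from langchain.schema import Document

    doc_filter = LLMChainFilter.from_llm(OpenAI(temperature=0))
    docs = [
        Document(page_content="Paris is the capital of France."),
        Document(page_content="Cats purr when they are content."),
    ]
    kept = doc_filter.compress_documents(docs, query="What is the capital of France?")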
Source code for langchain.memory.vectorstore """Class for a VectorStore-backed memory object.""" from typing import Any, Dict, List, Optional, Union from pydantic import Field from langchain.memory.chat_memory import BaseMemory from langchain.memory.utils import get_prompt_input_key from langchain.schema import Document from langchain.vectorstores.base import VectorStoreRetriever [docs]class VectorStoreRetrieverMemory(BaseMemory): """Class for a VectorStore-backed memory object.""" retriever: VectorStoreRetriever = Field(exclude=True) """VectorStoreRetriever object to connect to.""" memory_key: str = "history" #: :meta private: """Key name to locate the memories in the result of load_memory_variables.""" input_key: Optional[str] = None """Key name to index the inputs to load_memory_variables.""" return_docs: bool = False """Whether or not to return the result of querying the database directly.""" @property def memory_variables(self) -> List[str]: """The list of keys emitted from the load_memory_variables method.""" return [self.memory_key] def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: """Get the input key for the prompt.""" if self.input_key is None: return get_prompt_input_key(inputs, self.memory_variables) return self.input_key [docs] def load_memory_variables( self, inputs: Dict[str, Any] ) -> Dict[str, Union[List[Document], str]]: """Return history buffer.""" input_key = self._get_prompt_input_key(inputs) query = inputs[input_key] docs = self.retriever.get_relevant_documents(query)
result: Union[List[Document], str]
if not self.return_docs:
result = "\n".join([doc.page_content for doc in docs])
else:
result = docs
return {self.memory_key: result}
def _form_documents(
self, inputs: Dict[str, Any], outputs: Dict[str, str]
) -> List[Document]:
"""Format context from this conversation to buffer."""
# Each document should only include the current turn, not the chat history
filtered_inputs = {k: v for k, v in inputs.items() if k != self.memory_key}
texts = [
f"{k}: {v}"
for k, v in list(filtered_inputs.items()) + list(outputs.items())
]
page_content = "\n".join(texts)
return [Document(page_content=page_content)]
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
self.retriever.add_documents(documents)
[docs] def clear(self) -> None:
"""Nothing to clear."""
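Example usage (a sketch; the FAISS index and OpenAI embeddings are assumptions):

    import faiss
    from langchain.docstore import InMemoryDocstore
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.memory import VectorStoreRetrieverMemory
    from langchain.vectorstores import FAISS

    embeddings = OpenAIEmbeddings()
    vectorstore = FAISS(embeddings.embed_query, faiss.IndexFlatL2(1536), InMemoryDocstore({}), {})
    memory = VectorStoreRetrieverMemory(retriever=vectorstore.as_retriever(search_kwargs=dict(k=1)))
    memory.save_context({"input": "My favorite food is pizza"}, {"output": "Good to know"})
    # Retrieves the semantically closest saved turn, not the most recent one.
    memory.load_memory_variables({"input": "What should I eat?"})["history"]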
Source code for langchain.memory.token_buffer from typing import Any, Dict, List from langchain.base_language import BaseLanguageModel from langchain.memory.chat_memory import BaseChatMemory from langchain.schema import BaseMessage, get_buffer_string [docs]class ConversationTokenBufferMemory(BaseChatMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel memory_key: str = "history" max_token_limit: int = 2000 @property def buffer(self) -> List[BaseMessage]: """String buffer of memory.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" buffer: Any = self.buffer if self.return_messages: final_buffer: Any = buffer else: final_buffer = get_buffer_string( buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) return {self.memory_key: final_buffer} [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer. Pruned.""" super().save_context(inputs, outputs) # Prune buffer if it exceeds max token limit buffer = self.chat_memory.messages curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
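Example usage (illustrative token limit):

    from langchain.llms import OpenAI
    from langchain.memory import ConversationTokenBufferMemory

    memory = ConversationTokenBufferMemory(llm=OpenAI(), max_token_limit=60)
    memory.save_context({"input": "hi"}, {"output": "what's up"})
    memory.save_context({"input": "not much, you?"}, {"output": "not much"})
    memory.load_memory_variables({})  # oldest messages beyond the token budget are dropped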
Source code for langchain.memory.summary_buffer from typing import Any, Dict, List from pydantic import root_validator from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.summary import SummarizerMixin from langchain.schema import BaseMessage, get_buffer_string [docs]class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin): """Buffer with summarizer for storing conversation memory.""" max_token_limit: int = 2000 moving_summary_buffer: str = "" memory_key: str = "history" @property def buffer(self) -> List[BaseMessage]: return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" buffer = self.buffer if self.moving_summary_buffer != "": first_messages: List[BaseMessage] = [ self.summary_message_cls(content=self.moving_summary_buffer) ] buffer = first_messages + buffer if self.return_messages: final_buffer: Any = buffer else: final_buffer = get_buffer_string( buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix ) return {self.memory_key: final_buffer} @root_validator() def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" prompt_variables = values["prompt"].input_variables expected_keys = {"summary", "new_lines"} if expected_keys != set(prompt_variables): raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
return values
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.prune()
[docs] def prune(self) -> None:
"""Prune buffer if it exceeds max token limit"""
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
self.moving_summary_buffer = self.predict_new_summary(
pruned_memory, self.moving_summary_buffer
)
[docs] def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.moving_summary_buffer = ""
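Example usage (illustrative):

    from langchain.llms import OpenAI
    from langchain.memory import ConversationSummaryBufferMemory

    memory = ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=40)
    memory.save_context({"input": "hi"}, {"output": "what's up"})
    memory.save_context({"input": "tell me about yourself"}, {"output": "just an AI"})
    # Pruned messages are folded into moving_summary_buffer via predict_new_summary.
    memory.load_memory_variables({})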
Source code for langchain.memory.buffer_window
from typing import Any, Dict, List
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import BaseMessage, get_buffer_string
[docs]class ConversationBufferWindowMemory(BaseChatMemory):
"""Buffer for storing conversation memory."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history"  #: :meta private:
k: int = 5
@property
def buffer(self) -> List[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
buffer: Any = self.buffer[-self.k * 2 :] if self.k > 0 else []
if not self.return_messages:
buffer = get_buffer_string(
buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: buffer}
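Example usage:

    from langchain.memory import ConversationBufferWindowMemory

    memory = ConversationBufferWindowMemory(k=1)  # keep only the last exchange (2 messages)
    memory.save_context({"input": "hi"}, {"output": "what's up"})
    memory.save_context({"input": "not much, you?"}, {"output": "not much"})
    memory.load_memory_variables({})  # returns only the second exchange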
Source code for langchain.memory.combined
import warnings
from typing import Any, Dict, List, Set
from pydantic import validator
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import BaseMemory
[docs]class CombinedMemory(BaseMemory):
"""Class for combining multiple memories' data together."""
memories: List[BaseMemory]
"""For tracking all the memories that should be accessed."""
@validator("memories")
def check_repeated_memory_variable(
cls, value: List[BaseMemory]
) -> List[BaseMemory]:
all_variables: Set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
raise ValueError(
f"The same variables {overlap} are found in multiple "
"memory objects, which is not allowed by CombinedMemory."
)
all_variables |= set(val.memory_variables)
return value
@validator("memories")
def check_input_key(cls, value: List[BaseMemory]) -> List[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory):
if val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be set so the input is known. "
f"Was not set on {val}"
)
return value
@property
def memory_variables(self) -> List[str]:
"""All the memory variables that this instance provides,
collected from all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: Dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
memory_data = {
**memory_data,
**data,
}
return memory_data
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
[docs] def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
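Example usage (a sketch; the memory keys are illustrative but must be distinct, per the validator above):

    from langchain.llms import OpenAI
    from langchain.memory import (
        CombinedMemory,
        ConversationBufferMemory,
        ConversationSummaryMemory,
    )

    conv_memory = ConversationBufferMemory(memory_key="chat_history_lines", input_key="input")
    summary_memory = ConversationSummaryMemory(llm=OpenAI(), memory_key="history", input_key="input")
    memory = CombinedMemory(memories=[conv_memory, summary_memory])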
Source code for langchain.memory.simple
from typing import Any, Dict, List
from langchain.schema import BaseMemory
[docs]class SimpleMemory(BaseMemory):
"""Simple memory for storing context or other bits of information that shouldn't
ever change between prompts.
"""
memories: Dict[str, Any] = dict()
@property
def memory_variables(self) -> List[str]:
return list(self.memories.keys())
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
return self.memories
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed, my memory is set in stone."""
pass
[docs] def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
Source code for langchain.memory.summary from __future__ import annotations from typing import Any, Dict, List, Type from pydantic import BaseModel, root_validator from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import SUMMARY_PROMPT from langchain.prompts.base import BasePromptTemplate from langchain.schema import ( BaseChatMessageHistory, BaseMessage, SystemMessage, get_buffer_string, ) class SummarizerMixin(BaseModel): human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel prompt: BasePromptTemplate = SUMMARY_PROMPT summary_message_cls: Type[BaseMessage] = SystemMessage def predict_new_summary( self, messages: List[BaseMessage], existing_summary: str ) -> str: new_lines = get_buffer_string( messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) chain = LLMChain(llm=self.llm, prompt=self.prompt) return chain.predict(summary=existing_summary, new_lines=new_lines) [docs]class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin): """Conversation summarizer to memory.""" buffer: str = "" memory_key: str = "history" #: :meta private: [docs] @classmethod def from_messages( cls, llm: BaseLanguageModel, chat_memory: BaseChatMessageHistory, *, summarize_step: int = 2, **kwargs: Any, ) -> ConversationSummaryMemory:
**kwargs: Any, ) -> ConversationSummaryMemory: obj = cls(llm=llm, chat_memory=chat_memory, **kwargs) for i in range(0, len(obj.chat_memory.messages), summarize_step): obj.buffer = obj.predict_new_summary( obj.chat_memory.messages[i : i + summarize_step], obj.buffer ) return obj @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" if self.return_messages: buffer: Any = [self.summary_message_cls(content=self.buffer)] else: buffer = self.buffer return {self.memory_key: buffer} @root_validator() def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" prompt_variables = values["prompt"].input_variables expected_keys = {"summary", "new_lines"} if expected_keys != set(prompt_variables): raise ValueError( "Got unexpected prompt input variables. The prompt expects " f"{prompt_variables}, but it should have {expected_keys}." ) return values [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self.buffer = self.predict_new_summary( self.chat_memory.messages[-2:], self.buffer ) [docs] def clear(self) -> None: """Clear memory contents."""
super().clear()
self.buffer = ""
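Example usage (illustrative):

    from langchain.llms import OpenAI
    from langchain.memory import ConversationSummaryMemory

    memory = ConversationSummaryMemory(llm=OpenAI(temperature=0))
    memory.save_context({"input": "hi"}, {"output": "what's up"})
    memory.load_memory_variables({})  # {'history': <running summary of the chat>}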
Source code for langchain.memory.kg
from typing import Any, Dict, List, Type, Union
from pydantic import Field
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.graphs import NetworkxEntityGraph
from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import (
BaseMessage,
SystemMessage,
get_buffer_string,
)
[docs]class ConversationKGMemory(BaseChatMemory):
"""Knowledge graph memory for storing conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
"""
k: int = 2
"""Number of previous utterances to include in the context."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
llm: BaseLanguageModel
summary_message_cls: Type[BaseMessage] = SystemMessage
memory_key: str = "history"  #: :meta private:
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
entities = self._get_current_entities(inputs) summary_strings = [] for entity in entities: knowledge = self.kg.get_entity_knowledge(entity) if knowledge: summary = f"On {entity}: {'. '.join(knowledge)}." summary_strings.append(summary) context: Union[str, List] if not summary_strings: context = [] if self.return_messages else "" elif self.return_messages: context = [ self.summary_message_cls(content=text) for text in summary_strings ] else: context = "\n".join(summary_strings) return {self.memory_key: context} @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: """Get the input key for the prompt.""" if self.input_key is None: return get_prompt_input_key(inputs, self.memory_variables) return self.input_key def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str: """Get the output key for the prompt.""" if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") return list(outputs.keys())[0] return self.output_key [docs] def get_current_entities(self, input_string: str) -> List[str]: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) buffer_string = get_buffer_string( self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix,
human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) output = chain.predict( history=buffer_string, input=input_string, ) return get_entities(output) def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]: """Get the current entities in the conversation.""" prompt_input_key = self._get_prompt_input_key(inputs) return self.get_current_entities(inputs[prompt_input_key]) [docs] def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]: chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt) buffer_string = get_buffer_string( self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) output = chain.predict( history=buffer_string, input=input_string, verbose=True, ) knowledge = parse_triples(output) return knowledge def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None: """Get and update knowledge graph from the conversation history.""" prompt_input_key = self._get_prompt_input_key(inputs) knowledge = self.get_knowledge_triplets(inputs[prompt_input_key]) for triple in knowledge: self.kg.add_triple(triple) [docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self._get_and_update_kg(inputs) [docs] def clear(self) -> None: """Clear memory contents."""
super().clear()
self.kg.clear()
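Example usage (illustrative; the exact summary string depends on the LLM):

    from langchain.llms import OpenAI
    from langchain.memory import ConversationKGMemory

    memory = ConversationKGMemory(llm=OpenAI(temperature=0))
    memory.save_context({"input": "say hi to sam"}, {"output": "who is sam?"})
    memory.save_context({"input": "sam is my friend"}, {"output": "okay"})
    memory.load_memory_variables({"input": "who is sam"})  # e.g. {'history': 'On sam: sam is my friend.'}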
Source code for langchain.memory.buffer from typing import Any, Dict, List, Optional from pydantic import root_validator from langchain.memory.chat_memory import BaseChatMemory, BaseMemory from langchain.memory.utils import get_prompt_input_key from langchain.schema import get_buffer_string [docs]class ConversationBufferMemory(BaseChatMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" #: :meta private: @property def buffer(self) -> Any: """String buffer of memory.""" if self.return_messages: return self.chat_memory.messages else: return get_buffer_string( self.chat_memory.messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" return {self.memory_key: self.buffer} [docs]class ConversationStringBufferMemory(BaseMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" """Prefix to use for AI generated responses.""" buffer: str = "" output_key: Optional[str] = None input_key: Optional[str] = None memory_key: str = "history" #: :meta private: @root_validator() def validate_chains(cls, values: Dict) -> Dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
raise ValueError(
"return_messages must be False for ConversationStringBufferMemory"
)
return values
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
output_key = list(outputs.keys())[0]
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += "\n" + "\n".join([human, ai])
[docs] def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
Source code for langchain.memory.readonly
from typing import Any, Dict, List
from langchain.schema import BaseMemory
[docs]class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
[docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
[docs] def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
Source code for langchain.memory.entity import logging from abc import ABC, abstractmethod from itertools import islice from typing import Any, Dict, Iterable, List, Optional from pydantic import BaseModel, Field from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, ENTITY_SUMMARIZATION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key from langchain.prompts.base import BasePromptTemplate from langchain.schema import BaseMessage, get_buffer_string logger = logging.getLogger(__name__) class BaseEntityStore(BaseModel, ABC): @abstractmethod def get(self, key: str, default: Optional[str] = None) -> Optional[str]: """Get entity value from store.""" pass @abstractmethod def set(self, key: str, value: Optional[str]) -> None: """Set entity value in store.""" pass @abstractmethod def delete(self, key: str) -> None: """Delete entity value from store.""" pass @abstractmethod def exists(self, key: str) -> bool: """Check if entity exists in store.""" pass @abstractmethod def clear(self) -> None: """Delete all entities from store.""" pass [docs]class InMemoryEntityStore(BaseEntityStore): """Basic in-memory entity store.""" store: Dict[str, Optional[str]] = {} [docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]: return self.store.get(key, default)
return self.store.get(key, default) [docs] def set(self, key: str, value: Optional[str]) -> None: self.store[key] = value [docs] def delete(self, key: str) -> None: del self.store[key] [docs] def exists(self, key: str) -> bool: return key in self.store [docs] def clear(self) -> None: return self.store.clear() [docs]class RedisEntityStore(BaseEntityStore): """Redis-backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ redis_client: Any session_id: str = "default" key_prefix: str = "memory_store" ttl: Optional[int] = 60 * 60 * 24 recall_ttl: Optional[int] = 60 * 60 * 24 * 3 def __init__( self, session_id: str = "default", url: str = "redis://localhost:6379/0", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: import redis except ImportError: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." ) super().__init__(*args, **kwargs) try: self.redis_client = redis.Redis.from_url(url=url, decode_responses=True)
self.redis_client = redis.Redis.from_url(url=url, decode_responses=True) except redis.exceptions.ConnectionError as error: logger.error(error) self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" [docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'") return res [docs] def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) [docs] def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") [docs] def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 [docs] def clear(self) -> None: # iterate a list in batches of size batch_size def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]: iterator = iter(iterable)
while batch := list(islice(iterator, batch_size)):
yield batch
for keybatch in batched(
self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500
):
self.redis_client.delete(*keybatch)
[docs]class SQLiteEntityStore(BaseEntityStore):
"""SQLite-backed Entity store"""
session_id: str = "default"
table_name: str = "memory_store"
def __init__(
self,
session_id: str = "default",
db_file: str = "entities.db",
table_name: str = "memory_store",
*args: Any,
**kwargs: Any,
):
try:
import sqlite3
except ImportError:
raise ImportError(
"Could not import sqlite3. "
"sqlite3 is part of the Python standard library, so your "
"Python installation appears to be missing it."
)
super().__init__(*args, **kwargs)
self.conn = sqlite3.connect(db_file)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
@property
def full_table_name(self) -> str:
return f"{self.table_name}_{self.session_id}"
def _create_table_if_not_exists(self) -> None:
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {self.full_table_name} (
key TEXT PRIMARY KEY,
value TEXT
)
"""
with self.conn:
self.conn.execute(create_table_query)
[docs] def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
query = f""" SELECT value FROM {self.full_table_name} WHERE key = ? """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() if result is not None: value = result[0] return value return default [docs] def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) query = f""" INSERT OR REPLACE INTO {self.full_table_name} (key, value) VALUES (?, ?) """ with self.conn: self.conn.execute(query, (key, value)) [docs] def delete(self, key: str) -> None: query = f""" DELETE FROM {self.full_table_name} WHERE key = ? """ with self.conn: self.conn.execute(query, (key,)) [docs] def exists(self, key: str) -> bool: query = f""" SELECT 1 FROM {self.full_table_name} WHERE key = ? LIMIT 1 """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() return result is not None [docs] def clear(self) -> None: query = f""" DELETE FROM {self.full_table_name} """ with self.conn: self.conn.execute(query) [docs]class ConversationEntityMemory(BaseChatMemory): """Entity extractor & summarizer to memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel
ai_prefix: str = "AI" llm: BaseLanguageModel entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT entity_cache: List[str] = [] k: int = 3 chat_history_key: str = "history" entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore) @property def buffer(self) -> List[BaseMessage]: return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return ["entities", self.chat_history_key] [docs] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) output = chain.predict( history=buffer_string, input=inputs[prompt_input_key], ) if output.strip() == "NONE": entities = [] else: entities = [w.strip() for w in output.split(",")] entity_summaries = {} for entity in entities: entity_summaries[entity] = self.entity_store.get(entity, "") self.entity_cache = entities if self.return_messages:
buffer: Any = self.buffer[-self.k * 2 :]
else:
buffer = buffer_string
return {
self.chat_history_key: buffer,
"entities": entity_summaries,
}
[docs] def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
buffer_string = get_buffer_string(
self.buffer[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
input_data = inputs[prompt_input_key]
chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
for entity in self.entity_cache:
existing_summary = self.entity_store.get(entity, "")
output = chain.predict(
summary=existing_summary,
entity=entity,
history=buffer_string,
input=input_data,
)
self.entity_store.set(entity, output.strip())
[docs] def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
self.entity_cache.clear()
self.entity_store.clear()
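Example usage (illustrative; outputs depend on the LLM, and the default in-memory entity store is used):

    from langchain.llms import OpenAI
    from langchain.memory import ConversationEntityMemory

    memory = ConversationEntityMemory(llm=OpenAI(temperature=0))
    _input = {"input": "Deven & Sam are working on a hackathon project"}
    memory.load_memory_variables(_input)  # extracts entities and fills entity_cache
    memory.save_context(_input, {"output": "That sounds like a great project!"})
    memory.entity_store.get("Deven")  # per-entity summary maintained by save_context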
Source code for langchain.memory.chat_message_histories.cassandra import json import logging from typing import List from langchain.schema import ( BaseChatMessageHistory, BaseMessage, _message_to_dict, messages_from_dict, ) logger = logging.getLogger(__name__) DEFAULT_KEYSPACE_NAME = "chat_history" DEFAULT_TABLE_NAME = "message_store" DEFAULT_USERNAME = "cassandra" DEFAULT_PASSWORD = "cassandra" DEFAULT_PORT = 9042 [docs]class CassandraChatMessageHistory(BaseChatMessageHistory): """Chat message history that stores history in Cassandra. Args: contact_points: list of ips to connect to Cassandra cluster session_id: arbitrary key that is used to store the messages of a single chat session. port: port to connect to Cassandra cluster username: username to connect to Cassandra cluster password: password to connect to Cassandra cluster keyspace_name: name of the keyspace to use table_name: name of the table to use """ def __init__( self, contact_points: List[str], session_id: str, port: int = DEFAULT_PORT, username: str = DEFAULT_USERNAME, password: str = DEFAULT_PASSWORD, keyspace_name: str = DEFAULT_KEYSPACE_NAME, table_name: str = DEFAULT_TABLE_NAME, ): self.contact_points = contact_points self.session_id = session_id self.port = port self.username = username self.password = password self.keyspace_name = keyspace_name self.table_name = table_name try: from cassandra import ( AuthenticationFailed, OperationTimedOut, UnresolvableContactPoints, )
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
except ImportError:
raise ValueError(
"Could not import cassandra-driver python package. "
"Please install it with `pip install cassandra-driver`."
)
self.cluster: Cluster = Cluster(
contact_points,
port=port,
auth_provider=PlainTextAuthProvider(
username=self.username, password=self.password
),
)
try:
self.session = self.cluster.connect()
except (
AuthenticationFailed,
UnresolvableContactPoints,
OperationTimedOut,
) as error:
logger.error(
"Unable to establish connection with "
"cassandra chat message history database"
)
raise error
self._prepare_cassandra()
def _prepare_cassandra(self) -> None:
"""Create the keyspace and table if they don't exist yet"""
from cassandra import OperationTimedOut, Unavailable
try:
self.session.execute(
f"""CREATE KEYSPACE IF NOT EXISTS {self.keyspace_name}
WITH REPLICATION = {{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }};"""
)
except (OperationTimedOut, Unavailable) as error:
logger.error(
f"Unable to create cassandra "
f"chat message history keyspace: {self.keyspace_name}."
)
raise error
self.session.set_keyspace(self.keyspace_name)
try:
self.session.execute(
f"""CREATE TABLE IF NOT EXISTS
{self.table_name} (id UUID, session_id varchar,
https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cassandra.html
790cda312eb1-2
                {self.table_name} (id UUID, session_id varchar,
                history text, PRIMARY KEY ((session_id), id) );"""
            )
        except (OperationTimedOut, Unavailable) as error:
            logger.error(
                "Unable to create Cassandra chat message history "
                f"table: {self.table_name}"
            )
            raise error

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from Cassandra"""
        from cassandra import ReadFailure, ReadTimeout, Unavailable

        try:
            # Bind the session id as a parameter rather than interpolating it
            # into the CQL string, to avoid injection via session_id.
            rows = self.session.execute(
                f"SELECT * FROM {self.table_name} WHERE session_id = %s;",
                (self.session_id,),
            )
        except (Unavailable, ReadTimeout, ReadFailure) as error:
            logger.error("Unable to retrieve chat history messages from Cassandra")
            raise error

        items = [json.loads(row.history) for row in rows] if rows else []
        messages = messages_from_dict(items)
        return messages

[docs]    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in Cassandra"""
        import uuid

        from cassandra import Unavailable, WriteFailure, WriteTimeout

        try:
            # Insert into the configured table (the original hardcoded
            # "message_store", ignoring a custom table_name).
            self.session.execute(
                f"""INSERT INTO {self.table_name}
                (id, session_id, history)
                VALUES (%s, %s, %s);""",
                (uuid.uuid4(), self.session_id, json.dumps(_message_to_dict(message))),
            )
        except (Unavailable, WriteTimeout, WriteFailure) as error:
            logger.error("Unable to write chat history messages to Cassandra")
            raise error
https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cassandra.html
790cda312eb1-3
logger.error("Unable to write chat history messages to cassandra") raise error [docs] def clear(self) -> None: """Clear session memory from Cassandra""" from cassandra import OperationTimedOut, Unavailable try: self.session.execute( f"DELETE FROM {self.table_name} WHERE session_id = '{self.session_id}';" ) except (Unavailable, OperationTimedOut) as error: logger.error("Unable to clear chat history messages from cassandra") raise error def __del__(self) -> None: if self.session: self.session.shutdown() if self.cluster: self.cluster.shutdown() By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 07, 2023.
https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/cassandra.html
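A minimal usage sketch for CassandraChatMessageHistory, assuming a single Cassandra node reachable at 127.0.0.1:9042 with the default cassandra/cassandra credentials (all connection details here are placeholder assumptions). add_user_message and add_ai_message are convenience wrappers inherited from BaseChatMessageHistory.

from langchain.memory.chat_message_histories.cassandra import (
    CassandraChatMessageHistory,
)

history = CassandraChatMessageHistory(
    contact_points=["127.0.0.1"],      # placeholder node address
    session_id="user-123",             # any stable key for one chat session
)
history.add_user_message("hi!")
history.add_ai_message("hello, how can I help?")
print(history.messages)  # the stored HumanMessage / AIMessage objects
history.clear()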
ec5e1d80ddba-0
Source code for langchain.memory.chat_message_histories.mongodb
import json
import logging
from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
)

logger = logging.getLogger(__name__)

DEFAULT_DBNAME = "chat_history"
DEFAULT_COLLECTION_NAME = "message_store"

[docs]class MongoDBChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in MongoDB.

    Args:
        connection_string: connection string to connect to MongoDB
        session_id: arbitrary key that is used to store the messages
            of a single chat session.
        database_name: name of the database to use
        collection_name: name of the collection to use
    """

    def __init__(
        self,
        connection_string: str,
        session_id: str,
        database_name: str = DEFAULT_DBNAME,
        collection_name: str = DEFAULT_COLLECTION_NAME,
    ):
        from pymongo import MongoClient, errors

        self.connection_string = connection_string
        self.session_id = session_id
        self.database_name = database_name
        self.collection_name = collection_name

        try:
            self.client: MongoClient = MongoClient(connection_string)
        except errors.ConnectionFailure as error:
            logger.error(error)
            # Without a client the instance is unusable, so re-raise instead
            # of continuing with an unset attribute.
            raise error

        self.db = self.client[database_name]
        self.collection = self.db[collection_name]
        self.collection.create_index("SessionId")

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from MongoDB"""
        from pymongo import errors

        try:
            cursor = self.collection.find({"SessionId": self.session_id})
        except errors.OperationFailure as error:
            logger.error(error)
            return []
https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/mongodb.html
ec5e1d80ddba-1
        # PyMongo cursors do not support truth-value testing, so iterate
        # directly; an empty result simply yields an empty list.
        items = [json.loads(document["History"]) for document in cursor]
        messages = messages_from_dict(items)
        return messages

[docs]    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in MongoDB"""
        from pymongo import errors

        try:
            self.collection.insert_one(
                {
                    "SessionId": self.session_id,
                    "History": json.dumps(_message_to_dict(message)),
                }
            )
        except errors.WriteError as err:
            logger.error(err)

[docs]    def clear(self) -> None:
        """Clear session memory from MongoDB"""
        from pymongo import errors

        try:
            self.collection.delete_many({"SessionId": self.session_id})
        except errors.WriteError as err:
            logger.error(err)
https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/mongodb.html
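A similar sketch for MongoDBChatMessageHistory, assuming a local MongoDB instance on the default port (the connection string is a placeholder):

from langchain.memory.chat_message_histories.mongodb import (
    MongoDBChatMessageHistory,
)

history = MongoDBChatMessageHistory(
    connection_string="mongodb://localhost:27017/",  # placeholder URI
    session_id="user-123",
)
history.add_user_message("hi!")
history.add_ai_message("hello, how can I help?")
print(history.messages)  # deserialized via messages_from_dict
history.clear()  # removes every document for this session id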
571095ab28a9-0
Source code for langchain.memory.chat_message_histories.file
import json
import logging
from pathlib import Path
from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    messages_from_dict,
    messages_to_dict,
)

logger = logging.getLogger(__name__)

[docs]class FileChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in a local file.

    Args:
        file_path: path of the local file to store the messages.
    """

    def __init__(self, file_path: str):
        self.file_path = Path(file_path)
        if not self.file_path.exists():
            self.file_path.touch()
            self.file_path.write_text(json.dumps([]))

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from the local file"""
        items = json.loads(self.file_path.read_text())
        messages = messages_from_dict(items)
        return messages

[docs]    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in the local file"""
        # Note: this re-reads and rewrites the whole file on every append,
        # which is fine for small histories but O(n) per message.
        messages = messages_to_dict(self.messages)
        messages.append(messages_to_dict([message])[0])
        self.file_path.write_text(json.dumps(messages))

[docs]    def clear(self) -> None:
        """Clear session memory from the local file"""
        self.file_path.write_text(json.dumps([]))
https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/file.html
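FileChatMessageHistory needs no external service, so a sketch can run as-is; the file name below is an arbitrary choice:

import os
import tempfile

from langchain.memory.chat_message_histories.file import FileChatMessageHistory

path = os.path.join(tempfile.gettempdir(), "chat_history.json")
history = FileChatMessageHistory(path)
history.add_user_message("hi!")
history.add_ai_message("hello, how can I help?")
print(history.messages)  # round-trips through messages_to_dict / messages_from_dict
history.clear()  # resets the file to an empty JSON list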
bf5d84ee17a4-0
Source code for langchain.memory.chat_message_histories.redis
import json
import logging
from typing import List, Optional

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
)

logger = logging.getLogger(__name__)

[docs]class RedisChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in Redis."""

    def __init__(
        self,
        session_id: str,
        url: str = "redis://localhost:6379/0",
        key_prefix: str = "message_store:",
        ttl: Optional[int] = None,
    ):
        try:
            import redis
        except ImportError:
            raise ImportError(
                "Could not import redis python package. "
                "Please install it with `pip install redis`."
            )

        try:
            self.redis_client = redis.Redis.from_url(url=url)
        except redis.exceptions.ConnectionError as error:
            logger.error(error)
            # Without a client the instance is unusable, so re-raise.
            raise error

        self.session_id = session_id
        self.key_prefix = key_prefix
        self.ttl = ttl

    @property
    def key(self) -> str:
        """Construct the record key to use"""
        return self.key_prefix + self.session_id

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from Redis"""
        # Messages are LPUSHed, so the list is stored newest-first; reverse
        # it to return messages in chronological order.
        _items = self.redis_client.lrange(self.key, 0, -1)
        items = [json.loads(m.decode("utf-8")) for m in _items[::-1]]
        messages = messages_from_dict(items)
        return messages

[docs]    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in Redis"""
https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/redis.html
bf5d84ee17a4-1
"""Append the message to the record in Redis""" self.redis_client.lpush(self.key, json.dumps(_message_to_dict(message))) if self.ttl: self.redis_client.expire(self.key, self.ttl) [docs] def clear(self) -> None: """Clear session memory from Redis""" self.redis_client.delete(self.key) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Jun 07, 2023.
https://python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/redis.html
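Finally, a sketch for RedisChatMessageHistory, assuming a Redis server at the default localhost URL; the ttl value is an illustrative choice, not a recommendation:

from langchain.memory.chat_message_histories.redis import RedisChatMessageHistory

# ttl=600 makes the session key expire ten minutes after the last write.
history = RedisChatMessageHistory(session_id="user-123", ttl=600)
history.add_user_message("hi!")
history.add_ai_message("hello, how can I help?")
print(history.messages)  # oldest first, despite LPUSH storing newest first
history.clear()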